Diffstat (limited to 'drivers'): 357 files changed, 15053 insertions, 4230 deletions
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
index bd3c937b0ac0..7737afb157c3 100644
--- a/drivers/acpi/acpica/hwvalid.c
+++ b/drivers/acpi/acpica/hwvalid.c
@@ -90,7 +90,6 @@ static const struct acpi_port_info acpi_protected_ports[] = {
         {"PIT2", 0x0048, 0x004B, ACPI_OSI_WIN_XP},
         {"RTC", 0x0070, 0x0071, ACPI_OSI_WIN_XP},
         {"CMOS", 0x0074, 0x0076, ACPI_OSI_WIN_XP},
-        {"DMA1", 0x0081, 0x0083, ACPI_OSI_WIN_XP},
         {"DMA1L", 0x0087, 0x0087, ACPI_OSI_WIN_XP},
         {"DMA2", 0x0089, 0x008B, ACPI_OSI_WIN_XP},
         {"DMA2L", 0x008F, 0x008F, ACPI_OSI_WIN_XP},
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
index 05dfdc96802e..d0d550d22a6d 100644
--- a/drivers/acpi/proc.c
+++ b/drivers/acpi/proc.c
@@ -343,9 +343,6 @@ acpi_system_write_alarm(struct file *file,
 }
 #endif /* HAVE_ACPI_LEGACY_ALARM */
 
-extern struct list_head acpi_wakeup_device_list;
-extern spinlock_t acpi_device_lock;
-
 static int
 acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
 {
@@ -353,7 +350,7 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
 
         seq_printf(seq, "Device\tS-state\t Status Sysfs node\n");
 
-        spin_lock(&acpi_device_lock);
+        mutex_lock(&acpi_device_lock);
         list_for_each_safe(node, next, &acpi_wakeup_device_list) {
                 struct acpi_device *dev =
                     container_of(node, struct acpi_device, wakeup_list);
@@ -361,7 +358,6 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
 
                 if (!dev->wakeup.flags.valid)
                         continue;
-                spin_unlock(&acpi_device_lock);
 
                 ldev = acpi_get_physical_device(dev->handle);
                 seq_printf(seq, "%s\t S%d\t%c%-8s ",
@@ -376,9 +372,8 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
                 seq_printf(seq, "\n");
                 put_device(ldev);
 
-                spin_lock(&acpi_device_lock);
         }
-        spin_unlock(&acpi_device_lock);
+        mutex_unlock(&acpi_device_lock);
         return 0;
 }
 
@@ -409,7 +404,7 @@ acpi_system_write_wakeup_device(struct file *file,
         strbuf[len] = '\0';
         sscanf(strbuf, "%s", str);
 
-        spin_lock(&acpi_device_lock);
+        mutex_lock(&acpi_device_lock);
         list_for_each_safe(node, next, &acpi_wakeup_device_list) {
                 struct acpi_device *dev =
                     container_of(node, struct acpi_device, wakeup_list);
@@ -446,7 +441,7 @@ acpi_system_write_wakeup_device(struct file *file,
                         }
                 }
         }
-        spin_unlock(&acpi_device_lock);
+        mutex_unlock(&acpi_device_lock);
         return count;
 }
 
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 4e6e758bd397..6fe121434ffb 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -145,6 +145,9 @@ static void acpi_timer_check_state(int state, struct acpi_processor *pr,
         struct acpi_processor_power *pwr = &pr->power;
         u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;
 
+        if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
+                return;
+
         /*
          * Check, if one of the previous states already marked the lapic
          * unstable
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 20c23c049207..8ff510b91d88 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -24,7 +24,7 @@ extern struct acpi_device *acpi_root;
 
 static LIST_HEAD(acpi_device_list);
 static LIST_HEAD(acpi_bus_id_list);
-DEFINE_SPINLOCK(acpi_device_lock);
+DEFINE_MUTEX(acpi_device_lock);
 LIST_HEAD(acpi_wakeup_device_list);
 
 struct acpi_device_bus_id{
@@ -491,7 +491,6 @@ static int acpi_device_register(struct acpi_device *device,
          */
         INIT_LIST_HEAD(&device->children);
         INIT_LIST_HEAD(&device->node);
-        INIT_LIST_HEAD(&device->g_list);
         INIT_LIST_HEAD(&device->wakeup_list);
 
         new_bus_id = kzalloc(sizeof(struct acpi_device_bus_id), GFP_KERNEL);
@@ -500,7 +499,7 @@ static int acpi_device_register(struct acpi_device *device,
                 return -ENOMEM;
         }
 
-        spin_lock(&acpi_device_lock);
+        mutex_lock(&acpi_device_lock);
         /*
          * Find suitable bus_id and instance number in acpi_bus_id_list
          * If failed, create one and link it into acpi_bus_id_list
@@ -521,14 +520,12 @@ static int acpi_device_register(struct acpi_device *device,
         }
         dev_set_name(&device->dev, "%s:%02x", acpi_device_bus_id->bus_id, acpi_device_bus_id->instance_no);
 
-        if (device->parent) {
+        if (device->parent)
                 list_add_tail(&device->node, &device->parent->children);
-                list_add_tail(&device->g_list, &device->parent->g_list);
-        } else
-                list_add_tail(&device->g_list, &acpi_device_list);
+
         if (device->wakeup.flags.valid)
                 list_add_tail(&device->wakeup_list, &acpi_wakeup_device_list);
-        spin_unlock(&acpi_device_lock);
+        mutex_unlock(&acpi_device_lock);
 
         if (device->parent)
                 device->dev.parent = &parent->dev;
@@ -549,28 +546,22 @@ static int acpi_device_register(struct acpi_device *device,
         device->removal_type = ACPI_BUS_REMOVAL_NORMAL;
         return 0;
 end:
-        spin_lock(&acpi_device_lock);
-        if (device->parent) {
+        mutex_lock(&acpi_device_lock);
+        if (device->parent)
                 list_del(&device->node);
-                list_del(&device->g_list);
-        } else
-                list_del(&device->g_list);
         list_del(&device->wakeup_list);
-        spin_unlock(&acpi_device_lock);
+        mutex_unlock(&acpi_device_lock);
         return result;
 }
 
 static void acpi_device_unregister(struct acpi_device *device, int type)
 {
-        spin_lock(&acpi_device_lock);
-        if (device->parent) {
+        mutex_lock(&acpi_device_lock);
+        if (device->parent)
                 list_del(&device->node);
-                list_del(&device->g_list);
-        } else
-                list_del(&device->g_list);
 
         list_del(&device->wakeup_list);
-        spin_unlock(&acpi_device_lock);
+        mutex_unlock(&acpi_device_lock);
 
         acpi_detach_data(device->handle, acpi_bus_data_handler);
 
diff --git a/drivers/acpi/sleep.h b/drivers/acpi/sleep.h
index cfaf8f5b0a14..8a8f3b3382a6 100644
--- a/drivers/acpi/sleep.h
+++ b/drivers/acpi/sleep.h
@@ -5,3 +5,6 @@ extern int acpi_suspend (u32 state);
 extern void acpi_enable_wakeup_device_prep(u8 sleep_state);
 extern void acpi_enable_wakeup_device(u8 sleep_state);
 extern void acpi_disable_wakeup_device(u8 sleep_state);
+
+extern struct list_head acpi_wakeup_device_list;
+extern struct mutex acpi_device_lock;
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index e8c143caf0fd..9cd15e8c8932 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -98,6 +98,7 @@ MODULE_PARM_DESC(psv, "Disable or override all passive trip points.");
 static int acpi_thermal_add(struct acpi_device *device);
 static int acpi_thermal_remove(struct acpi_device *device, int type);
 static int acpi_thermal_resume(struct acpi_device *device);
+static void acpi_thermal_notify(struct acpi_device *device, u32 event);
 static int acpi_thermal_state_open_fs(struct inode *inode, struct file *file);
 static int acpi_thermal_temp_open_fs(struct inode *inode, struct file *file);
 static int acpi_thermal_trip_open_fs(struct inode *inode, struct file *file);
@@ -123,6 +124,7 @@ static struct acpi_driver acpi_thermal_driver = {
                 .add = acpi_thermal_add,
                 .remove = acpi_thermal_remove,
                 .resume = acpi_thermal_resume,
+                .notify = acpi_thermal_notify,
                 },
 };
 
@@ -192,6 +194,7 @@ struct acpi_thermal {
         struct acpi_handle_list devices;
         struct thermal_zone_device *thermal_zone;
         int tz_enabled;
+        int kelvin_offset;
         struct mutex lock;
 };
 
@@ -581,7 +584,7 @@ static void acpi_thermal_check(void *data)
 }
 
 /* sys I/F for generic thermal sysfs support */
-#define KELVIN_TO_MILLICELSIUS(t) (t * 100 - 273200)
+#define KELVIN_TO_MILLICELSIUS(t, off) (((t) - (off)) * 100)
 
 static int thermal_get_temp(struct thermal_zone_device *thermal,
                             unsigned long *temp)
@@ -596,7 +599,7 @@ static int thermal_get_temp(struct thermal_zone_device *thermal,
         if (result)
                 return result;
 
-        *temp = KELVIN_TO_MILLICELSIUS(tz->temperature);
+        *temp = KELVIN_TO_MILLICELSIUS(tz->temperature, tz->kelvin_offset);
         return 0;
 }
 
@@ -702,7 +705,8 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal,
         if (tz->trips.critical.flags.valid) {
                 if (!trip) {
                         *temp = KELVIN_TO_MILLICELSIUS(
-                                tz->trips.critical.temperature);
+                                tz->trips.critical.temperature,
+                                tz->kelvin_offset);
                         return 0;
                 }
                 trip--;
@@ -711,7 +715,8 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal,
         if (tz->trips.hot.flags.valid) {
                 if (!trip) {
                         *temp = KELVIN_TO_MILLICELSIUS(
-                                tz->trips.hot.temperature);
+                                tz->trips.hot.temperature,
+                                tz->kelvin_offset);
                         return 0;
                 }
                 trip--;
@@ -720,7 +725,8 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal,
         if (tz->trips.passive.flags.valid) {
                 if (!trip) {
                         *temp = KELVIN_TO_MILLICELSIUS(
-                                tz->trips.passive.temperature);
+                                tz->trips.passive.temperature,
+                                tz->kelvin_offset);
                         return 0;
                 }
                 trip--;
@@ -730,7 +736,8 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal,
                 tz->trips.active[i].flags.valid; i++) {
                 if (!trip) {
                         *temp = KELVIN_TO_MILLICELSIUS(
-                                tz->trips.active[i].temperature);
+                                tz->trips.active[i].temperature,
+                                tz->kelvin_offset);
                         return 0;
                 }
                 trip--;
@@ -745,7 +752,8 @@ static int thermal_get_crit_temp(struct thermal_zone_device *thermal,
 
         if (tz->trips.critical.flags.valid) {
                 *temperature = KELVIN_TO_MILLICELSIUS(
-                                tz->trips.critical.temperature);
+                                tz->trips.critical.temperature,
+                                tz->kelvin_offset);
                 return 0;
         } else
                 return -EINVAL;
@@ -1264,17 +1272,14 @@ static int acpi_thermal_remove_fs(struct acpi_device *device)
    Driver Interface
    -------------------------------------------------------------------------- */
 
-static void acpi_thermal_notify(acpi_handle handle, u32 event, void *data)
+static void acpi_thermal_notify(struct acpi_device *device, u32 event)
 {
-        struct acpi_thermal *tz = data;
-        struct acpi_device *device = NULL;
+        struct acpi_thermal *tz = acpi_driver_data(device);
 
 
         if (!tz)
                 return;
 
-        device = tz->device;
-
         switch (event) {
         case ACPI_THERMAL_NOTIFY_TEMPERATURE:
                 acpi_thermal_check(tz);
@@ -1298,8 +1303,6 @@ static void acpi_thermal_notify(acpi_handle handle, u32 event, void *data)
                           "Unsupported event [0x%x]\n", event));
                 break;
         }
-
-        return;
 }
 
 static int acpi_thermal_get_info(struct acpi_thermal *tz)
@@ -1334,10 +1337,28 @@ static int acpi_thermal_get_info(struct acpi_thermal *tz)
         return 0;
 }
 
+/*
+ * The exact offset between Kelvin and degree Celsius is 273.15. However ACPI
+ * handles temperature values with a single decimal place. As a consequence,
+ * some implementations use an offset of 273.1 and others use an offset of
+ * 273.2. Try to find out which one is being used, to present the most
+ * accurate and visually appealing number.
+ *
+ * The heuristic below should work for all ACPI thermal zones which have a
+ * critical trip point with a value being a multiple of 0.5 degree Celsius.
+ */
+static void acpi_thermal_guess_offset(struct acpi_thermal *tz)
+{
+        if (tz->trips.critical.flags.valid &&
+            (tz->trips.critical.temperature % 5) == 1)
+                tz->kelvin_offset = 2731;
+        else
+                tz->kelvin_offset = 2732;
+}
+
 static int acpi_thermal_add(struct acpi_device *device)
 {
         int result = 0;
-        acpi_status status = AE_OK;
         struct acpi_thermal *tz = NULL;
 
 
@@ -1360,6 +1381,8 @@ static int acpi_thermal_add(struct acpi_device *device)
         if (result)
                 goto free_memory;
 
+        acpi_thermal_guess_offset(tz);
+
         result = acpi_thermal_register_thermal_zone(tz);
         if (result)
                 goto free_memory;
@@ -1368,21 +1391,11 @@ static int acpi_thermal_add(struct acpi_device *device)
         if (result)
                 goto unregister_thermal_zone;
 
-        status = acpi_install_notify_handler(device->handle,
-                                             ACPI_DEVICE_NOTIFY,
-                                             acpi_thermal_notify, tz);
-        if (ACPI_FAILURE(status)) {
-                result = -ENODEV;
-                goto remove_fs;
-        }
-
         printk(KERN_INFO PREFIX "%s [%s] (%ld C)\n",
                acpi_device_name(device), acpi_device_bid(device),
                KELVIN_TO_CELSIUS(tz->temperature));
         goto end;
 
-remove_fs:
-        acpi_thermal_remove_fs(device);
 unregister_thermal_zone:
         thermal_zone_device_unregister(tz->thermal_zone);
 free_memory:
@@ -1393,7 +1406,6 @@ end:
 
 static int acpi_thermal_remove(struct acpi_device *device, int type)
 {
-        acpi_status status = AE_OK;
         struct acpi_thermal *tz = NULL;
 
         if (!device || !acpi_driver_data(device))
@@ -1401,10 +1413,6 @@ static int acpi_thermal_remove(struct acpi_device *device, int type)
 
         tz = acpi_driver_data(device);
 
-        status = acpi_remove_notify_handler(device->handle,
-                                            ACPI_DEVICE_NOTIFY,
-                                            acpi_thermal_notify);
-
         acpi_thermal_remove_fs(device);
         acpi_thermal_unregister_thermal_zone(tz);
         mutex_destroy(&tz->lock);
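Note on the kelvin_offset change above: the following is a minimal standalone sketch (not part of the patch; the sample trip-point and temperature values are made up) of how the heuristic picks 2731 vs. 2732 tenths of a kelvin and how the new two-argument KELVIN_TO_MILLICELSIUS conversion then behaves.

/* Standalone illustration of the acpi_thermal_guess_offset() heuristic:
 * ACPI reports temperatures in tenths of a kelvin, and firmware uses
 * either 273.1 or 273.2 as the 0 degC offset.  A critical trip point
 * that is a multiple of 0.5 degC ends in ...1 only with a 273.1 offset. */
#include <stdio.h>

static int guess_offset(int crit_deciK)
{
        return (crit_deciK % 5) == 1 ? 2731 : 2732;
}

static long deciK_to_millicelsius(int t, int off)
{
        return ((long)t - off) * 100;
}

int main(void)
{
        int crit = 3731;                /* hypothetical 100.0 degC trip, 273.1 offset */
        int off = guess_offset(crit);   /* -> 2731 */
        int cur = 3031;                 /* hypothetical current reading */

        printf("offset=%d current=%ld mC critical=%ld mC\n", off,
               deciK_to_millicelsius(cur, off),
               deciK_to_millicelsius(crit, off));
        return 0;
}

With these sample values the sketch prints 30000 mC (30.0 degC) for the current temperature and 100000 mC for the critical trip, which is the rounding improvement the per-zone kelvin_offset is meant to provide.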
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index ab06143672bc..cd4fb7543a90 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -79,6 +79,7 @@ module_param(brightness_switch_enabled, bool, 0644);
 static int acpi_video_bus_add(struct acpi_device *device);
 static int acpi_video_bus_remove(struct acpi_device *device, int type);
 static int acpi_video_resume(struct acpi_device *device);
+static void acpi_video_bus_notify(struct acpi_device *device, u32 event);
 
 static const struct acpi_device_id video_device_ids[] = {
         {ACPI_VIDEO_HID, 0},
@@ -94,6 +95,7 @@ static struct acpi_driver acpi_video_bus = {
                 .add = acpi_video_bus_add,
                 .remove = acpi_video_bus_remove,
                 .resume = acpi_video_resume,
+                .notify = acpi_video_bus_notify,
                 },
 };
 
@@ -1986,17 +1988,15 @@ static int acpi_video_bus_stop_devices(struct acpi_video_bus *video)
         return acpi_video_bus_DOS(video, 0, 1);
 }
 
-static void acpi_video_bus_notify(acpi_handle handle, u32 event, void *data)
+static void acpi_video_bus_notify(struct acpi_device *device, u32 event)
 {
-        struct acpi_video_bus *video = data;
-        struct acpi_device *device = NULL;
+        struct acpi_video_bus *video = acpi_driver_data(device);
         struct input_dev *input;
         int keycode;
 
         if (!video)
                 return;
 
-        device = video->device;
         input = video->input;
 
         switch (event) {
@@ -2127,7 +2127,6 @@ static int acpi_video_resume(struct acpi_device *device)
 
 static int acpi_video_bus_add(struct acpi_device *device)
 {
-        acpi_status status;
         struct acpi_video_bus *video;
         struct input_dev *input;
         int error;
@@ -2169,20 +2168,10 @@ static int acpi_video_bus_add(struct acpi_device *device)
         acpi_video_bus_get_devices(video, device);
         acpi_video_bus_start_devices(video);
 
-        status = acpi_install_notify_handler(device->handle,
-                                             ACPI_DEVICE_NOTIFY,
-                                             acpi_video_bus_notify, video);
-        if (ACPI_FAILURE(status)) {
-                printk(KERN_ERR PREFIX
-                       "Error installing notify handler\n");
-                error = -ENODEV;
-                goto err_stop_video;
-        }
-
         video->input = input = input_allocate_device();
         if (!input) {
                 error = -ENOMEM;
-                goto err_uninstall_notify;
+                goto err_stop_video;
         }
 
         snprintf(video->phys, sizeof(video->phys),
@@ -2218,9 +2207,6 @@ static int acpi_video_bus_add(struct acpi_device *device)
 
 err_free_input_dev:
         input_free_device(input);
-err_uninstall_notify:
-        acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
-                                   acpi_video_bus_notify);
 err_stop_video:
         acpi_video_bus_stop_devices(video);
         acpi_video_bus_put_devices(video);
@@ -2235,7 +2221,6 @@ static int acpi_video_bus_add(struct acpi_device *device)
 
 static int acpi_video_bus_remove(struct acpi_device *device, int type)
 {
-        acpi_status status = 0;
         struct acpi_video_bus *video = NULL;
 
 
@@ -2245,11 +2230,6 @@ static int acpi_video_bus_remove(struct acpi_device *device, int type)
         video = acpi_driver_data(device);
 
         acpi_video_bus_stop_devices(video);
-
-        status = acpi_remove_notify_handler(video->device->handle,
-                                            ACPI_DEVICE_NOTIFY,
-                                            acpi_video_bus_notify);
-
         acpi_video_bus_put_devices(video);
         acpi_video_bus_remove_fs(device);
 
diff --git a/drivers/acpi/wakeup.c b/drivers/acpi/wakeup.c
index 5aee8c26cc9f..88725dcdf8bc 100644
--- a/drivers/acpi/wakeup.c
+++ b/drivers/acpi/wakeup.c
@@ -12,12 +12,14 @@
 #include "internal.h"
 #include "sleep.h"
 
+/*
+ * We didn't lock acpi_device_lock in the file, because it invokes oops in
+ * suspend/resume and isn't really required as this is called in S-state. At
+ * that time, there is no device hotplug
+ **/
 #define _COMPONENT ACPI_SYSTEM_COMPONENT
 ACPI_MODULE_NAME("wakeup_devices")
 
-extern struct list_head acpi_wakeup_device_list;
-extern spinlock_t acpi_device_lock;
-
 /**
  * acpi_enable_wakeup_device_prep - prepare wakeup devices
  * @sleep_state: ACPI state
@@ -29,7 +31,6 @@ void acpi_enable_wakeup_device_prep(u8 sleep_state)
 {
         struct list_head *node, *next;
 
-        spin_lock(&acpi_device_lock);
         list_for_each_safe(node, next, &acpi_wakeup_device_list) {
                 struct acpi_device *dev = container_of(node,
                                                        struct acpi_device,
@@ -40,11 +41,8 @@ void acpi_enable_wakeup_device_prep(u8 sleep_state)
                     (sleep_state > (u32) dev->wakeup.sleep_state))
                         continue;
 
-                spin_unlock(&acpi_device_lock);
                 acpi_enable_wakeup_device_power(dev, sleep_state);
-                spin_lock(&acpi_device_lock);
         }
-        spin_unlock(&acpi_device_lock);
 }
 
 /**
@@ -60,7 +58,6 @@ void acpi_enable_wakeup_device(u8 sleep_state)
          * Caution: this routine must be invoked when interrupt is disabled
          * Refer ACPI2.0: P212
          */
-        spin_lock(&acpi_device_lock);
         list_for_each_safe(node, next, &acpi_wakeup_device_list) {
                 struct acpi_device *dev =
                         container_of(node, struct acpi_device, wakeup_list);
@@ -74,22 +71,17 @@ void acpi_enable_wakeup_device(u8 sleep_state)
                 if ((!dev->wakeup.state.enabled && !dev->wakeup.flags.prepared)
                     || sleep_state > (u32) dev->wakeup.sleep_state) {
                         if (dev->wakeup.flags.run_wake) {
-                                spin_unlock(&acpi_device_lock);
                                 /* set_gpe_type will disable GPE, leave it like that */
                                 acpi_set_gpe_type(dev->wakeup.gpe_device,
                                                   dev->wakeup.gpe_number,
                                                   ACPI_GPE_TYPE_RUNTIME);
-                                spin_lock(&acpi_device_lock);
                         }
                         continue;
                 }
-                spin_unlock(&acpi_device_lock);
                 if (!dev->wakeup.flags.run_wake)
                         acpi_enable_gpe(dev->wakeup.gpe_device,
                                         dev->wakeup.gpe_number);
-                spin_lock(&acpi_device_lock);
         }
-        spin_unlock(&acpi_device_lock);
 }
 
 /**
@@ -101,7 +93,6 @@ void acpi_disable_wakeup_device(u8 sleep_state)
 {
         struct list_head *node, *next;
 
-        spin_lock(&acpi_device_lock);
         list_for_each_safe(node, next, &acpi_wakeup_device_list) {
                 struct acpi_device *dev =
                         container_of(node, struct acpi_device, wakeup_list);
@@ -112,19 +103,16 @@ void acpi_disable_wakeup_device(u8 sleep_state)
                 if ((!dev->wakeup.state.enabled && !dev->wakeup.flags.prepared)
                     || sleep_state > (u32) dev->wakeup.sleep_state) {
                         if (dev->wakeup.flags.run_wake) {
-                                spin_unlock(&acpi_device_lock);
                                 acpi_set_gpe_type(dev->wakeup.gpe_device,
                                                   dev->wakeup.gpe_number,
                                                   ACPI_GPE_TYPE_WAKE_RUN);
                                 /* Re-enable it, since set_gpe_type will disable it */
                                 acpi_enable_gpe(dev->wakeup.gpe_device,
                                                 dev->wakeup.gpe_number);
-                                spin_lock(&acpi_device_lock);
                         }
                         continue;
                 }
 
-                spin_unlock(&acpi_device_lock);
                 acpi_disable_wakeup_device_power(dev);
                 /* Never disable run-wake GPE */
                 if (!dev->wakeup.flags.run_wake) {
@@ -133,16 +121,14 @@ void acpi_disable_wakeup_device(u8 sleep_state)
                         acpi_clear_gpe(dev->wakeup.gpe_device,
                                        dev->wakeup.gpe_number, ACPI_NOT_ISR);
                 }
-                spin_lock(&acpi_device_lock);
         }
-        spin_unlock(&acpi_device_lock);
 }
 
 int __init acpi_wakeup_device_init(void)
 {
         struct list_head *node, *next;
 
-        spin_lock(&acpi_device_lock);
+        mutex_lock(&acpi_device_lock);
         list_for_each_safe(node, next, &acpi_wakeup_device_list) {
                 struct acpi_device *dev = container_of(node,
                                                        struct acpi_device,
@@ -150,15 +136,13 @@ int __init acpi_wakeup_device_init(void)
                 /* In case user doesn't load button driver */
                 if (!dev->wakeup.flags.run_wake || dev->wakeup.state.enabled)
                         continue;
-                spin_unlock(&acpi_device_lock);
                 acpi_set_gpe_type(dev->wakeup.gpe_device,
                                   dev->wakeup.gpe_number,
                                   ACPI_GPE_TYPE_WAKE_RUN);
                 acpi_enable_gpe(dev->wakeup.gpe_device,
                                 dev->wakeup.gpe_number);
                 dev->wakeup.state.enabled = 1;
-                spin_lock(&acpi_device_lock);
         }
-        spin_unlock(&acpi_device_lock);
+        mutex_unlock(&acpi_device_lock);
         return 0;
 }
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 57be6bea48eb..08186ecbaf8d 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -114,6 +114,7 @@ enum {
         board_ahci_sb700        = 5, /* for SB700 and SB800 */
         board_ahci_mcp65        = 6,
         board_ahci_nopmp        = 7,
+        board_ahci_yesncq       = 8,
 
         /* global controller registers */
         HOST_CAP                = 0x00, /* host capabilities */
@@ -469,6 +470,14 @@ static const struct ata_port_info ahci_port_info[] = {
                 .udma_mask      = ATA_UDMA6,
                 .port_ops       = &ahci_ops,
         },
+        /* board_ahci_yesncq */
+        {
+                AHCI_HFLAGS     (AHCI_HFLAG_YES_NCQ),
+                .flags          = AHCI_FLAG_COMMON,
+                .pio_mask       = ATA_PIO4,
+                .udma_mask      = ATA_UDMA6,
+                .port_ops       = &ahci_ops,
+        },
 };
 
 static const struct pci_device_id ahci_pci_tbl[] = {
@@ -535,30 +544,30 @@ static const struct pci_device_id ahci_pci_tbl[] = {
         { PCI_VDEVICE(NVIDIA, 0x045d), board_ahci_mcp65 },      /* MCP65 */
         { PCI_VDEVICE(NVIDIA, 0x045e), board_ahci_mcp65 },      /* MCP65 */
         { PCI_VDEVICE(NVIDIA, 0x045f), board_ahci_mcp65 },      /* MCP65 */
-        { PCI_VDEVICE(NVIDIA, 0x0550), board_ahci },            /* MCP67 */
-        { PCI_VDEVICE(NVIDIA, 0x0551), board_ahci },            /* MCP67 */
-        { PCI_VDEVICE(NVIDIA, 0x0552), board_ahci },            /* MCP67 */
-        { PCI_VDEVICE(NVIDIA, 0x0553), board_ahci },            /* MCP67 */
-        { PCI_VDEVICE(NVIDIA, 0x0554), board_ahci },            /* MCP67 */
-        { PCI_VDEVICE(NVIDIA, 0x0555), board_ahci },            /* MCP67 */
-        { PCI_VDEVICE(NVIDIA, 0x0556), board_ahci },            /* MCP67 */
-        { PCI_VDEVICE(NVIDIA, 0x0557), board_ahci },            /* MCP67 */
-        { PCI_VDEVICE(NVIDIA, 0x0558), board_ahci },            /* MCP67 */
-        { PCI_VDEVICE(NVIDIA, 0x0559), board_ahci },            /* MCP67 */
-        { PCI_VDEVICE(NVIDIA, 0x055a), board_ahci },            /* MCP67 */
-        { PCI_VDEVICE(NVIDIA, 0x055b), board_ahci },            /* MCP67 */
-        { PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci },            /* MCP73 */
-        { PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci },            /* MCP73 */
-        { PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci },            /* MCP73 */
-        { PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci },            /* MCP73 */
-        { PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci },            /* MCP73 */
-        { PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci },            /* MCP73 */
-        { PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci },            /* MCP73 */
-        { PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci },            /* MCP73 */
-        { PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci },            /* MCP73 */
-        { PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci },            /* MCP73 */
-        { PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci },            /* MCP73 */
-        { PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci },            /* MCP73 */
+        { PCI_VDEVICE(NVIDIA, 0x0550), board_ahci_yesncq },     /* MCP67 */
+        { PCI_VDEVICE(NVIDIA, 0x0551), board_ahci_yesncq },     /* MCP67 */
+        { PCI_VDEVICE(NVIDIA, 0x0552), board_ahci_yesncq },     /* MCP67 */
+        { PCI_VDEVICE(NVIDIA, 0x0553), board_ahci_yesncq },     /* MCP67 */
+        { PCI_VDEVICE(NVIDIA, 0x0554), board_ahci_yesncq },     /* MCP67 */
+        { PCI_VDEVICE(NVIDIA, 0x0555), board_ahci_yesncq },     /* MCP67 */
+        { PCI_VDEVICE(NVIDIA, 0x0556), board_ahci_yesncq },     /* MCP67 */
+        { PCI_VDEVICE(NVIDIA, 0x0557), board_ahci_yesncq },     /* MCP67 */
+        { PCI_VDEVICE(NVIDIA, 0x0558), board_ahci_yesncq },     /* MCP67 */
+        { PCI_VDEVICE(NVIDIA, 0x0559), board_ahci_yesncq },     /* MCP67 */
+        { PCI_VDEVICE(NVIDIA, 0x055a), board_ahci_yesncq },     /* MCP67 */
+        { PCI_VDEVICE(NVIDIA, 0x055b), board_ahci_yesncq },     /* MCP67 */
+        { PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci_yesncq },     /* MCP73 */
+        { PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci_yesncq },     /* MCP73 */
+        { PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci_yesncq },     /* MCP73 */
+        { PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci_yesncq },     /* MCP73 */
+        { PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci_yesncq },     /* MCP73 */
+        { PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci_yesncq },     /* MCP73 */
+        { PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci_yesncq },     /* MCP73 */
+        { PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci_yesncq },     /* MCP73 */
+        { PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci_yesncq },     /* MCP73 */
+        { PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci_yesncq },     /* MCP73 */
+        { PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci_yesncq },     /* MCP73 */
+        { PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci_yesncq },     /* MCP73 */
         { PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci },            /* MCP77 */
         { PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci },            /* MCP77 */
         { PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci },            /* MCP77 */
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index e7ea77cf6069..17c5d48a75d2 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -1231,6 +1231,9 @@ unsigned int ata_dev_classify(const struct ata_taskfile *tf)
          *
          * We follow the current spec and consider that 0x69/0x96
          * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
+         * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
+         * SEMB signature.  This is worked around in
+         * ata_dev_read_id().
          */
         if ((tf->lbam == 0) && (tf->lbah == 0)) {
                 DPRINTK("found ATA device by sig\n");
@@ -1248,8 +1251,8 @@ unsigned int ata_dev_classify(const struct ata_taskfile *tf)
         }
 
         if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
-                printk(KERN_INFO "ata: SEMB device ignored\n");
-                return ATA_DEV_SEMB_UNSUP; /* not yet */
+                DPRINTK("found SEMB device by sig (could be ATA device)\n");
+                return ATA_DEV_SEMB;
         }
 
         DPRINTK("unknown device\n");
@@ -1653,8 +1656,8 @@ unsigned long ata_id_xfermask(const u16 *id)
                 /*
                  * Process compact flash extended modes
                  */
-                int pio = id[163] & 0x7;
-                int dma = (id[163] >> 3) & 7;
+                int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
+                int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;
 
                 if (pio)
                         pio_mask |= (1 << 5);
@@ -2080,6 +2083,7 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
         struct ata_taskfile tf;
         unsigned int err_mask = 0;
         const char *reason;
+        bool is_semb = class == ATA_DEV_SEMB;
         int may_fallback = 1, tried_spinup = 0;
         int rc;
 
@@ -2090,6 +2094,8 @@ retry:
         ata_tf_init(dev, &tf);
 
         switch (class) {
+        case ATA_DEV_SEMB:
+                class = ATA_DEV_ATA;    /* some hard drives report SEMB sig */
         case ATA_DEV_ATA:
                 tf.command = ATA_CMD_ID_ATA;
                 break;
@@ -2126,6 +2132,14 @@ retry:
                         return -ENOENT;
                 }
 
+                if (is_semb) {
+                        ata_dev_printk(dev, KERN_INFO, "IDENTIFY failed on "
+                                       "device w/ SEMB sig, disabled\n");
+                        /* SEMB is not supported yet */
+                        *p_class = ATA_DEV_SEMB_UNSUP;
+                        return 0;
+                }
+
                 if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
                         /* Device or controller might have reported
                          * the wrong device class.  Give a shot at the
@@ -2412,7 +2426,8 @@ int ata_dev_configure(struct ata_device *dev)
         /* ATA-specific feature tests */
         if (dev->class == ATA_DEV_ATA) {
                 if (ata_id_is_cfa(id)) {
-                        if (id[162] & 1) /* CPRM may make this media unusable */
+                        /* CPRM may make this media unusable */
+                        if (id[ATA_ID_CFA_KEY_MGMT] & 1)
                                 ata_dev_printk(dev, KERN_WARNING,
                                         "supports DRM functions and may "
                                         "not be fully accessable.\n");
@@ -6110,13 +6125,11 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
                         ata_port_printk(ap, KERN_INFO, "DUMMY\n");
         }
 
-        /* perform each probe synchronously */
-        DPRINTK("probe begin\n");
+        /* perform each probe asynchronously */
         for (i = 0; i < host->n_ports; i++) {
                 struct ata_port *ap = host->ports[i];
                 async_schedule(async_port_probe, ap);
         }
-        DPRINTK("probe end\n");
 
         return 0;
 }
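Aside on the ata_id_xfermask() hunk above: a tiny standalone sketch (not part of the patch; the sample IDENTIFY word is made up) of the bit layout being decoded, where word 163 (ATA_ID_CFA_MODES) carries the CFA advanced PIO modes in bits 0-2 and the advanced DMA modes in bits 3-5, exactly as the replaced open-coded id[163] accesses did.

/* Decode the CFA advanced-mode word the same way ata_id_xfermask() does. */
#include <stdio.h>

int main(void)
{
        unsigned short cfa_modes = 0x0009;      /* hypothetical word 163 value */
        int pio = (cfa_modes >> 0) & 0x7;       /* advanced PIO modes */
        int dma = (cfa_modes >> 3) & 0x7;       /* advanced DMA modes */

        printf("advanced PIO modes: %d, advanced DMA modes: %d\n", pio, dma);
        return 0;
}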
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index b9747fa59e54..2733b0c90b75 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -647,23 +647,45 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
         return rc;
 }
 
+static int ata_ioc32(struct ata_port *ap)
+{
+        if (ap->flags & ATA_FLAG_PIO_DMA)
+                return 1;
+        if (ap->pflags & ATA_PFLAG_PIO32)
+                return 1;
+        return 0;
+}
+
 int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev,
                        int cmd, void __user *arg)
 {
         int val = -EINVAL, rc = -EINVAL;
+        unsigned long flags;
 
         switch (cmd) {
         case ATA_IOC_GET_IO32:
-                val = 0;
+                spin_lock_irqsave(ap->lock, flags);
+                val = ata_ioc32(ap);
+                spin_unlock_irqrestore(ap->lock, flags);
                 if (copy_to_user(arg, &val, 1))
                         return -EFAULT;
                 return 0;
 
         case ATA_IOC_SET_IO32:
                 val = (unsigned long) arg;
-                if (val != 0)
-                        return -EINVAL;
-                return 0;
+                rc = 0;
+                spin_lock_irqsave(ap->lock, flags);
+                if (ap->pflags & ATA_PFLAG_PIO32CHANGE) {
+                        if (val)
+                                ap->pflags |= ATA_PFLAG_PIO32;
+                        else
+                                ap->pflags &= ~ATA_PFLAG_PIO32;
+                } else {
+                        if (val != ata_ioc32(ap))
+                                rc = -EINVAL;
+                }
+                spin_unlock_irqrestore(ap->lock, flags);
+                return rc;
 
         case HDIO_GET_IDENTITY:
                 return ata_get_identity(ap, scsidev, arg);
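For context, a rough userspace sketch (not part of the patch) of how the setting handled above is toggled from user space. It assumes ATA_IOC_GET_IO32/ATA_IOC_SET_IO32 keep their classic HDIO_GET_32BIT/HDIO_SET_32BIT values from <linux/hdreg.h> (the ioctls hdparm -c issues), and that /dev/sda is a libata-managed disk; error handling is minimal.

#include <fcntl.h>
#include <linux/hdreg.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/dev/sda", O_RDONLY | O_NONBLOCK);
        long io32 = 0;

        if (fd < 0)
                return 1;

        /* the kernel copies back a single byte, so start from a zeroed long */
        if (ioctl(fd, HDIO_GET_32BIT, &io32) == 0)
                printf("32-bit PIO currently %s\n", io32 ? "on" : "off");

        /* the value is passed directly, not by pointer (see SET_IO32 above) */
        if (ioctl(fd, HDIO_SET_32BIT, 1) != 0)
                perror("HDIO_SET_32BIT");

        close(fd);
        return 0;
}

With the old code SET_IO32 only accepted 0; with the patch it succeeds when the port advertises ATA_PFLAG_PIO32CHANGE, and otherwise only when the requested value already matches ata_ioc32().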
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 8332e97a9de3..bb18415d3d63 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -87,6 +87,7 @@ const struct ata_port_operations ata_bmdma32_port_ops = {
         .inherits               = &ata_bmdma_port_ops,
 
         .sff_data_xfer          = ata_sff_data_xfer32,
+        .port_start             = ata_sff_port_start32,
 };
 EXPORT_SYMBOL_GPL(ata_bmdma32_port_ops);
 
@@ -769,6 +770,9 @@ unsigned int ata_sff_data_xfer32(struct ata_device *dev, unsigned char *buf,
         void __iomem *data_addr = ap->ioaddr.data_addr;
         unsigned int words = buflen >> 2;
         int slop = buflen & 3;
+
+        if (!(ap->pflags & ATA_PFLAG_PIO32))
+                return ata_sff_data_xfer(dev, buf, buflen, rw);
 
         /* Transfer multiple of 4 bytes */
         if (rw == READ)
@@ -2402,6 +2406,29 @@ int ata_sff_port_start(struct ata_port *ap)
 EXPORT_SYMBOL_GPL(ata_sff_port_start);
 
 /**
+ *      ata_sff_port_start32 - Set port up for dma.
+ *      @ap: Port to initialize
+ *
+ *      Called just after data structures for each port are
+ *      initialized.  Allocates space for PRD table if the device
+ *      is DMA capable SFF.
+ *
+ *      May be used as the port_start() entry in ata_port_operations for
+ *      devices that are capable of 32bit PIO.
+ *
+ *      LOCKING:
+ *      Inherited from caller.
+ */
+int ata_sff_port_start32(struct ata_port *ap)
+{
+        ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
+        if (ap->ioaddr.bmdma_addr)
+                return ata_port_start(ap);
+        return 0;
+}
+EXPORT_SYMBOL_GPL(ata_sff_port_start32);
+
+/**
  *      ata_sff_std_ports - initialize ioaddr with standard port offsets.
  *      @ioaddr: IO address structure to be initialized
  *
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
index 81ab57003aba..122c786449a9 100644
--- a/drivers/ata/pata_hpt37x.c
+++ b/drivers/ata/pata_hpt37x.c
@@ -8,7 +8,7 @@
  * Copyright (C) 1999-2003             Andre Hedrick <andre@linux-ide.org>
  * Portions Copyright (C) 2001         Sun Microsystems, Inc.
  * Portions Copyright (C) 2003         Red Hat Inc
- * Portions Copyright (C) 2005-2007    MontaVista Software, Inc.
+ * Portions Copyright (C) 2005-2009    MontaVista Software, Inc.
  *
  * TODO
  *      Look into engine reset on timeout errors. Should not be required.
@@ -24,7 +24,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME        "pata_hpt37x"
-#define DRV_VERSION     "0.6.11"
+#define DRV_VERSION     "0.6.12"
 
 struct hpt_clock {
         u8      xfer_speed;
@@ -445,23 +445,6 @@ static void hpt370_set_dmamode(struct ata_port *ap, struct ata_device *adev)
 }
 
 /**
- *      hpt370_bmdma_start              -       DMA engine begin
- *      @qc: ATA command
- *
- *      The 370 and 370A want us to reset the DMA engine each time we
- *      use it. The 372 and later are fine.
- */
-
-static void hpt370_bmdma_start(struct ata_queued_cmd *qc)
-{
-        struct ata_port *ap = qc->ap;
-        struct pci_dev *pdev = to_pci_dev(ap->host->dev);
-        pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37);
-        udelay(10);
-        ata_bmdma_start(qc);
-}
-
-/**
  *      hpt370_bmdma_end                -       DMA engine stop
  *      @qc: ATA command
  *
@@ -598,7 +581,6 @@ static struct scsi_host_template hpt37x_sht = {
 static struct ata_port_operations hpt370_port_ops = {
         .inherits       = &ata_bmdma_port_ops,
 
-        .bmdma_start    = hpt370_bmdma_start,
         .bmdma_stop     = hpt370_bmdma_stop,
 
         .mode_filter    = hpt370_filter,
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c index 3f830f0fe2cc..f72c6c5b820f 100644 --- a/drivers/ata/pata_legacy.c +++ b/drivers/ata/pata_legacy.c | |||
@@ -108,6 +108,7 @@ struct legacy_controller { | |||
108 | struct ata_port_operations *ops; | 108 | struct ata_port_operations *ops; |
109 | unsigned int pio_mask; | 109 | unsigned int pio_mask; |
110 | unsigned int flags; | 110 | unsigned int flags; |
111 | unsigned int pflags; | ||
111 | int (*setup)(struct platform_device *, struct legacy_probe *probe, | 112 | int (*setup)(struct platform_device *, struct legacy_probe *probe, |
112 | struct legacy_data *data); | 113 | struct legacy_data *data); |
113 | }; | 114 | }; |
@@ -284,9 +285,11 @@ static unsigned int pdc_data_xfer_vlb(struct ata_device *dev, | |||
284 | unsigned char *buf, unsigned int buflen, int rw) | 285 | unsigned char *buf, unsigned int buflen, int rw) |
285 | { | 286 | { |
286 | int slop = buflen & 3; | 287 | int slop = buflen & 3; |
288 | struct ata_port *ap = dev->link->ap; | ||
289 | |||
287 | /* 32bit I/O capable *and* we need to write a whole number of dwords */ | 290 | /* 32bit I/O capable *and* we need to write a whole number of dwords */ |
288 | if (ata_id_has_dword_io(dev->id) && (slop == 0 || slop == 3)) { | 291 | if (ata_id_has_dword_io(dev->id) && (slop == 0 || slop == 3) |
289 | struct ata_port *ap = dev->link->ap; | 292 | && (ap->pflags & ATA_PFLAG_PIO32)) { |
290 | unsigned long flags; | 293 | unsigned long flags; |
291 | 294 | ||
292 | local_irq_save(flags); | 295 | local_irq_save(flags); |
@@ -736,7 +739,8 @@ static unsigned int vlb32_data_xfer(struct ata_device *adev, unsigned char *buf, | |||
736 | struct ata_port *ap = adev->link->ap; | 739 | struct ata_port *ap = adev->link->ap; |
737 | int slop = buflen & 3; | 740 | int slop = buflen & 3; |
738 | 741 | ||
739 | if (ata_id_has_dword_io(adev->id) && (slop == 0 || slop == 3)) { | 742 | if (ata_id_has_dword_io(adev->id) && (slop == 0 || slop == 3) |
743 | && (ap->pflags & ATA_PFLAG_PIO32)) { | ||
740 | if (rw == WRITE) | 744 | if (rw == WRITE) |
741 | iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2); | 745 | iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2); |
742 | else | 746 | else |
@@ -858,27 +862,30 @@ static struct ata_port_operations winbond_port_ops = { | |||
858 | 862 | ||
859 | static struct legacy_controller controllers[] = { | 863 | static struct legacy_controller controllers[] = { |
860 | {"BIOS", &legacy_port_ops, 0x1F, | 864 | {"BIOS", &legacy_port_ops, 0x1F, |
861 | ATA_FLAG_NO_IORDY, NULL }, | 865 | ATA_FLAG_NO_IORDY, 0, NULL }, |
862 | {"Snooping", &simple_port_ops, 0x1F, | 866 | {"Snooping", &simple_port_ops, 0x1F, |
863 | 0 , NULL }, | 867 | 0, 0, NULL }, |
864 | {"PDC20230", &pdc20230_port_ops, 0x7, | 868 | {"PDC20230", &pdc20230_port_ops, 0x7, |
865 | ATA_FLAG_NO_IORDY, NULL }, | 869 | ATA_FLAG_NO_IORDY, |
870 | ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE, NULL }, | ||
866 | {"HT6560A", &ht6560a_port_ops, 0x07, | 871 | {"HT6560A", &ht6560a_port_ops, 0x07, |
867 | ATA_FLAG_NO_IORDY, NULL }, | 872 | ATA_FLAG_NO_IORDY, 0, NULL }, |
868 | {"HT6560B", &ht6560b_port_ops, 0x1F, | 873 | {"HT6560B", &ht6560b_port_ops, 0x1F, |
869 | ATA_FLAG_NO_IORDY, NULL }, | 874 | ATA_FLAG_NO_IORDY, 0, NULL }, |
870 | {"OPTI82C611A", &opti82c611a_port_ops, 0x0F, | 875 | {"OPTI82C611A", &opti82c611a_port_ops, 0x0F, |
871 | 0 , NULL }, | 876 | 0, 0, NULL }, |
872 | {"OPTI82C46X", &opti82c46x_port_ops, 0x0F, | 877 | {"OPTI82C46X", &opti82c46x_port_ops, 0x0F, |
873 | 0 , NULL }, | 878 | 0, 0, NULL }, |
874 | {"QDI6500", &qdi6500_port_ops, 0x07, | 879 | {"QDI6500", &qdi6500_port_ops, 0x07, |
875 | ATA_FLAG_NO_IORDY, qdi_port }, | 880 | ATA_FLAG_NO_IORDY, |
881 | ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE, qdi_port }, | ||
876 | {"QDI6580", &qdi6580_port_ops, 0x1F, | 882 | {"QDI6580", &qdi6580_port_ops, 0x1F, |
877 | 0 , qdi_port }, | 883 | 0, ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE, qdi_port }, |
878 | {"QDI6580DP", &qdi6580dp_port_ops, 0x1F, | 884 | {"QDI6580DP", &qdi6580dp_port_ops, 0x1F, |
879 | 0 , qdi_port }, | 885 | 0, ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE, qdi_port }, |
880 | {"W83759A", &winbond_port_ops, 0x1F, | 886 | {"W83759A", &winbond_port_ops, 0x1F, |
881 | 0 , winbond_port } | 887 | 0, ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE, |
888 | winbond_port } | ||
882 | }; | 889 | }; |
883 | 890 | ||
884 | /** | 891 | /** |
@@ -1008,6 +1015,7 @@ static __init int legacy_init_one(struct legacy_probe *probe) | |||
1008 | ap->ops = ops; | 1015 | ap->ops = ops; |
1009 | ap->pio_mask = pio_modes; | 1016 | ap->pio_mask = pio_modes; |
1010 | ap->flags |= ATA_FLAG_SLAVE_POSS | iordy; | 1017 | ap->flags |= ATA_FLAG_SLAVE_POSS | iordy; |
1018 | ap->pflags |= controller->pflags; | ||
1011 | ap->ioaddr.cmd_addr = io_addr; | 1019 | ap->ioaddr.cmd_addr = io_addr; |
1012 | ap->ioaddr.altstatus_addr = ctrl_addr; | 1020 | ap->ioaddr.altstatus_addr = ctrl_addr; |
1013 | ap->ioaddr.ctl_addr = ctrl_addr; | 1021 | ap->ioaddr.ctl_addr = ctrl_addr; |
@@ -1032,6 +1040,7 @@ static __init int legacy_init_one(struct legacy_probe *probe) | |||
1032 | return 0; | 1040 | return 0; |
1033 | } | 1041 | } |
1034 | } | 1042 | } |
1043 | ata_host_detach(host); | ||
1035 | fail: | 1044 | fail: |
1036 | platform_device_unregister(pdev); | 1045 | platform_device_unregister(pdev); |
1037 | return ret; | 1046 | return ret; |
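
The pata_legacy change above makes 32-bit VLB data transfers conditional on the new per-port pflags word, so dword I/O is only attempted on controllers that set ATA_PFLAG_PIO32 in their legacy_controller entry. A minimal sketch of that gating condition follows; use_dword_io() is a hypothetical wrapper, not part of the driver:

/* Sketch only: mirrors the test used in pdc_data_xfer_vlb() and
 * vlb32_data_xfer() above.  ata_id_has_dword_io() and ATA_PFLAG_PIO32
 * are the real libata symbols; use_dword_io() exists only for
 * illustration.
 */
static int use_dword_io(struct ata_device *adev, unsigned int buflen)
{
	struct ata_port *ap = adev->link->ap;
	int slop = buflen & 3;	/* bytes left over after whole dwords */

	return ata_id_has_dword_io(adev->id) &&
	       (slop == 0 || slop == 3) &&
	       (ap->pflags & ATA_PFLAG_PIO32);
}
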
diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c index 0fb6b1b1e634..dd53a66b19e3 100644 --- a/drivers/ata/pata_ninja32.c +++ b/drivers/ata/pata_ninja32.c | |||
@@ -44,7 +44,7 @@ | |||
44 | #include <linux/libata.h> | 44 | #include <linux/libata.h> |
45 | 45 | ||
46 | #define DRV_NAME "pata_ninja32" | 46 | #define DRV_NAME "pata_ninja32" |
47 | #define DRV_VERSION "0.1.3" | 47 | #define DRV_VERSION "0.1.5" |
48 | 48 | ||
49 | 49 | ||
50 | /** | 50 | /** |
@@ -86,6 +86,7 @@ static struct ata_port_operations ninja32_port_ops = { | |||
86 | .sff_dev_select = ninja32_dev_select, | 86 | .sff_dev_select = ninja32_dev_select, |
87 | .cable_detect = ata_cable_40wire, | 87 | .cable_detect = ata_cable_40wire, |
88 | .set_piomode = ninja32_set_piomode, | 88 | .set_piomode = ninja32_set_piomode, |
89 | .sff_data_xfer = ata_sff_data_xfer32 | ||
89 | }; | 90 | }; |
90 | 91 | ||
91 | static void ninja32_program(void __iomem *base) | 92 | static void ninja32_program(void __iomem *base) |
@@ -144,6 +145,7 @@ static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id) | |||
144 | ap->ioaddr.altstatus_addr = base + 0x1E; | 145 | ap->ioaddr.altstatus_addr = base + 0x1E; |
145 | ap->ioaddr.bmdma_addr = base; | 146 | ap->ioaddr.bmdma_addr = base; |
146 | ata_sff_std_ports(&ap->ioaddr); | 147 | ata_sff_std_ports(&ap->ioaddr); |
148 | ap->pflags = ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE; | ||
147 | 149 | ||
148 | ninja32_program(base); | 150 | ninja32_program(base); |
149 | /* FIXME: Should we disable them at remove ? */ | 151 | /* FIXME: Should we disable them at remove ? */ |
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c index b08e6e0f82b6..45657cacec43 100644 --- a/drivers/ata/pata_via.c +++ b/drivers/ata/pata_via.c | |||
@@ -62,7 +62,7 @@ | |||
62 | #include <linux/dmi.h> | 62 | #include <linux/dmi.h> |
63 | 63 | ||
64 | #define DRV_NAME "pata_via" | 64 | #define DRV_NAME "pata_via" |
65 | #define DRV_VERSION "0.3.3" | 65 | #define DRV_VERSION "0.3.4" |
66 | 66 | ||
67 | /* | 67 | /* |
68 | * The following comes directly from Vojtech Pavlik's ide/pci/via82cxxx | 68 | * The following comes directly from Vojtech Pavlik's ide/pci/via82cxxx |
@@ -136,6 +136,9 @@ static const struct via_isa_bridge { | |||
136 | { NULL } | 136 | { NULL } |
137 | }; | 137 | }; |
138 | 138 | ||
139 | struct via_port { | ||
140 | u8 cached_device; | ||
141 | }; | ||
139 | 142 | ||
140 | /* | 143 | /* |
141 | * Cable special cases | 144 | * Cable special cases |
@@ -346,14 +349,70 @@ static void via_set_dmamode(struct ata_port *ap, struct ata_device *adev) | |||
346 | */ | 349 | */ |
347 | static void via_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) | 350 | static void via_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) |
348 | { | 351 | { |
349 | struct ata_taskfile tmp_tf; | 352 | struct ata_ioports *ioaddr = &ap->ioaddr; |
353 | struct via_port *vp = ap->private_data; | ||
354 | unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR; | ||
355 | int newctl = 0; | ||
356 | |||
357 | if (tf->ctl != ap->last_ctl) { | ||
358 | iowrite8(tf->ctl, ioaddr->ctl_addr); | ||
359 | ap->last_ctl = tf->ctl; | ||
360 | ata_wait_idle(ap); | ||
361 | newctl = 1; | ||
362 | } | ||
363 | |||
364 | if (tf->flags & ATA_TFLAG_DEVICE) { | ||
365 | iowrite8(tf->device, ioaddr->device_addr); | ||
366 | vp->cached_device = tf->device; | ||
367 | } else if (newctl) | ||
368 | iowrite8(vp->cached_device, ioaddr->device_addr); | ||
369 | |||
370 | if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { | ||
371 | WARN_ON_ONCE(!ioaddr->ctl_addr); | ||
372 | iowrite8(tf->hob_feature, ioaddr->feature_addr); | ||
373 | iowrite8(tf->hob_nsect, ioaddr->nsect_addr); | ||
374 | iowrite8(tf->hob_lbal, ioaddr->lbal_addr); | ||
375 | iowrite8(tf->hob_lbam, ioaddr->lbam_addr); | ||
376 | iowrite8(tf->hob_lbah, ioaddr->lbah_addr); | ||
377 | VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n", | ||
378 | tf->hob_feature, | ||
379 | tf->hob_nsect, | ||
380 | tf->hob_lbal, | ||
381 | tf->hob_lbam, | ||
382 | tf->hob_lbah); | ||
383 | } | ||
350 | 384 | ||
351 | if (ap->ctl != ap->last_ctl && !(tf->flags & ATA_TFLAG_DEVICE)) { | 385 | if (is_addr) { |
352 | tmp_tf = *tf; | 386 | iowrite8(tf->feature, ioaddr->feature_addr); |
353 | tmp_tf.flags |= ATA_TFLAG_DEVICE; | 387 | iowrite8(tf->nsect, ioaddr->nsect_addr); |
354 | tf = &tmp_tf; | 388 | iowrite8(tf->lbal, ioaddr->lbal_addr); |
389 | iowrite8(tf->lbam, ioaddr->lbam_addr); | ||
390 | iowrite8(tf->lbah, ioaddr->lbah_addr); | ||
391 | VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n", | ||
392 | tf->feature, | ||
393 | tf->nsect, | ||
394 | tf->lbal, | ||
395 | tf->lbam, | ||
396 | tf->lbah); | ||
355 | } | 397 | } |
356 | ata_sff_tf_load(ap, tf); | 398 | |
399 | ata_wait_idle(ap); | ||
400 | } | ||
401 | |||
402 | static int via_port_start(struct ata_port *ap) | ||
403 | { | ||
404 | struct via_port *vp; | ||
405 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | ||
406 | |||
407 | int ret = ata_sff_port_start(ap); | ||
408 | if (ret < 0) | ||
409 | return ret; | ||
410 | |||
411 | vp = devm_kzalloc(&pdev->dev, sizeof(struct via_port), GFP_KERNEL); | ||
412 | if (vp == NULL) | ||
413 | return -ENOMEM; | ||
414 | ap->private_data = vp; | ||
415 | return 0; | ||
357 | } | 416 | } |
358 | 417 | ||
359 | static struct scsi_host_template via_sht = { | 418 | static struct scsi_host_template via_sht = { |
@@ -367,6 +426,7 @@ static struct ata_port_operations via_port_ops = { | |||
367 | .set_dmamode = via_set_dmamode, | 426 | .set_dmamode = via_set_dmamode, |
368 | .prereset = via_pre_reset, | 427 | .prereset = via_pre_reset, |
369 | .sff_tf_load = via_tf_load, | 428 | .sff_tf_load = via_tf_load, |
429 | .port_start = via_port_start, | ||
370 | }; | 430 | }; |
371 | 431 | ||
372 | static struct ata_port_operations via_port_ops_noirq = { | 432 | static struct ata_port_operations via_port_ops_noirq = { |
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index 37ae5dc1070c..870dcfd82357 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c | |||
@@ -1881,6 +1881,39 @@ static u8 mv_bmdma_status(struct ata_port *ap) | |||
1881 | return status; | 1881 | return status; |
1882 | } | 1882 | } |
1883 | 1883 | ||
1884 | static void mv_rw_multi_errata_sata24(struct ata_queued_cmd *qc) | ||
1885 | { | ||
1886 | struct ata_taskfile *tf = &qc->tf; | ||
1887 | /* | ||
1888 | * Workaround for 88SX60x1 FEr SATA#24. | ||
1889 | * | ||
1890 | * Chip may corrupt WRITEs if multi_count >= 4kB. | ||
1891 | * Note that READs are unaffected. | ||
1892 | * | ||
1893 | * It's not clear if this errata really means "4K bytes", | ||
1894 | * or if it always happens for multi_count > 7 | ||
1895 | * regardless of device sector_size. | ||
1896 | * | ||
1897 | * So, for safety, any write with multi_count > 7 | ||
1898 | * gets converted here into a regular PIO write instead: | ||
1899 | */ | ||
1900 | if ((tf->flags & ATA_TFLAG_WRITE) && is_multi_taskfile(tf)) { | ||
1901 | if (qc->dev->multi_count > 7) { | ||
1902 | switch (tf->command) { | ||
1903 | case ATA_CMD_WRITE_MULTI: | ||
1904 | tf->command = ATA_CMD_PIO_WRITE; | ||
1905 | break; | ||
1906 | case ATA_CMD_WRITE_MULTI_FUA_EXT: | ||
1907 | tf->flags &= ~ATA_TFLAG_FUA; /* ugh */ | ||
1908 | /* fall through */ | ||
1909 | case ATA_CMD_WRITE_MULTI_EXT: | ||
1910 | tf->command = ATA_CMD_PIO_WRITE_EXT; | ||
1911 | break; | ||
1912 | } | ||
1913 | } | ||
1914 | } | ||
1915 | } | ||
1916 | |||
1884 | /** | 1917 | /** |
1885 | * mv_qc_prep - Host specific command preparation. | 1918 | * mv_qc_prep - Host specific command preparation. |
1886 | * @qc: queued command to prepare | 1919 | * @qc: queued command to prepare |
@@ -1898,17 +1931,24 @@ static void mv_qc_prep(struct ata_queued_cmd *qc) | |||
1898 | struct ata_port *ap = qc->ap; | 1931 | struct ata_port *ap = qc->ap; |
1899 | struct mv_port_priv *pp = ap->private_data; | 1932 | struct mv_port_priv *pp = ap->private_data; |
1900 | __le16 *cw; | 1933 | __le16 *cw; |
1901 | struct ata_taskfile *tf; | 1934 | struct ata_taskfile *tf = &qc->tf; |
1902 | u16 flags = 0; | 1935 | u16 flags = 0; |
1903 | unsigned in_index; | 1936 | unsigned in_index; |
1904 | 1937 | ||
1905 | if ((qc->tf.protocol != ATA_PROT_DMA) && | 1938 | switch (tf->protocol) { |
1906 | (qc->tf.protocol != ATA_PROT_NCQ)) | 1939 | case ATA_PROT_DMA: |
1940 | case ATA_PROT_NCQ: | ||
1941 | break; /* continue below */ | ||
1942 | case ATA_PROT_PIO: | ||
1943 | mv_rw_multi_errata_sata24(qc); | ||
1944 | return; | ||
1945 | default: | ||
1907 | return; | 1946 | return; |
1947 | } | ||
1908 | 1948 | ||
1909 | /* Fill in command request block | 1949 | /* Fill in command request block |
1910 | */ | 1950 | */ |
1911 | if (!(qc->tf.flags & ATA_TFLAG_WRITE)) | 1951 | if (!(tf->flags & ATA_TFLAG_WRITE)) |
1912 | flags |= CRQB_FLAG_READ; | 1952 | flags |= CRQB_FLAG_READ; |
1913 | WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); | 1953 | WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); |
1914 | flags |= qc->tag << CRQB_TAG_SHIFT; | 1954 | flags |= qc->tag << CRQB_TAG_SHIFT; |
@@ -1924,7 +1964,6 @@ static void mv_qc_prep(struct ata_queued_cmd *qc) | |||
1924 | pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags); | 1964 | pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags); |
1925 | 1965 | ||
1926 | cw = &pp->crqb[in_index].ata_cmd[0]; | 1966 | cw = &pp->crqb[in_index].ata_cmd[0]; |
1927 | tf = &qc->tf; | ||
1928 | 1967 | ||
1929 | /* Sadly, the CRQB cannot accomodate all registers--there are | 1968 | /* Sadly, the CRQB cannot accomodate all registers--there are |
1930 | * only 11 bytes...so we must pick and choose required | 1969 | * only 11 bytes...so we must pick and choose required |
@@ -1990,16 +2029,16 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc) | |||
1990 | struct ata_port *ap = qc->ap; | 2029 | struct ata_port *ap = qc->ap; |
1991 | struct mv_port_priv *pp = ap->private_data; | 2030 | struct mv_port_priv *pp = ap->private_data; |
1992 | struct mv_crqb_iie *crqb; | 2031 | struct mv_crqb_iie *crqb; |
1993 | struct ata_taskfile *tf; | 2032 | struct ata_taskfile *tf = &qc->tf; |
1994 | unsigned in_index; | 2033 | unsigned in_index; |
1995 | u32 flags = 0; | 2034 | u32 flags = 0; |
1996 | 2035 | ||
1997 | if ((qc->tf.protocol != ATA_PROT_DMA) && | 2036 | if ((tf->protocol != ATA_PROT_DMA) && |
1998 | (qc->tf.protocol != ATA_PROT_NCQ)) | 2037 | (tf->protocol != ATA_PROT_NCQ)) |
1999 | return; | 2038 | return; |
2000 | 2039 | ||
2001 | /* Fill in Gen IIE command request block */ | 2040 | /* Fill in Gen IIE command request block */ |
2002 | if (!(qc->tf.flags & ATA_TFLAG_WRITE)) | 2041 | if (!(tf->flags & ATA_TFLAG_WRITE)) |
2003 | flags |= CRQB_FLAG_READ; | 2042 | flags |= CRQB_FLAG_READ; |
2004 | 2043 | ||
2005 | WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); | 2044 | WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); |
@@ -2015,7 +2054,6 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc) | |||
2015 | crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16); | 2054 | crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16); |
2016 | crqb->flags = cpu_to_le32(flags); | 2055 | crqb->flags = cpu_to_le32(flags); |
2017 | 2056 | ||
2018 | tf = &qc->tf; | ||
2019 | crqb->ata_cmd[0] = cpu_to_le32( | 2057 | crqb->ata_cmd[0] = cpu_to_le32( |
2020 | (tf->command << 16) | | 2058 | (tf->command << 16) | |
2021 | (tf->feature << 24) | 2059 | (tf->feature << 24) |
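
The mv_rw_multi_errata_sata24() hook added above works around the 88SX60x1 FEr SATA#24 erratum by demoting large multi-sector writes to plain PIO writes before the taskfile is issued; reads and small multi_count values pass through unchanged. The effective command rewrite, restated as a comment for reference:

/* Applied by mv_qc_prep() via mv_rw_multi_errata_sata24() when the
 * command is a WRITE, is_multi_taskfile() is true and
 * qc->dev->multi_count > 7:
 *
 *   ATA_CMD_WRITE_MULTI          ->  ATA_CMD_PIO_WRITE
 *   ATA_CMD_WRITE_MULTI_FUA_EXT  ->  ATA_CMD_PIO_WRITE_EXT  (FUA flag cleared)
 *   ATA_CMD_WRITE_MULTI_EXT      ->  ATA_CMD_PIO_WRITE_EXT
 *
 * Everything else, including all READs, is left untouched.
 */
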
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c index 98e8c50703b3..bdd43c7f432e 100644 --- a/drivers/ata/sata_via.c +++ b/drivers/ata/sata_via.c | |||
@@ -566,7 +566,7 @@ static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
566 | static int printed_version; | 566 | static int printed_version; |
567 | unsigned int i; | 567 | unsigned int i; |
568 | int rc; | 568 | int rc; |
569 | struct ata_host *host; | 569 | struct ata_host *host = NULL; |
570 | int board_id = (int) ent->driver_data; | 570 | int board_id = (int) ent->driver_data; |
571 | const unsigned *bar_sizes; | 571 | const unsigned *bar_sizes; |
572 | 572 | ||
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c index be204308cc1b..9359613addc5 100644 --- a/drivers/atm/solos-pci.c +++ b/drivers/atm/solos-pci.c | |||
@@ -1059,7 +1059,7 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
1059 | goto out; | 1059 | goto out; |
1060 | } | 1060 | } |
1061 | 1061 | ||
1062 | err = pci_set_dma_mask(dev, DMA_32BIT_MASK); | 1062 | err = pci_set_dma_mask(dev, DMA_BIT_MASK(32)); |
1063 | if (err) { | 1063 | if (err) { |
1064 | dev_warn(&dev->dev, "Failed to set 32-bit DMA mask\n"); | 1064 | dev_warn(&dev->dev, "Failed to set 32-bit DMA mask\n"); |
1065 | goto out; | 1065 | goto out; |
diff --git a/drivers/base/base.h b/drivers/base/base.h index ddc97496db4a..b528145a078f 100644 --- a/drivers/base/base.h +++ b/drivers/base/base.h | |||
@@ -115,7 +115,7 @@ extern int driver_probe_device(struct device_driver *drv, struct device *dev); | |||
115 | static inline int driver_match_device(struct device_driver *drv, | 115 | static inline int driver_match_device(struct device_driver *drv, |
116 | struct device *dev) | 116 | struct device *dev) |
117 | { | 117 | { |
118 | return drv->bus->match && drv->bus->match(dev, drv); | 118 | return drv->bus->match ? drv->bus->match(dev, drv) : 1; |
119 | } | 119 | } |
120 | 120 | ||
121 | extern void sysdev_shutdown(void); | 121 | extern void sysdev_shutdown(void); |
diff --git a/drivers/base/core.c b/drivers/base/core.c index e73c92d13a23..d230ff4b3eec 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c | |||
@@ -1142,6 +1142,9 @@ int device_for_each_child(struct device *parent, void *data, | |||
1142 | struct device *child; | 1142 | struct device *child; |
1143 | int error = 0; | 1143 | int error = 0; |
1144 | 1144 | ||
1145 | if (!parent->p) | ||
1146 | return 0; | ||
1147 | |||
1145 | klist_iter_init(&parent->p->klist_children, &i); | 1148 | klist_iter_init(&parent->p->klist_children, &i); |
1146 | while ((child = next_device(&i)) && !error) | 1149 | while ((child = next_device(&i)) && !error) |
1147 | error = fn(child, data); | 1150 | error = fn(child, data); |
diff --git a/drivers/base/platform.c b/drivers/base/platform.c index d2198f64ad4e..b5b6c973a2e0 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c | |||
@@ -990,6 +990,8 @@ int __init platform_bus_init(void) | |||
990 | { | 990 | { |
991 | int error; | 991 | int error; |
992 | 992 | ||
993 | early_platform_cleanup(); | ||
994 | |||
993 | error = device_register(&platform_bus); | 995 | error = device_register(&platform_bus); |
994 | if (error) | 996 | if (error) |
995 | return error; | 997 | return error; |
@@ -1020,3 +1022,240 @@ u64 dma_get_required_mask(struct device *dev) | |||
1020 | } | 1022 | } |
1021 | EXPORT_SYMBOL_GPL(dma_get_required_mask); | 1023 | EXPORT_SYMBOL_GPL(dma_get_required_mask); |
1022 | #endif | 1024 | #endif |
1025 | |||
1026 | static __initdata LIST_HEAD(early_platform_driver_list); | ||
1027 | static __initdata LIST_HEAD(early_platform_device_list); | ||
1028 | |||
1029 | /** | ||
1030 | * early_platform_driver_register | ||
1031 | * @edrv: early_platform driver structure | ||
1032 | * @buf: string passed from early_param() | ||
1033 | */ | ||
1034 | int __init early_platform_driver_register(struct early_platform_driver *epdrv, | ||
1035 | char *buf) | ||
1036 | { | ||
1037 | unsigned long index; | ||
1038 | int n; | ||
1039 | |||
1040 | /* Simply add the driver to the end of the global list. | ||
1041 | * Drivers will by default be put on the list in compiled-in order. | ||
1042 | */ | ||
1043 | if (!epdrv->list.next) { | ||
1044 | INIT_LIST_HEAD(&epdrv->list); | ||
1045 | list_add_tail(&epdrv->list, &early_platform_driver_list); | ||
1046 | } | ||
1047 | |||
1048 | /* If the user has specified a device then make sure the driver | ||
1049 | * gets prioritized. The driver of the last device specified on the | ||
1050 | * command line will be put first on the list. | ||
1051 | */ | ||
1052 | n = strlen(epdrv->pdrv->driver.name); | ||
1053 | if (buf && !strncmp(buf, epdrv->pdrv->driver.name, n)) { | ||
1054 | list_move(&epdrv->list, &early_platform_driver_list); | ||
1055 | |||
1056 | if (!strcmp(buf, epdrv->pdrv->driver.name)) | ||
1057 | epdrv->requested_id = -1; | ||
1058 | else if (buf[n] == '.' && strict_strtoul(&buf[n + 1], 10, | ||
1059 | &index) == 0) | ||
1060 | epdrv->requested_id = index; | ||
1061 | else | ||
1062 | epdrv->requested_id = EARLY_PLATFORM_ID_ERROR; | ||
1063 | } | ||
1064 | |||
1065 | return 0; | ||
1066 | } | ||
1067 | |||
1068 | /** | ||
1069 | * early_platform_add_devices - add a number of early platform devices | ||
1070 | * @devs: array of early platform devices to add | ||
1071 | * @num: number of early platform devices in array | ||
1072 | */ | ||
1073 | void __init early_platform_add_devices(struct platform_device **devs, int num) | ||
1074 | { | ||
1075 | struct device *dev; | ||
1076 | int i; | ||
1077 | |||
1078 | /* simply add the devices to list */ | ||
1079 | for (i = 0; i < num; i++) { | ||
1080 | dev = &devs[i]->dev; | ||
1081 | |||
1082 | if (!dev->devres_head.next) { | ||
1083 | INIT_LIST_HEAD(&dev->devres_head); | ||
1084 | list_add_tail(&dev->devres_head, | ||
1085 | &early_platform_device_list); | ||
1086 | } | ||
1087 | } | ||
1088 | } | ||
1089 | |||
1090 | /** | ||
1091 | * early_platform_driver_register_all | ||
1092 | * @class_str: string to identify early platform driver class | ||
1093 | */ | ||
1094 | void __init early_platform_driver_register_all(char *class_str) | ||
1095 | { | ||
1096 | /* The "class_str" parameter may or may not be present on the kernel | ||
1097 | * command line. If it is present then there may be more than one | ||
1098 | * matching parameter. | ||
1099 | * | ||
1100 | * Since we register our early platform drivers using early_param() | ||
1101 | * we need to make sure that they also get registered in the case | ||
1102 | * when the parameter is missing from the kernel command line. | ||
1103 | * | ||
1104 | * We use parse_early_options() to make sure the early_param() gets | ||
1105 | * called at least once. The early_param() may be called more than | ||
1106 | * once since the name of the preferred device may be specified on | ||
1107 | * the kernel command line. early_platform_driver_register() handles | ||
1108 | * this case for us. | ||
1109 | */ | ||
1110 | parse_early_options(class_str); | ||
1111 | } | ||
1112 | |||
1113 | /** | ||
1114 | * early_platform_match | ||
1115 | * @edrv: early platform driver structure | ||
1116 | * @id: id to match against | ||
1117 | */ | ||
1118 | static __init struct platform_device * | ||
1119 | early_platform_match(struct early_platform_driver *epdrv, int id) | ||
1120 | { | ||
1121 | struct platform_device *pd; | ||
1122 | |||
1123 | list_for_each_entry(pd, &early_platform_device_list, dev.devres_head) | ||
1124 | if (platform_match(&pd->dev, &epdrv->pdrv->driver)) | ||
1125 | if (pd->id == id) | ||
1126 | return pd; | ||
1127 | |||
1128 | return NULL; | ||
1129 | } | ||
1130 | |||
1131 | /** | ||
1132 | * early_platform_left | ||
1133 | * @edrv: early platform driver structure | ||
1134 | * @id: return true if id or above exists | ||
1135 | */ | ||
1136 | static __init int early_platform_left(struct early_platform_driver *epdrv, | ||
1137 | int id) | ||
1138 | { | ||
1139 | struct platform_device *pd; | ||
1140 | |||
1141 | list_for_each_entry(pd, &early_platform_device_list, dev.devres_head) | ||
1142 | if (platform_match(&pd->dev, &epdrv->pdrv->driver)) | ||
1143 | if (pd->id >= id) | ||
1144 | return 1; | ||
1145 | |||
1146 | return 0; | ||
1147 | } | ||
1148 | |||
1149 | /** | ||
1150 | * early_platform_driver_probe_id | ||
1151 | * @class_str: string to identify early platform driver class | ||
1152 | * @id: id to match against | ||
1153 | * @nr_probe: number of platform devices to successfully probe before exiting | ||
1154 | */ | ||
1155 | static int __init early_platform_driver_probe_id(char *class_str, | ||
1156 | int id, | ||
1157 | int nr_probe) | ||
1158 | { | ||
1159 | struct early_platform_driver *epdrv; | ||
1160 | struct platform_device *match; | ||
1161 | int match_id; | ||
1162 | int n = 0; | ||
1163 | int left = 0; | ||
1164 | |||
1165 | list_for_each_entry(epdrv, &early_platform_driver_list, list) { | ||
1166 | /* only use drivers matching our class_str */ | ||
1167 | if (strcmp(class_str, epdrv->class_str)) | ||
1168 | continue; | ||
1169 | |||
1170 | if (id == -2) { | ||
1171 | match_id = epdrv->requested_id; | ||
1172 | left = 1; | ||
1173 | |||
1174 | } else { | ||
1175 | match_id = id; | ||
1176 | left += early_platform_left(epdrv, id); | ||
1177 | |||
1178 | /* skip requested id */ | ||
1179 | switch (epdrv->requested_id) { | ||
1180 | case EARLY_PLATFORM_ID_ERROR: | ||
1181 | case EARLY_PLATFORM_ID_UNSET: | ||
1182 | break; | ||
1183 | default: | ||
1184 | if (epdrv->requested_id == id) | ||
1185 | match_id = EARLY_PLATFORM_ID_UNSET; | ||
1186 | } | ||
1187 | } | ||
1188 | |||
1189 | switch (match_id) { | ||
1190 | case EARLY_PLATFORM_ID_ERROR: | ||
1191 | pr_warning("%s: unable to parse %s parameter\n", | ||
1192 | class_str, epdrv->pdrv->driver.name); | ||
1193 | /* fall-through */ | ||
1194 | case EARLY_PLATFORM_ID_UNSET: | ||
1195 | match = NULL; | ||
1196 | break; | ||
1197 | default: | ||
1198 | match = early_platform_match(epdrv, match_id); | ||
1199 | } | ||
1200 | |||
1201 | if (match) { | ||
1202 | if (epdrv->pdrv->probe(match)) | ||
1203 | pr_warning("%s: unable to probe %s early.\n", | ||
1204 | class_str, match->name); | ||
1205 | else | ||
1206 | n++; | ||
1207 | } | ||
1208 | |||
1209 | if (n >= nr_probe) | ||
1210 | break; | ||
1211 | } | ||
1212 | |||
1213 | if (left) | ||
1214 | return n; | ||
1215 | else | ||
1216 | return -ENODEV; | ||
1217 | } | ||
1218 | |||
1219 | /** | ||
1220 | * early_platform_driver_probe | ||
1221 | * @class_str: string to identify early platform driver class | ||
1222 | * @nr_probe: number of platform devices to successfully probe before exiting | ||
1223 | * @user_only: only probe user specified early platform devices | ||
1224 | */ | ||
1225 | int __init early_platform_driver_probe(char *class_str, | ||
1226 | int nr_probe, | ||
1227 | int user_only) | ||
1228 | { | ||
1229 | int k, n, i; | ||
1230 | |||
1231 | n = 0; | ||
1232 | for (i = -2; n < nr_probe; i++) { | ||
1233 | k = early_platform_driver_probe_id(class_str, i, nr_probe - n); | ||
1234 | |||
1235 | if (k < 0) | ||
1236 | break; | ||
1237 | |||
1238 | n += k; | ||
1239 | |||
1240 | if (user_only) | ||
1241 | break; | ||
1242 | } | ||
1243 | |||
1244 | return n; | ||
1245 | } | ||
1246 | |||
1247 | /** | ||
1248 | * early_platform_cleanup - clean up early platform code | ||
1249 | */ | ||
1250 | void __init early_platform_cleanup(void) | ||
1251 | { | ||
1252 | struct platform_device *pd, *pd2; | ||
1253 | |||
1254 | /* clean up the devres list used to chain devices */ | ||
1255 | list_for_each_entry_safe(pd, pd2, &early_platform_device_list, | ||
1256 | dev.devres_head) { | ||
1257 | list_del(&pd->dev.devres_head); | ||
1258 | memset(&pd->dev.devres_head, 0, sizeof(pd->dev.devres_head)); | ||
1259 | } | ||
1260 | } | ||
1261 | |||
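
The early platform code added above lets architecture or board code register platform drivers through early_param() and probe matching devices before the regular driver model is up: early_platform_driver_register() queues the driver (and parses an optional "name.N" id from the command line), early_platform_add_devices() queues the devices via their devres_head list, and early_platform_driver_probe() walks both lists by class string. A hedged usage sketch; the "earlytimer" class string, my_timer_device and board_setup_early() are made-up names, only the API functions come from the hunk above:

/* Illustration only: board code queues its early devices and then
 * probes at most one driver of the (hypothetical) "earlytimer" class.
 */
extern struct platform_device my_timer_device;	/* made-up device */

static struct platform_device *early_devices[] __initdata = {
	&my_timer_device,
};

static int __init board_setup_early(void)
{
	early_platform_add_devices(early_devices,
				   ARRAY_SIZE(early_devices));

	/* returns the number of devices successfully probed */
	return early_platform_driver_probe("earlytimer", 1, 0);
}
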
diff --git a/drivers/block/brd.c b/drivers/block/brd.c index bdd4f5f45575..5f7e64ba87e5 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c | |||
@@ -275,8 +275,10 @@ static int brd_do_bvec(struct brd_device *brd, struct page *page, | |||
275 | if (rw == READ) { | 275 | if (rw == READ) { |
276 | copy_from_brd(mem + off, brd, sector, len); | 276 | copy_from_brd(mem + off, brd, sector, len); |
277 | flush_dcache_page(page); | 277 | flush_dcache_page(page); |
278 | } else | 278 | } else { |
279 | flush_dcache_page(page); | ||
279 | copy_to_brd(brd, mem + off, sector, len); | 280 | copy_to_brd(brd, mem + off, sector, len); |
281 | } | ||
280 | kunmap_atomic(mem, KM_USER0); | 282 | kunmap_atomic(mem, KM_USER0); |
281 | 283 | ||
282 | out: | 284 | out: |
@@ -436,6 +438,7 @@ static struct brd_device *brd_alloc(int i) | |||
436 | if (!brd->brd_queue) | 438 | if (!brd->brd_queue) |
437 | goto out_free_dev; | 439 | goto out_free_dev; |
438 | blk_queue_make_request(brd->brd_queue, brd_make_request); | 440 | blk_queue_make_request(brd->brd_queue, brd_make_request); |
441 | blk_queue_ordered(brd->brd_queue, QUEUE_ORDERED_TAG, NULL); | ||
439 | blk_queue_max_sectors(brd->brd_queue, 1024); | 442 | blk_queue_max_sectors(brd->brd_queue, 1024); |
440 | blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY); | 443 | blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY); |
441 | 444 | ||
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index 0ef6f08aa6ea..4d4d5e0d3fa6 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c | |||
@@ -3505,7 +3505,7 @@ static __devinit int cciss_message(struct pci_dev *pdev, unsigned char opcode, u | |||
3505 | /* The Inbound Post Queue only accepts 32-bit physical addresses for the | 3505 | /* The Inbound Post Queue only accepts 32-bit physical addresses for the |
3506 | CCISS commands, so they must be allocated from the lower 4GiB of | 3506 | CCISS commands, so they must be allocated from the lower 4GiB of |
3507 | memory. */ | 3507 | memory. */ |
3508 | err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); | 3508 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); |
3509 | if (err) { | 3509 | if (err) { |
3510 | iounmap(vaddr); | 3510 | iounmap(vaddr); |
3511 | return -ENOMEM; | 3511 | return -ENOMEM; |
diff --git a/drivers/block/ub.c b/drivers/block/ub.c index 69b7f8e77596..689cd27ac890 100644 --- a/drivers/block/ub.c +++ b/drivers/block/ub.c | |||
@@ -1025,6 +1025,7 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |||
1025 | { | 1025 | { |
1026 | struct urb *urb = &sc->work_urb; | 1026 | struct urb *urb = &sc->work_urb; |
1027 | struct bulk_cs_wrap *bcs; | 1027 | struct bulk_cs_wrap *bcs; |
1028 | int endp; | ||
1028 | int len; | 1029 | int len; |
1029 | int rc; | 1030 | int rc; |
1030 | 1031 | ||
@@ -1033,6 +1034,10 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |||
1033 | return; | 1034 | return; |
1034 | } | 1035 | } |
1035 | 1036 | ||
1037 | endp = usb_pipeendpoint(sc->last_pipe); | ||
1038 | if (usb_pipein(sc->last_pipe)) | ||
1039 | endp |= USB_DIR_IN; | ||
1040 | |||
1036 | if (cmd->state == UB_CMDST_CLEAR) { | 1041 | if (cmd->state == UB_CMDST_CLEAR) { |
1037 | if (urb->status == -EPIPE) { | 1042 | if (urb->status == -EPIPE) { |
1038 | /* | 1043 | /* |
@@ -1048,9 +1053,7 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |||
1048 | * We ignore the result for the halt clear. | 1053 | * We ignore the result for the halt clear. |
1049 | */ | 1054 | */ |
1050 | 1055 | ||
1051 | /* reset the endpoint toggle */ | 1056 | usb_reset_endpoint(sc->dev, endp); |
1052 | usb_settoggle(sc->dev, usb_pipeendpoint(sc->last_pipe), | ||
1053 | usb_pipeout(sc->last_pipe), 0); | ||
1054 | 1057 | ||
1055 | ub_state_sense(sc, cmd); | 1058 | ub_state_sense(sc, cmd); |
1056 | 1059 | ||
@@ -1065,9 +1068,7 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |||
1065 | * We ignore the result for the halt clear. | 1068 | * We ignore the result for the halt clear. |
1066 | */ | 1069 | */ |
1067 | 1070 | ||
1068 | /* reset the endpoint toggle */ | 1071 | usb_reset_endpoint(sc->dev, endp); |
1069 | usb_settoggle(sc->dev, usb_pipeendpoint(sc->last_pipe), | ||
1070 | usb_pipeout(sc->last_pipe), 0); | ||
1071 | 1072 | ||
1072 | ub_state_stat(sc, cmd); | 1073 | ub_state_stat(sc, cmd); |
1073 | 1074 | ||
@@ -1082,9 +1083,7 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |||
1082 | * We ignore the result for the halt clear. | 1083 | * We ignore the result for the halt clear. |
1083 | */ | 1084 | */ |
1084 | 1085 | ||
1085 | /* reset the endpoint toggle */ | 1086 | usb_reset_endpoint(sc->dev, endp); |
1086 | usb_settoggle(sc->dev, usb_pipeendpoint(sc->last_pipe), | ||
1087 | usb_pipeout(sc->last_pipe), 0); | ||
1088 | 1087 | ||
1089 | ub_state_stat_counted(sc, cmd); | 1088 | ub_state_stat_counted(sc, cmd); |
1090 | 1089 | ||
@@ -2119,8 +2118,7 @@ static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe) | |||
2119 | del_timer_sync(&timer); | 2118 | del_timer_sync(&timer); |
2120 | usb_kill_urb(&sc->work_urb); | 2119 | usb_kill_urb(&sc->work_urb); |
2121 | 2120 | ||
2122 | /* reset the endpoint toggle */ | 2121 | usb_reset_endpoint(sc->dev, endp); |
2123 | usb_settoggle(sc->dev, endp, usb_pipeout(sc->last_pipe), 0); | ||
2124 | 2122 | ||
2125 | return 0; | 2123 | return 0; |
2126 | } | 2124 | } |
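
The ub changes above replace the hand-rolled toggle clearing with usb_reset_endpoint(), which expects the endpoint address including the direction bit, so the address is rebuilt from the pipe that stalled. Condensed from ub_scsi_urb_compl() above:

/* Same logic as the hunk above: derive the endpoint address from
 * sc->last_pipe and let usbcore reset the endpoint state (toggle
 * included) instead of poking the toggle directly.
 */
int endp = usb_pipeendpoint(sc->last_pipe);

if (usb_pipein(sc->last_pipe))
	endp |= USB_DIR_IN;

usb_reset_endpoint(sc->dev, endp);
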
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c index 6cccdc3f5220..4aecf5dc6a93 100644 --- a/drivers/block/xsysace.c +++ b/drivers/block/xsysace.c | |||
@@ -563,7 +563,7 @@ static void ace_fsm_dostate(struct ace_device *ace) | |||
563 | case ACE_FSM_STATE_IDENTIFY_PREPARE: | 563 | case ACE_FSM_STATE_IDENTIFY_PREPARE: |
564 | /* Send identify command */ | 564 | /* Send identify command */ |
565 | ace->fsm_task = ACE_TASK_IDENTIFY; | 565 | ace->fsm_task = ACE_TASK_IDENTIFY; |
566 | ace->data_ptr = &ace->cf_id; | 566 | ace->data_ptr = ace->cf_id; |
567 | ace->data_count = ACE_BUF_PER_SECTOR; | 567 | ace->data_count = ACE_BUF_PER_SECTOR; |
568 | ace_out(ace, ACE_SECCNTCMD, ACE_SECCNTCMD_IDENTIFY); | 568 | ace_out(ace, ACE_SECCNTCMD, ACE_SECCNTCMD_IDENTIFY); |
569 | 569 | ||
@@ -608,8 +608,8 @@ static void ace_fsm_dostate(struct ace_device *ace) | |||
608 | break; | 608 | break; |
609 | 609 | ||
610 | case ACE_FSM_STATE_IDENTIFY_COMPLETE: | 610 | case ACE_FSM_STATE_IDENTIFY_COMPLETE: |
611 | ace_fix_driveid(&ace->cf_id[0]); | 611 | ace_fix_driveid(ace->cf_id); |
612 | ace_dump_mem(&ace->cf_id, 512); /* Debug: Dump out disk ID */ | 612 | ace_dump_mem(ace->cf_id, 512); /* Debug: Dump out disk ID */ |
613 | 613 | ||
614 | if (ace->data_result) { | 614 | if (ace->data_result) { |
615 | /* Error occured, disable the disk */ | 615 | /* Error occured, disable the disk */ |
@@ -622,9 +622,9 @@ static void ace_fsm_dostate(struct ace_device *ace) | |||
622 | 622 | ||
623 | /* Record disk parameters */ | 623 | /* Record disk parameters */ |
624 | set_capacity(ace->gd, | 624 | set_capacity(ace->gd, |
625 | ata_id_u32(&ace->cf_id, ATA_ID_LBA_CAPACITY)); | 625 | ata_id_u32(ace->cf_id, ATA_ID_LBA_CAPACITY)); |
626 | dev_info(ace->dev, "capacity: %i sectors\n", | 626 | dev_info(ace->dev, "capacity: %i sectors\n", |
627 | ata_id_u32(&ace->cf_id, ATA_ID_LBA_CAPACITY)); | 627 | ata_id_u32(ace->cf_id, ATA_ID_LBA_CAPACITY)); |
628 | } | 628 | } |
629 | 629 | ||
630 | /* We're done, drop to IDLE state and notify waiters */ | 630 | /* We're done, drop to IDLE state and notify waiters */ |
@@ -923,7 +923,7 @@ static int ace_release(struct gendisk *disk, fmode_t mode) | |||
923 | static int ace_getgeo(struct block_device *bdev, struct hd_geometry *geo) | 923 | static int ace_getgeo(struct block_device *bdev, struct hd_geometry *geo) |
924 | { | 924 | { |
925 | struct ace_device *ace = bdev->bd_disk->private_data; | 925 | struct ace_device *ace = bdev->bd_disk->private_data; |
926 | u16 *cf_id = &ace->cf_id[0]; | 926 | u16 *cf_id = ace->cf_id; |
927 | 927 | ||
928 | dev_dbg(ace->dev, "ace_getgeo()\n"); | 928 | dev_dbg(ace->dev, "ace_getgeo()\n"); |
929 | 929 | ||
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c index 9d9490e22e07..3686912427ba 100644 --- a/drivers/char/agp/intel-agp.c +++ b/drivers/char/agp/intel-agp.c | |||
@@ -2131,6 +2131,8 @@ static const struct intel_driver_description { | |||
2131 | { PCI_DEVICE_ID_INTEL_82845G_HB, PCI_DEVICE_ID_INTEL_82845G_IG, 0, "830M", | 2131 | { PCI_DEVICE_ID_INTEL_82845G_HB, PCI_DEVICE_ID_INTEL_82845G_IG, 0, "830M", |
2132 | &intel_845_driver, &intel_830_driver }, | 2132 | &intel_845_driver, &intel_830_driver }, |
2133 | { PCI_DEVICE_ID_INTEL_82850_HB, 0, 0, "i850", &intel_850_driver, NULL }, | 2133 | { PCI_DEVICE_ID_INTEL_82850_HB, 0, 0, "i850", &intel_850_driver, NULL }, |
2134 | { PCI_DEVICE_ID_INTEL_82854_HB, PCI_DEVICE_ID_INTEL_82854_IG, 0, "854", | ||
2135 | &intel_845_driver, &intel_830_driver }, | ||
2134 | { PCI_DEVICE_ID_INTEL_82855PM_HB, 0, 0, "855PM", &intel_845_driver, NULL }, | 2136 | { PCI_DEVICE_ID_INTEL_82855PM_HB, 0, 0, "855PM", &intel_845_driver, NULL }, |
2135 | { PCI_DEVICE_ID_INTEL_82855GM_HB, PCI_DEVICE_ID_INTEL_82855GM_IG, 0, "855GM", | 2137 | { PCI_DEVICE_ID_INTEL_82855GM_HB, PCI_DEVICE_ID_INTEL_82855GM_IG, 0, "855GM", |
2136 | &intel_845_driver, &intel_830_driver }, | 2138 | &intel_845_driver, &intel_830_driver }, |
@@ -2355,6 +2357,7 @@ static struct pci_device_id agp_intel_pci_table[] = { | |||
2355 | ID(PCI_DEVICE_ID_INTEL_82845_HB), | 2357 | ID(PCI_DEVICE_ID_INTEL_82845_HB), |
2356 | ID(PCI_DEVICE_ID_INTEL_82845G_HB), | 2358 | ID(PCI_DEVICE_ID_INTEL_82845G_HB), |
2357 | ID(PCI_DEVICE_ID_INTEL_82850_HB), | 2359 | ID(PCI_DEVICE_ID_INTEL_82850_HB), |
2360 | ID(PCI_DEVICE_ID_INTEL_82854_HB), | ||
2358 | ID(PCI_DEVICE_ID_INTEL_82855PM_HB), | 2361 | ID(PCI_DEVICE_ID_INTEL_82855PM_HB), |
2359 | ID(PCI_DEVICE_ID_INTEL_82855GM_HB), | 2362 | ID(PCI_DEVICE_ID_INTEL_82855GM_HB), |
2360 | ID(PCI_DEVICE_ID_INTEL_82860_HB), | 2363 | ID(PCI_DEVICE_ID_INTEL_82860_HB), |
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c index 6de020d078e1..b0a6a3e51924 100644 --- a/drivers/char/sysrq.c +++ b/drivers/char/sysrq.c | |||
@@ -35,7 +35,6 @@ | |||
35 | #include <linux/vt_kern.h> | 35 | #include <linux/vt_kern.h> |
36 | #include <linux/workqueue.h> | 36 | #include <linux/workqueue.h> |
37 | #include <linux/kexec.h> | 37 | #include <linux/kexec.h> |
38 | #include <linux/interrupt.h> | ||
39 | #include <linux/hrtimer.h> | 38 | #include <linux/hrtimer.h> |
40 | #include <linux/oom.h> | 39 | #include <linux/oom.h> |
41 | 40 | ||
diff --git a/drivers/char/vt.c b/drivers/char/vt.c index 2c1d133819b5..08151d4de489 100644 --- a/drivers/char/vt.c +++ b/drivers/char/vt.c | |||
@@ -2274,7 +2274,7 @@ rescan_last_byte: | |||
2274 | continue; /* nothing to display */ | 2274 | continue; /* nothing to display */ |
2275 | } | 2275 | } |
2276 | /* Glyph not found */ | 2276 | /* Glyph not found */ |
2277 | if ((!(vc->vc_utf && !vc->vc_disp_ctrl) && c < 128) && !(c & ~charmask)) { | 2277 | if ((!(vc->vc_utf && !vc->vc_disp_ctrl) || c < 128) && !(c & ~charmask)) { |
2278 | /* In legacy mode use the glyph we get by a 1:1 mapping. | 2278 | /* In legacy mode use the glyph we get by a 1:1 mapping. |
2279 | This would make absolutely no sense with Unicode in mind, | 2279 | This would make absolutely no sense with Unicode in mind, |
2280 | but do this for ASCII characters since a font may lack | 2280 | but do this for ASCII characters since a font may lack |
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h index 28f2c3f959b5..6ad95c8d6363 100644 --- a/drivers/edac/edac_core.h +++ b/drivers/edac/edac_core.h | |||
@@ -767,11 +767,19 @@ static inline void pci_write_bits16(struct pci_dev *pdev, int offset, | |||
767 | pci_write_config_word(pdev, offset, value); | 767 | pci_write_config_word(pdev, offset, value); |
768 | } | 768 | } |
769 | 769 | ||
770 | /* write all or some bits in a dword-register*/ | 770 | /* |
771 | * pci_write_bits32 | ||
772 | * | ||
773 | * edac local routine to do pci_write_config_dword, but adds | ||
774 | * a mask parameter. If mask is all ones, ignore the mask. | ||
775 | * Otherwise utilize the mask to isolate specified bits | ||
776 | * | ||
777 | * write all or some bits in a dword-register | ||
778 | */ | ||
771 | static inline void pci_write_bits32(struct pci_dev *pdev, int offset, | 779 | static inline void pci_write_bits32(struct pci_dev *pdev, int offset, |
772 | u32 value, u32 mask) | 780 | u32 value, u32 mask) |
773 | { | 781 | { |
774 | if (mask != 0xffff) { | 782 | if (mask != 0xffffffff) { |
775 | u32 buf; | 783 | u32 buf; |
776 | 784 | ||
777 | pci_read_config_dword(pdev, offset, &buf); | 785 | pci_read_config_dword(pdev, offset, &buf); |
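
The pci_write_bits32() fix above corrects the all-ones test from the 16-bit constant 0xffff to 0xffffffff, so a full 32-bit mask now skips the read-modify-write step exactly as the new comment describes. A short usage sketch; the 0x40 offset and the values are made up for illustration:

/* Update only bits 7:4 of a config dword, leaving the rest untouched. */
pci_write_bits32(pdev, 0x40, 0x00000030, 0x000000f0);

/* With an all-ones mask the helper now writes the value directly,
 * without reading the register first. */
pci_write_bits32(pdev, 0x40, 0x12345678, 0xffffffff);
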
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c index ca9113e1c106..a7d2c717d033 100644 --- a/drivers/edac/edac_device.c +++ b/drivers/edac/edac_device.c | |||
@@ -389,7 +389,7 @@ static void del_edac_device_from_global_list(struct edac_device_ctl_info | |||
389 | */ | 389 | */ |
390 | static void edac_device_workq_function(struct work_struct *work_req) | 390 | static void edac_device_workq_function(struct work_struct *work_req) |
391 | { | 391 | { |
392 | struct delayed_work *d_work = (struct delayed_work *)work_req; | 392 | struct delayed_work *d_work = to_delayed_work(work_req); |
393 | struct edac_device_ctl_info *edac_dev = to_edac_device_ctl_work(d_work); | 393 | struct edac_device_ctl_info *edac_dev = to_edac_device_ctl_work(d_work); |
394 | 394 | ||
395 | mutex_lock(&device_ctls_mutex); | 395 | mutex_lock(&device_ctls_mutex); |
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c index 25d66940b4fa..335b7ebdb11c 100644 --- a/drivers/edac/edac_mc.c +++ b/drivers/edac/edac_mc.c | |||
@@ -260,7 +260,7 @@ static int edac_mc_assert_error_check_and_clear(void) | |||
260 | */ | 260 | */ |
261 | static void edac_mc_workq_function(struct work_struct *work_req) | 261 | static void edac_mc_workq_function(struct work_struct *work_req) |
262 | { | 262 | { |
263 | struct delayed_work *d_work = (struct delayed_work *)work_req; | 263 | struct delayed_work *d_work = to_delayed_work(work_req); |
264 | struct mem_ctl_info *mci = to_edac_mem_ctl_work(d_work); | 264 | struct mem_ctl_info *mci = to_edac_mem_ctl_work(d_work); |
265 | 265 | ||
266 | mutex_lock(&mem_ctls_mutex); | 266 | mutex_lock(&mem_ctls_mutex); |
diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c index 5b150aea703a..30b585b1d60b 100644 --- a/drivers/edac/edac_pci.c +++ b/drivers/edac/edac_pci.c | |||
@@ -233,7 +233,7 @@ EXPORT_SYMBOL_GPL(edac_pci_find); | |||
233 | */ | 233 | */ |
234 | static void edac_pci_workq_function(struct work_struct *work_req) | 234 | static void edac_pci_workq_function(struct work_struct *work_req) |
235 | { | 235 | { |
236 | struct delayed_work *d_work = (struct delayed_work *)work_req; | 236 | struct delayed_work *d_work = to_delayed_work(work_req); |
237 | struct edac_pci_ctl_info *pci = to_edac_pci_ctl_work(d_work); | 237 | struct edac_pci_ctl_info *pci = to_edac_pci_ctl_work(d_work); |
238 | int msec; | 238 | int msec; |
239 | unsigned long delay; | 239 | unsigned long delay; |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 3750d8003048..473a8f7fbdb5 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -446,6 +446,9 @@ struct drm_i915_gem_object { | |||
446 | uint32_t tiling_mode; | 446 | uint32_t tiling_mode; |
447 | uint32_t stride; | 447 | uint32_t stride; |
448 | 448 | ||
449 | /** Record of address bit 17 of each page at last unbind. */ | ||
450 | long *bit_17; | ||
451 | |||
449 | /** AGP mapping type (AGP_USER_MEMORY or AGP_USER_CACHED_MEMORY */ | 452 | /** AGP mapping type (AGP_USER_MEMORY or AGP_USER_CACHED_MEMORY */ |
450 | uint32_t agp_type; | 453 | uint32_t agp_type; |
451 | 454 | ||
@@ -635,9 +638,13 @@ int i915_gem_attach_phys_object(struct drm_device *dev, | |||
635 | void i915_gem_detach_phys_object(struct drm_device *dev, | 638 | void i915_gem_detach_phys_object(struct drm_device *dev, |
636 | struct drm_gem_object *obj); | 639 | struct drm_gem_object *obj); |
637 | void i915_gem_free_all_phys_object(struct drm_device *dev); | 640 | void i915_gem_free_all_phys_object(struct drm_device *dev); |
641 | int i915_gem_object_get_pages(struct drm_gem_object *obj); | ||
642 | void i915_gem_object_put_pages(struct drm_gem_object *obj); | ||
638 | 643 | ||
639 | /* i915_gem_tiling.c */ | 644 | /* i915_gem_tiling.c */ |
640 | void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); | 645 | void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); |
646 | void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj); | ||
647 | void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj); | ||
641 | 648 | ||
642 | /* i915_gem_debug.c */ | 649 | /* i915_gem_debug.c */ |
643 | void i915_gem_dump_object(struct drm_gem_object *obj, int len, | 650 | void i915_gem_dump_object(struct drm_gem_object *obj, int len, |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 1449b452cc63..4642115902d6 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -43,8 +43,6 @@ static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, | |||
43 | uint64_t offset, | 43 | uint64_t offset, |
44 | uint64_t size); | 44 | uint64_t size); |
45 | static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj); | 45 | static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj); |
46 | static int i915_gem_object_get_pages(struct drm_gem_object *obj); | ||
47 | static void i915_gem_object_put_pages(struct drm_gem_object *obj); | ||
48 | static int i915_gem_object_wait_rendering(struct drm_gem_object *obj); | 46 | static int i915_gem_object_wait_rendering(struct drm_gem_object *obj); |
49 | static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, | 47 | static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, |
50 | unsigned alignment); | 48 | unsigned alignment); |
@@ -143,15 +141,27 @@ fast_shmem_read(struct page **pages, | |||
143 | int length) | 141 | int length) |
144 | { | 142 | { |
145 | char __iomem *vaddr; | 143 | char __iomem *vaddr; |
146 | int ret; | 144 | int unwritten; |
147 | 145 | ||
148 | vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0); | 146 | vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0); |
149 | if (vaddr == NULL) | 147 | if (vaddr == NULL) |
150 | return -ENOMEM; | 148 | return -ENOMEM; |
151 | ret = __copy_to_user_inatomic(data, vaddr + page_offset, length); | 149 | unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length); |
152 | kunmap_atomic(vaddr, KM_USER0); | 150 | kunmap_atomic(vaddr, KM_USER0); |
153 | 151 | ||
154 | return ret; | 152 | if (unwritten) |
153 | return -EFAULT; | ||
154 | |||
155 | return 0; | ||
156 | } | ||
157 | |||
158 | static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj) | ||
159 | { | ||
160 | drm_i915_private_t *dev_priv = obj->dev->dev_private; | ||
161 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
162 | |||
163 | return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && | ||
164 | obj_priv->tiling_mode != I915_TILING_NONE; | ||
155 | } | 165 | } |
156 | 166 | ||
157 | static inline int | 167 | static inline int |
@@ -181,6 +191,64 @@ slow_shmem_copy(struct page *dst_page, | |||
181 | return 0; | 191 | return 0; |
182 | } | 192 | } |
183 | 193 | ||
194 | static inline int | ||
195 | slow_shmem_bit17_copy(struct page *gpu_page, | ||
196 | int gpu_offset, | ||
197 | struct page *cpu_page, | ||
198 | int cpu_offset, | ||
199 | int length, | ||
200 | int is_read) | ||
201 | { | ||
202 | char *gpu_vaddr, *cpu_vaddr; | ||
203 | |||
204 | /* Use the unswizzled path if this page isn't affected. */ | ||
205 | if ((page_to_phys(gpu_page) & (1 << 17)) == 0) { | ||
206 | if (is_read) | ||
207 | return slow_shmem_copy(cpu_page, cpu_offset, | ||
208 | gpu_page, gpu_offset, length); | ||
209 | else | ||
210 | return slow_shmem_copy(gpu_page, gpu_offset, | ||
211 | cpu_page, cpu_offset, length); | ||
212 | } | ||
213 | |||
214 | gpu_vaddr = kmap_atomic(gpu_page, KM_USER0); | ||
215 | if (gpu_vaddr == NULL) | ||
216 | return -ENOMEM; | ||
217 | |||
218 | cpu_vaddr = kmap_atomic(cpu_page, KM_USER1); | ||
219 | if (cpu_vaddr == NULL) { | ||
220 | kunmap_atomic(gpu_vaddr, KM_USER0); | ||
221 | return -ENOMEM; | ||
222 | } | ||
223 | |||
224 | /* Copy the data, XORing A6 with A17 (1). The user already knows he's | ||
225 | * XORing with the other bits (A9 for Y, A9 and A10 for X) | ||
226 | */ | ||
227 | while (length > 0) { | ||
228 | int cacheline_end = ALIGN(gpu_offset + 1, 64); | ||
229 | int this_length = min(cacheline_end - gpu_offset, length); | ||
230 | int swizzled_gpu_offset = gpu_offset ^ 64; | ||
231 | |||
232 | if (is_read) { | ||
233 | memcpy(cpu_vaddr + cpu_offset, | ||
234 | gpu_vaddr + swizzled_gpu_offset, | ||
235 | this_length); | ||
236 | } else { | ||
237 | memcpy(gpu_vaddr + swizzled_gpu_offset, | ||
238 | cpu_vaddr + cpu_offset, | ||
239 | this_length); | ||
240 | } | ||
241 | cpu_offset += this_length; | ||
242 | gpu_offset += this_length; | ||
243 | length -= this_length; | ||
244 | } | ||
245 | |||
246 | kunmap_atomic(cpu_vaddr, KM_USER1); | ||
247 | kunmap_atomic(gpu_vaddr, KM_USER0); | ||
248 | |||
249 | return 0; | ||
250 | } | ||
251 | |||
184 | /** | 252 | /** |
185 | * This is the fast shmem pread path, which attempts to copy_from_user directly | 253 | * This is the fast shmem pread path, which attempts to copy_from_user directly |
186 | * from the backing pages of the object to the user's address space. On a | 254 | * from the backing pages of the object to the user's address space. On a |
@@ -269,6 +337,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
269 | int page_length; | 337 | int page_length; |
270 | int ret; | 338 | int ret; |
271 | uint64_t data_ptr = args->data_ptr; | 339 | uint64_t data_ptr = args->data_ptr; |
340 | int do_bit17_swizzling; | ||
272 | 341 | ||
273 | remain = args->size; | 342 | remain = args->size; |
274 | 343 | ||
@@ -286,13 +355,15 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
286 | 355 | ||
287 | down_read(&mm->mmap_sem); | 356 | down_read(&mm->mmap_sem); |
288 | pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr, | 357 | pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr, |
289 | num_pages, 0, 0, user_pages, NULL); | 358 | num_pages, 1, 0, user_pages, NULL); |
290 | up_read(&mm->mmap_sem); | 359 | up_read(&mm->mmap_sem); |
291 | if (pinned_pages < num_pages) { | 360 | if (pinned_pages < num_pages) { |
292 | ret = -EFAULT; | 361 | ret = -EFAULT; |
293 | goto fail_put_user_pages; | 362 | goto fail_put_user_pages; |
294 | } | 363 | } |
295 | 364 | ||
365 | do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); | ||
366 | |||
296 | mutex_lock(&dev->struct_mutex); | 367 | mutex_lock(&dev->struct_mutex); |
297 | 368 | ||
298 | ret = i915_gem_object_get_pages(obj); | 369 | ret = i915_gem_object_get_pages(obj); |
@@ -327,11 +398,20 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
327 | if ((data_page_offset + page_length) > PAGE_SIZE) | 398 | if ((data_page_offset + page_length) > PAGE_SIZE) |
328 | page_length = PAGE_SIZE - data_page_offset; | 399 | page_length = PAGE_SIZE - data_page_offset; |
329 | 400 | ||
330 | ret = slow_shmem_copy(user_pages[data_page_index], | 401 | if (do_bit17_swizzling) { |
331 | data_page_offset, | 402 | ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index], |
332 | obj_priv->pages[shmem_page_index], | 403 | shmem_page_offset, |
333 | shmem_page_offset, | 404 | user_pages[data_page_index], |
334 | page_length); | 405 | data_page_offset, |
406 | page_length, | ||
407 | 1); | ||
408 | } else { | ||
409 | ret = slow_shmem_copy(user_pages[data_page_index], | ||
410 | data_page_offset, | ||
411 | obj_priv->pages[shmem_page_index], | ||
412 | shmem_page_offset, | ||
413 | page_length); | ||
414 | } | ||
335 | if (ret) | 415 | if (ret) |
336 | goto fail_put_pages; | 416 | goto fail_put_pages; |
337 | 417 | ||
@@ -383,9 +463,14 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, | |||
383 | return -EINVAL; | 463 | return -EINVAL; |
384 | } | 464 | } |
385 | 465 | ||
386 | ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv); | 466 | if (i915_gem_object_needs_bit17_swizzle(obj)) { |
387 | if (ret != 0) | ||
388 | ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv); | 467 | ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv); |
468 | } else { | ||
469 | ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv); | ||
470 | if (ret != 0) | ||
471 | ret = i915_gem_shmem_pread_slow(dev, obj, args, | ||
472 | file_priv); | ||
473 | } | ||
389 | 474 | ||
390 | drm_gem_object_unreference(obj); | 475 | drm_gem_object_unreference(obj); |
391 | 476 | ||
@@ -727,6 +812,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
727 | int page_length; | 812 | int page_length; |
728 | int ret; | 813 | int ret; |
729 | uint64_t data_ptr = args->data_ptr; | 814 | uint64_t data_ptr = args->data_ptr; |
815 | int do_bit17_swizzling; | ||
730 | 816 | ||
731 | remain = args->size; | 817 | remain = args->size; |
732 | 818 | ||
@@ -751,6 +837,8 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
751 | goto fail_put_user_pages; | 837 | goto fail_put_user_pages; |
752 | } | 838 | } |
753 | 839 | ||
840 | do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); | ||
841 | |||
754 | mutex_lock(&dev->struct_mutex); | 842 | mutex_lock(&dev->struct_mutex); |
755 | 843 | ||
756 | ret = i915_gem_object_get_pages(obj); | 844 | ret = i915_gem_object_get_pages(obj); |
@@ -785,11 +873,20 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
785 | if ((data_page_offset + page_length) > PAGE_SIZE) | 873 | if ((data_page_offset + page_length) > PAGE_SIZE) |
786 | page_length = PAGE_SIZE - data_page_offset; | 874 | page_length = PAGE_SIZE - data_page_offset; |
787 | 875 | ||
788 | ret = slow_shmem_copy(obj_priv->pages[shmem_page_index], | 876 | if (do_bit17_swizzling) { |
789 | shmem_page_offset, | 877 | ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index], |
790 | user_pages[data_page_index], | 878 | shmem_page_offset, |
791 | data_page_offset, | 879 | user_pages[data_page_index], |
792 | page_length); | 880 | data_page_offset, |
881 | page_length, | ||
882 | 0); | ||
883 | } else { | ||
884 | ret = slow_shmem_copy(obj_priv->pages[shmem_page_index], | ||
885 | shmem_page_offset, | ||
886 | user_pages[data_page_index], | ||
887 | data_page_offset, | ||
888 | page_length); | ||
889 | } | ||
793 | if (ret) | 890 | if (ret) |
794 | goto fail_put_pages; | 891 | goto fail_put_pages; |
795 | 892 | ||
@@ -854,6 +951,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | |||
854 | ret = i915_gem_gtt_pwrite_slow(dev, obj, args, | 951 | ret = i915_gem_gtt_pwrite_slow(dev, obj, args, |
855 | file_priv); | 952 | file_priv); |
856 | } | 953 | } |
954 | } else if (i915_gem_object_needs_bit17_swizzle(obj)) { | ||
955 | ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv); | ||
857 | } else { | 956 | } else { |
858 | ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv); | 957 | ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv); |
859 | if (ret == -EFAULT) { | 958 | if (ret == -EFAULT) { |
@@ -1285,7 +1384,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, | |||
1285 | return 0; | 1384 | return 0; |
1286 | } | 1385 | } |
1287 | 1386 | ||
1288 | static void | 1387 | void |
1289 | i915_gem_object_put_pages(struct drm_gem_object *obj) | 1388 | i915_gem_object_put_pages(struct drm_gem_object *obj) |
1290 | { | 1389 | { |
1291 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 1390 | struct drm_i915_gem_object *obj_priv = obj->driver_private; |
@@ -1297,6 +1396,9 @@ i915_gem_object_put_pages(struct drm_gem_object *obj) | |||
1297 | if (--obj_priv->pages_refcount != 0) | 1396 | if (--obj_priv->pages_refcount != 0) |
1298 | return; | 1397 | return; |
1299 | 1398 | ||
1399 | if (obj_priv->tiling_mode != I915_TILING_NONE) | ||
1400 | i915_gem_object_save_bit_17_swizzle(obj); | ||
1401 | |||
1300 | for (i = 0; i < page_count; i++) | 1402 | for (i = 0; i < page_count; i++) |
1301 | if (obj_priv->pages[i] != NULL) { | 1403 | if (obj_priv->pages[i] != NULL) { |
1302 | if (obj_priv->dirty) | 1404 | if (obj_priv->dirty) |
@@ -1494,8 +1596,19 @@ i915_gem_retire_request(struct drm_device *dev, | |||
1494 | 1596 | ||
1495 | if (obj->write_domain != 0) | 1597 | if (obj->write_domain != 0) |
1496 | i915_gem_object_move_to_flushing(obj); | 1598 | i915_gem_object_move_to_flushing(obj); |
1497 | else | 1599 | else { |
1600 | /* Take a reference on the object so it won't be | ||
1601 | * freed while the spinlock is held. The list | ||
1602 | * protection for this spinlock is safe when breaking | ||
1603 | * the lock like this since the next thing we do | ||
1604 | * is just get the head of the list again. | ||
1605 | */ | ||
1606 | drm_gem_object_reference(obj); | ||
1498 | i915_gem_object_move_to_inactive(obj); | 1607 | i915_gem_object_move_to_inactive(obj); |
1608 | spin_unlock(&dev_priv->mm.active_list_lock); | ||
1609 | drm_gem_object_unreference(obj); | ||
1610 | spin_lock(&dev_priv->mm.active_list_lock); | ||
1611 | } | ||
1499 | } | 1612 | } |
1500 | out: | 1613 | out: |
1501 | spin_unlock(&dev_priv->mm.active_list_lock); | 1614 | spin_unlock(&dev_priv->mm.active_list_lock); |
@@ -1884,7 +1997,7 @@ i915_gem_evict_everything(struct drm_device *dev) | |||
1884 | return ret; | 1997 | return ret; |
1885 | } | 1998 | } |
1886 | 1999 | ||
1887 | static int | 2000 | int |
1888 | i915_gem_object_get_pages(struct drm_gem_object *obj) | 2001 | i915_gem_object_get_pages(struct drm_gem_object *obj) |
1889 | { | 2002 | { |
1890 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2003 | struct drm_i915_gem_object *obj_priv = obj->driver_private; |
@@ -1922,6 +2035,10 @@ i915_gem_object_get_pages(struct drm_gem_object *obj) | |||
1922 | } | 2035 | } |
1923 | obj_priv->pages[i] = page; | 2036 | obj_priv->pages[i] = page; |
1924 | } | 2037 | } |
2038 | |||
2039 | if (obj_priv->tiling_mode != I915_TILING_NONE) | ||
2040 | i915_gem_object_do_bit_17_swizzle(obj); | ||
2041 | |||
1925 | return 0; | 2042 | return 0; |
1926 | } | 2043 | } |
1927 | 2044 | ||
@@ -3002,13 +3119,13 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list, | |||
3002 | drm_free(*relocs, reloc_count * sizeof(**relocs), | 3119 | drm_free(*relocs, reloc_count * sizeof(**relocs), |
3003 | DRM_MEM_DRIVER); | 3120 | DRM_MEM_DRIVER); |
3004 | *relocs = NULL; | 3121 | *relocs = NULL; |
3005 | return ret; | 3122 | return -EFAULT; |
3006 | } | 3123 | } |
3007 | 3124 | ||
3008 | reloc_index += exec_list[i].relocation_count; | 3125 | reloc_index += exec_list[i].relocation_count; |
3009 | } | 3126 | } |
3010 | 3127 | ||
3011 | return ret; | 3128 | return 0; |
3012 | } | 3129 | } |
3013 | 3130 | ||
3014 | static int | 3131 | static int |
@@ -3017,23 +3134,28 @@ i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list, | |||
3017 | struct drm_i915_gem_relocation_entry *relocs) | 3134 | struct drm_i915_gem_relocation_entry *relocs) |
3018 | { | 3135 | { |
3019 | uint32_t reloc_count = 0, i; | 3136 | uint32_t reloc_count = 0, i; |
3020 | int ret; | 3137 | int ret = 0; |
3021 | 3138 | ||
3022 | for (i = 0; i < buffer_count; i++) { | 3139 | for (i = 0; i < buffer_count; i++) { |
3023 | struct drm_i915_gem_relocation_entry __user *user_relocs; | 3140 | struct drm_i915_gem_relocation_entry __user *user_relocs; |
3141 | int unwritten; | ||
3024 | 3142 | ||
3025 | user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr; | 3143 | user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr; |
3026 | 3144 | ||
3027 | if (ret == 0) { | 3145 | unwritten = copy_to_user(user_relocs, |
3028 | ret = copy_to_user(user_relocs, | 3146 | &relocs[reloc_count], |
3029 | &relocs[reloc_count], | 3147 | exec_list[i].relocation_count * |
3030 | exec_list[i].relocation_count * | 3148 | sizeof(*relocs)); |
3031 | sizeof(*relocs)); | 3149 | |
3150 | if (unwritten) { | ||
3151 | ret = -EFAULT; | ||
3152 | goto err; | ||
3032 | } | 3153 | } |
3033 | 3154 | ||
3034 | reloc_count += exec_list[i].relocation_count; | 3155 | reloc_count += exec_list[i].relocation_count; |
3035 | } | 3156 | } |
3036 | 3157 | ||
3158 | err: | ||
3037 | drm_free(relocs, reloc_count * sizeof(*relocs), DRM_MEM_DRIVER); | 3159 | drm_free(relocs, reloc_count * sizeof(*relocs), DRM_MEM_DRIVER); |
3038 | 3160 | ||
3039 | return ret; | 3161 | return ret; |
@@ -3243,7 +3365,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, | |||
3243 | exec_offset = exec_list[args->buffer_count - 1].offset; | 3365 | exec_offset = exec_list[args->buffer_count - 1].offset; |
3244 | 3366 | ||
3245 | #if WATCH_EXEC | 3367 | #if WATCH_EXEC |
3246 | i915_gem_dump_object(object_list[args->buffer_count - 1], | 3368 | i915_gem_dump_object(batch_obj, |
3247 | args->batch_len, | 3369 | args->batch_len, |
3248 | __func__, | 3370 | __func__, |
3249 | ~0); | 3371 | ~0); |
@@ -3308,10 +3430,12 @@ err: | |||
3308 | (uintptr_t) args->buffers_ptr, | 3430 | (uintptr_t) args->buffers_ptr, |
3309 | exec_list, | 3431 | exec_list, |
3310 | sizeof(*exec_list) * args->buffer_count); | 3432 | sizeof(*exec_list) * args->buffer_count); |
3311 | if (ret) | 3433 | if (ret) { |
3434 | ret = -EFAULT; | ||
3312 | DRM_ERROR("failed to copy %d exec entries " | 3435 | DRM_ERROR("failed to copy %d exec entries " |
3313 | "back to user (%d)\n", | 3436 | "back to user (%d)\n", |
3314 | args->buffer_count, ret); | 3437 | args->buffer_count, ret); |
3438 | } | ||
3315 | } | 3439 | } |
3316 | 3440 | ||
3317 | /* Copy the updated relocations out regardless of current error | 3441 | /* Copy the updated relocations out regardless of current error |
@@ -3593,6 +3717,7 @@ void i915_gem_free_object(struct drm_gem_object *obj) | |||
3593 | i915_gem_free_mmap_offset(obj); | 3717 | i915_gem_free_mmap_offset(obj); |
3594 | 3718 | ||
3595 | drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER); | 3719 | drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER); |
3720 | kfree(obj_priv->bit_17); | ||
3596 | drm_free(obj->driver_private, 1, DRM_MEM_DRIVER); | 3721 | drm_free(obj->driver_private, 1, DRM_MEM_DRIVER); |
3597 | } | 3722 | } |
3598 | 3723 | ||
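[Aside, not part of the patch above] The exec-object and relocation hunks all convert copy_to_user()/copy_from_user() results the same way: those helpers return the number of bytes that could not be copied rather than an errno, so any nonzero return has to be translated to -EFAULT before it reaches user space. A minimal sketch of that idiom follows; the function and variable names are invented here purely for illustration:

#include <linux/uaccess.h>

/* Hypothetical helper showing the error-translation idiom used above. */
static int copy_out_example(void __user *dst, const void *src, size_t len)
{
	unsigned long unwritten;

	unwritten = copy_to_user(dst, src, len);
	if (unwritten)		/* bytes NOT copied, never an errno */
		return -EFAULT;

	return 0;
}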
diff --git a/drivers/gpu/drm/i915/i915_gem_debugfs.c b/drivers/gpu/drm/i915/i915_gem_debugfs.c index a1ac0c5e7307..986f1082c596 100644 --- a/drivers/gpu/drm/i915/i915_gem_debugfs.c +++ b/drivers/gpu/drm/i915/i915_gem_debugfs.c | |||
@@ -234,6 +234,96 @@ static int i915_hws_info(struct seq_file *m, void *data) | |||
234 | return 0; | 234 | return 0; |
235 | } | 235 | } |
236 | 236 | ||
237 | static void i915_dump_pages(struct seq_file *m, struct page **pages, int page_count) | ||
238 | { | ||
239 | int page, i; | ||
240 | uint32_t *mem; | ||
241 | |||
242 | for (page = 0; page < page_count; page++) { | ||
243 | mem = kmap(pages[page]); | ||
244 | for (i = 0; i < PAGE_SIZE; i += 4) | ||
245 | seq_printf(m, "%08x : %08x\n", i, mem[i / 4]); | ||
246 | kunmap(pages[page]); | ||
247 | } | ||
248 | } | ||
249 | |||
250 | static int i915_batchbuffer_info(struct seq_file *m, void *data) | ||
251 | { | ||
252 | struct drm_info_node *node = (struct drm_info_node *) m->private; | ||
253 | struct drm_device *dev = node->minor->dev; | ||
254 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
255 | struct drm_gem_object *obj; | ||
256 | struct drm_i915_gem_object *obj_priv; | ||
257 | int ret; | ||
258 | |||
259 | spin_lock(&dev_priv->mm.active_list_lock); | ||
260 | |||
261 | list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) { | ||
262 | obj = obj_priv->obj; | ||
263 | if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) { | ||
264 | ret = i915_gem_object_get_pages(obj); | ||
265 | if (ret) { | ||
266 | DRM_ERROR("Failed to get pages: %d\n", ret); | ||
267 | spin_unlock(&dev_priv->mm.active_list_lock); | ||
268 | return ret; | ||
269 | } | ||
270 | |||
271 | seq_printf(m, "--- gtt_offset = 0x%08x\n", obj_priv->gtt_offset); | ||
272 | i915_dump_pages(m, obj_priv->pages, obj->size / PAGE_SIZE); | ||
273 | |||
274 | i915_gem_object_put_pages(obj); | ||
275 | } | ||
276 | } | ||
277 | |||
278 | spin_unlock(&dev_priv->mm.active_list_lock); | ||
279 | |||
280 | return 0; | ||
281 | } | ||
282 | |||
283 | static int i915_ringbuffer_data(struct seq_file *m, void *data) | ||
284 | { | ||
285 | struct drm_info_node *node = (struct drm_info_node *) m->private; | ||
286 | struct drm_device *dev = node->minor->dev; | ||
287 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
288 | u8 *virt; | ||
289 | uint32_t *ptr, off; | ||
290 | |||
291 | if (!dev_priv->ring.ring_obj) { | ||
292 | seq_printf(m, "No ringbuffer setup\n"); | ||
293 | return 0; | ||
294 | } | ||
295 | |||
296 | virt = dev_priv->ring.virtual_start; | ||
297 | |||
298 | for (off = 0; off < dev_priv->ring.Size; off += 4) { | ||
299 | ptr = (uint32_t *)(virt + off); | ||
300 | seq_printf(m, "%08x : %08x\n", off, *ptr); | ||
301 | } | ||
302 | |||
303 | return 0; | ||
304 | } | ||
305 | |||
306 | static int i915_ringbuffer_info(struct seq_file *m, void *data) | ||
307 | { | ||
308 | struct drm_info_node *node = (struct drm_info_node *) m->private; | ||
309 | struct drm_device *dev = node->minor->dev; | ||
310 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
311 | unsigned int head, tail, mask; | ||
312 | |||
313 | head = I915_READ(PRB0_HEAD) & HEAD_ADDR; | ||
314 | tail = I915_READ(PRB0_TAIL) & TAIL_ADDR; | ||
315 | mask = dev_priv->ring.tail_mask; | ||
316 | |||
317 | seq_printf(m, "RingHead : %08x\n", head); | ||
318 | seq_printf(m, "RingTail : %08x\n", tail); | ||
319 | seq_printf(m, "RingMask : %08x\n", mask); | ||
320 | seq_printf(m, "RingSize : %08lx\n", dev_priv->ring.Size); | ||
321 | seq_printf(m, "Acthd : %08x\n", I915_READ(IS_I965G(dev) ? ACTHD_I965 : ACTHD)); | ||
322 | |||
323 | return 0; | ||
324 | } | ||
325 | |||
326 | |||
237 | static struct drm_info_list i915_gem_debugfs_list[] = { | 327 | static struct drm_info_list i915_gem_debugfs_list[] = { |
238 | {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, | 328 | {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, |
239 | {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST}, | 329 | {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST}, |
@@ -243,6 +333,9 @@ static struct drm_info_list i915_gem_debugfs_list[] = { | |||
243 | {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, | 333 | {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, |
244 | {"i915_gem_interrupt", i915_interrupt_info, 0}, | 334 | {"i915_gem_interrupt", i915_interrupt_info, 0}, |
245 | {"i915_gem_hws", i915_hws_info, 0}, | 335 | {"i915_gem_hws", i915_hws_info, 0}, |
336 | {"i915_ringbuffer_data", i915_ringbuffer_data, 0}, | ||
337 | {"i915_ringbuffer_info", i915_ringbuffer_info, 0}, | ||
338 | {"i915_batchbuffers", i915_batchbuffer_info, 0}, | ||
246 | }; | 339 | }; |
247 | #define I915_GEM_DEBUGFS_ENTRIES ARRAY_SIZE(i915_gem_debugfs_list) | 340 | #define I915_GEM_DEBUGFS_ENTRIES ARRAY_SIZE(i915_gem_debugfs_list) |
248 | 341 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 6be3f927c86a..f27e523c764f 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c | |||
@@ -25,6 +25,8 @@ | |||
25 | * | 25 | * |
26 | */ | 26 | */ |
27 | 27 | ||
28 | #include "linux/string.h" | ||
29 | #include "linux/bitops.h" | ||
28 | #include "drmP.h" | 30 | #include "drmP.h" |
29 | #include "drm.h" | 31 | #include "drm.h" |
30 | #include "i915_drm.h" | 32 | #include "i915_drm.h" |
@@ -127,8 +129,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) | |||
127 | swizzle_y = I915_BIT_6_SWIZZLE_9_11; | 129 | swizzle_y = I915_BIT_6_SWIZZLE_9_11; |
128 | } else { | 130 | } else { |
129 | /* Bit 17 swizzling by the CPU in addition. */ | 131 | /* Bit 17 swizzling by the CPU in addition. */ |
130 | swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; | 132 | swizzle_x = I915_BIT_6_SWIZZLE_9_10_17; |
131 | swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; | 133 | swizzle_y = I915_BIT_6_SWIZZLE_9_17; |
132 | } | 134 | } |
133 | break; | 135 | break; |
134 | } | 136 | } |
@@ -288,6 +290,19 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, | |||
288 | args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x; | 290 | args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x; |
289 | else | 291 | else |
290 | args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y; | 292 | args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y; |
293 | |||
294 | /* Hide bit 17 swizzling from the user. This prevents old Mesa | ||
295 | * from aborting the application on sw fallbacks to bit 17, | ||
296 | * and we use the pread/pwrite bit17 paths to swizzle for it. | ||
297 | * If there was a user that was relying on the swizzle | ||
298 | * information for drm_intel_bo_map()ed reads/writes this would | ||
299 | * break it, but we don't have any of those. | ||
300 | */ | ||
301 | if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17) | ||
302 | args->swizzle_mode = I915_BIT_6_SWIZZLE_9; | ||
303 | if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17) | ||
304 | args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10; | ||
305 | |||
291 | /* If we can't handle the swizzling, make it untiled. */ | 306 | /* If we can't handle the swizzling, make it untiled. */ |
292 | if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) { | 307 | if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) { |
293 | args->tiling_mode = I915_TILING_NONE; | 308 | args->tiling_mode = I915_TILING_NONE; |
@@ -354,8 +369,100 @@ i915_gem_get_tiling(struct drm_device *dev, void *data, | |||
354 | DRM_ERROR("unknown tiling mode\n"); | 369 | DRM_ERROR("unknown tiling mode\n"); |
355 | } | 370 | } |
356 | 371 | ||
372 | /* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */ | ||
373 | if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17) | ||
374 | args->swizzle_mode = I915_BIT_6_SWIZZLE_9; | ||
375 | if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17) | ||
376 | args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10; | ||
377 | |||
357 | drm_gem_object_unreference(obj); | 378 | drm_gem_object_unreference(obj); |
358 | mutex_unlock(&dev->struct_mutex); | 379 | mutex_unlock(&dev->struct_mutex); |
359 | 380 | ||
360 | return 0; | 381 | return 0; |
361 | } | 382 | } |
383 | |||
384 | /** | ||
385 | * Swap the two 64-byte halves of every 128-byte block in this page, to | ||
386 | * account for it having a new bit 17 in its physical address and therefore | ||
387 | * being interpreted differently by the GPU. | ||
388 | */ | ||
389 | static int | ||
390 | i915_gem_swizzle_page(struct page *page) | ||
391 | { | ||
392 | char *vaddr; | ||
393 | int i; | ||
394 | char temp[64]; | ||
395 | |||
396 | vaddr = kmap(page); | ||
397 | if (vaddr == NULL) | ||
398 | return -ENOMEM; | ||
399 | |||
400 | for (i = 0; i < PAGE_SIZE; i += 128) { | ||
401 | memcpy(temp, &vaddr[i], 64); | ||
402 | memcpy(&vaddr[i], &vaddr[i + 64], 64); | ||
403 | memcpy(&vaddr[i + 64], temp, 64); | ||
404 | } | ||
405 | |||
406 | kunmap(page); | ||
407 | |||
408 | return 0; | ||
409 | } | ||
410 | |||
411 | void | ||
412 | i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj) | ||
413 | { | ||
414 | struct drm_device *dev = obj->dev; | ||
415 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
416 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
417 | int page_count = obj->size >> PAGE_SHIFT; | ||
418 | int i; | ||
419 | |||
420 | if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17) | ||
421 | return; | ||
422 | |||
423 | if (obj_priv->bit_17 == NULL) | ||
424 | return; | ||
425 | |||
426 | for (i = 0; i < page_count; i++) { | ||
427 | char new_bit_17 = page_to_phys(obj_priv->pages[i]) >> 17; | ||
428 | if ((new_bit_17 & 0x1) != | ||
429 | (test_bit(i, obj_priv->bit_17) != 0)) { | ||
430 | int ret = i915_gem_swizzle_page(obj_priv->pages[i]); | ||
431 | if (ret != 0) { | ||
432 | DRM_ERROR("Failed to swizzle page\n"); | ||
433 | return; | ||
434 | } | ||
435 | set_page_dirty(obj_priv->pages[i]); | ||
436 | } | ||
437 | } | ||
438 | } | ||
439 | |||
440 | void | ||
441 | i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj) | ||
442 | { | ||
443 | struct drm_device *dev = obj->dev; | ||
444 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
445 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
446 | int page_count = obj->size >> PAGE_SHIFT; | ||
447 | int i; | ||
448 | |||
449 | if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17) | ||
450 | return; | ||
451 | |||
452 | if (obj_priv->bit_17 == NULL) { | ||
453 | obj_priv->bit_17 = kmalloc(BITS_TO_LONGS(page_count) * | ||
454 | sizeof(long), GFP_KERNEL); | ||
455 | if (obj_priv->bit_17 == NULL) { | ||
456 | DRM_ERROR("Failed to allocate memory for bit 17 " | ||
457 | "record\n"); | ||
458 | return; | ||
459 | } | ||
460 | } | ||
461 | |||
462 | for (i = 0; i < page_count; i++) { | ||
463 | if (page_to_phys(obj_priv->pages[i]) & (1 << 17)) | ||
464 | __set_bit(i, obj_priv->bit_17); | ||
465 | else | ||
466 | __clear_bit(i, obj_priv->bit_17); | ||
467 | } | ||
468 | } | ||
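[Aside, not part of the patch above] The swizzle-mode names used here (9, 9_10, 9_17, 9_10_17) describe which physical address bits the memory controller folds into bit 6 of the address. The sketch below is a user-space model written only to illustrate why a flip of physical bit 17 moves data by 64 bytes, and hence why i915_gem_swizzle_page() swaps 64-byte halves; the helper name and the exact XOR are assumptions based on the mode names, not code taken from the driver:

#include <stdint.h>
#include <stdio.h>

/* Model of a bit-6 swizzle driven by address bits 9, 10 and 17, as the
 * I915_BIT_6_SWIZZLE_9_10_17 name suggests. */
static uint32_t swizzle_9_10_17(uint32_t addr)
{
	uint32_t bit = ((addr >> 9) ^ (addr >> 10) ^ (addr >> 17)) & 1;

	return addr ^ (bit << 6);
}

int main(void)
{
	uint32_t addr = 0x1000;

	/* The same page offset with bit 17 set ends up 64 bytes away. */
	printf("%#x -> %#x\n", addr, swizzle_9_10_17(addr));
	printf("%#x -> %#x\n", addr | (1u << 17),
	       swizzle_9_10_17(addr | (1u << 17)));
	return 0;
}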
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 64773ce52964..c2c8e95ff14d 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -367,6 +367,7 @@ static const intel_limit_t intel_limits[] = { | |||
367 | .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, | 367 | .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, |
368 | .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, | 368 | .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, |
369 | .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, | 369 | .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, |
370 | .find_pll = intel_find_best_PLL, | ||
370 | }, | 371 | }, |
371 | { /* INTEL_LIMIT_IGD_LVDS */ | 372 | { /* INTEL_LIMIT_IGD_LVDS */ |
372 | .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, | 373 | .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, |
@@ -380,6 +381,7 @@ static const intel_limit_t intel_limits[] = { | |||
380 | /* IGD only supports single-channel mode. */ | 381 | /* IGD only supports single-channel mode. */ |
381 | .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, | 382 | .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, |
382 | .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW }, | 383 | .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW }, |
384 | .find_pll = intel_find_best_PLL, | ||
383 | }, | 385 | }, |
384 | 386 | ||
385 | }; | 387 | }; |
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index b7f0ebe9f810..3e094beecb99 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c | |||
@@ -864,8 +864,8 @@ static void intelfb_sysrq(int dummy1, struct tty_struct *dummy3) | |||
864 | 864 | ||
865 | static struct sysrq_key_op sysrq_intelfb_restore_op = { | 865 | static struct sysrq_key_op sysrq_intelfb_restore_op = { |
866 | .handler = intelfb_sysrq, | 866 | .handler = intelfb_sysrq, |
867 | .help_msg = "force fb", | 867 | .help_msg = "force-fb(G)", |
868 | .action_msg = "force restore of fb console", | 868 | .action_msg = "Restore framebuffer console", |
869 | }; | 869 | }; |
870 | 870 | ||
871 | int intelfb_probe(struct drm_device *dev) | 871 | int intelfb_probe(struct drm_device *dev) |
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index b06a4a3ff08d..550374225388 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
@@ -38,7 +38,7 @@ | |||
38 | struct intel_hdmi_priv { | 38 | struct intel_hdmi_priv { |
39 | u32 sdvox_reg; | 39 | u32 sdvox_reg; |
40 | u32 save_SDVOX; | 40 | u32 save_SDVOX; |
41 | int has_hdmi_sink; | 41 | bool has_hdmi_sink; |
42 | }; | 42 | }; |
43 | 43 | ||
44 | static void intel_hdmi_mode_set(struct drm_encoder *encoder, | 44 | static void intel_hdmi_mode_set(struct drm_encoder *encoder, |
@@ -128,6 +128,22 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder, | |||
128 | return true; | 128 | return true; |
129 | } | 129 | } |
130 | 130 | ||
131 | static void | ||
132 | intel_hdmi_sink_detect(struct drm_connector *connector) | ||
133 | { | ||
134 | struct intel_output *intel_output = to_intel_output(connector); | ||
135 | struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; | ||
136 | struct edid *edid = NULL; | ||
137 | |||
138 | edid = drm_get_edid(&intel_output->base, | ||
139 | &intel_output->ddc_bus->adapter); | ||
140 | if (edid != NULL) { | ||
141 | hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid); | ||
142 | kfree(edid); | ||
143 | intel_output->base.display_info.raw_edid = NULL; | ||
144 | } | ||
145 | } | ||
146 | |||
131 | static enum drm_connector_status | 147 | static enum drm_connector_status |
132 | intel_hdmi_detect(struct drm_connector *connector) | 148 | intel_hdmi_detect(struct drm_connector *connector) |
133 | { | 149 | { |
@@ -158,9 +174,10 @@ intel_hdmi_detect(struct drm_connector *connector) | |||
158 | return connector_status_unknown; | 174 | return connector_status_unknown; |
159 | } | 175 | } |
160 | 176 | ||
161 | if ((I915_READ(PORT_HOTPLUG_STAT) & bit) != 0) | 177 | if ((I915_READ(PORT_HOTPLUG_STAT) & bit) != 0) { |
178 | intel_hdmi_sink_detect(connector); | ||
162 | return connector_status_connected; | 179 | return connector_status_connected; |
163 | else | 180 | } else |
164 | return connector_status_disconnected; | 181 | return connector_status_disconnected; |
165 | } | 182 | } |
166 | 183 | ||
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 7b31f55f55c8..9913651c1e17 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c | |||
@@ -1357,6 +1357,23 @@ void intel_sdvo_set_hotplug(struct drm_connector *connector, int on) | |||
1357 | intel_sdvo_read_response(intel_output, &response, 2); | 1357 | intel_sdvo_read_response(intel_output, &response, 2); |
1358 | } | 1358 | } |
1359 | 1359 | ||
1360 | static void | ||
1361 | intel_sdvo_hdmi_sink_detect(struct drm_connector *connector) | ||
1362 | { | ||
1363 | struct intel_output *intel_output = to_intel_output(connector); | ||
1364 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | ||
1365 | struct edid *edid = NULL; | ||
1366 | |||
1367 | intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus); | ||
1368 | edid = drm_get_edid(&intel_output->base, | ||
1369 | &intel_output->ddc_bus->adapter); | ||
1370 | if (edid != NULL) { | ||
1371 | sdvo_priv->is_hdmi = drm_detect_hdmi_monitor(edid); | ||
1372 | kfree(edid); | ||
1373 | intel_output->base.display_info.raw_edid = NULL; | ||
1374 | } | ||
1375 | } | ||
1376 | |||
1360 | static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector) | 1377 | static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector) |
1361 | { | 1378 | { |
1362 | u8 response[2]; | 1379 | u8 response[2]; |
@@ -1371,9 +1388,10 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect | |||
1371 | if (status != SDVO_CMD_STATUS_SUCCESS) | 1388 | if (status != SDVO_CMD_STATUS_SUCCESS) |
1372 | return connector_status_unknown; | 1389 | return connector_status_unknown; |
1373 | 1390 | ||
1374 | if ((response[0] != 0) || (response[1] != 0)) | 1391 | if ((response[0] != 0) || (response[1] != 0)) { |
1392 | intel_sdvo_hdmi_sink_detect(connector); | ||
1375 | return connector_status_connected; | 1393 | return connector_status_connected; |
1376 | else | 1394 | } else |
1377 | return connector_status_disconnected; | 1395 | return connector_status_disconnected; |
1378 | } | 1396 | } |
1379 | 1397 | ||
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 0e8a9185f676..d73f5f473e38 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig | |||
@@ -692,6 +692,16 @@ config SENSORS_PCF8591 | |||
692 | These devices are hard to detect and rarely found on mainstream | 692 | These devices are hard to detect and rarely found on mainstream |
693 | hardware. If unsure, say N. | 693 | hardware. If unsure, say N. |
694 | 694 | ||
695 | config SENSORS_SHT15 | ||
696 | tristate "Sensiron humidity and temperature sensors. SHT15 and compat." | ||
697 | depends on GENERIC_GPIO | ||
698 | help | ||
699 | If you say yes here you get support for the Sensirion SHT10, SHT11, | ||
700 | SHT15, SHT71, SHT75 humidity and temperature sensors. | ||
701 | |||
702 | This driver can also be built as a module. If so, the module | ||
703 | will be called sht15. | ||
704 | |||
695 | config SENSORS_SIS5595 | 705 | config SENSORS_SIS5595 |
696 | tristate "Silicon Integrated Systems Corp. SiS5595" | 706 | tristate "Silicon Integrated Systems Corp. SiS5595" |
697 | depends on PCI | 707 | depends on PCI |
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile index 1d3757837b4f..0ae26984ba45 100644 --- a/drivers/hwmon/Makefile +++ b/drivers/hwmon/Makefile | |||
@@ -76,6 +76,7 @@ obj-$(CONFIG_SENSORS_MAX6650) += max6650.o | |||
76 | obj-$(CONFIG_SENSORS_PC87360) += pc87360.o | 76 | obj-$(CONFIG_SENSORS_PC87360) += pc87360.o |
77 | obj-$(CONFIG_SENSORS_PC87427) += pc87427.o | 77 | obj-$(CONFIG_SENSORS_PC87427) += pc87427.o |
78 | obj-$(CONFIG_SENSORS_PCF8591) += pcf8591.o | 78 | obj-$(CONFIG_SENSORS_PCF8591) += pcf8591.o |
79 | obj-$(CONFIG_SENSORS_SHT15) += sht15.o | ||
79 | obj-$(CONFIG_SENSORS_SIS5595) += sis5595.o | 80 | obj-$(CONFIG_SENSORS_SIS5595) += sis5595.o |
80 | obj-$(CONFIG_SENSORS_SMSC47B397)+= smsc47b397.o | 81 | obj-$(CONFIG_SENSORS_SMSC47B397)+= smsc47b397.o |
81 | obj-$(CONFIG_SENSORS_SMSC47M1) += smsc47m1.o | 82 | obj-$(CONFIG_SENSORS_SMSC47M1) += smsc47m1.o |
diff --git a/drivers/hwmon/hp_accel.c b/drivers/hwmon/hp_accel.c index 55d3dc565be6..abca7e9f953b 100644 --- a/drivers/hwmon/hp_accel.c +++ b/drivers/hwmon/hp_accel.c | |||
@@ -34,7 +34,6 @@ | |||
34 | #include <linux/wait.h> | 34 | #include <linux/wait.h> |
35 | #include <linux/poll.h> | 35 | #include <linux/poll.h> |
36 | #include <linux/freezer.h> | 36 | #include <linux/freezer.h> |
37 | #include <linux/version.h> | ||
38 | #include <linux/uaccess.h> | 37 | #include <linux/uaccess.h> |
39 | #include <linux/leds.h> | 38 | #include <linux/leds.h> |
40 | #include <acpi/acpi_drivers.h> | 39 | #include <acpi/acpi_drivers.h> |
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c new file mode 100644 index 000000000000..6cbdc2fea734 --- /dev/null +++ b/drivers/hwmon/sht15.c | |||
@@ -0,0 +1,692 @@ | |||
1 | /* | ||
2 | * sht15.c - support for the SHT15 Temperature and Humidity Sensor | ||
3 | * | ||
4 | * Copyright (c) 2009 Jonathan Cameron | ||
5 | * | ||
6 | * Copyright (c) 2007 Wouter Horre | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * Currently ignoring checksum on readings. | ||
13 | * Default resolution only (14bit temp, 12bit humidity). | ||
14 | * Ignoring battery status. | ||
15 | * Heater not enabled. | ||
16 | * Timings are all conservative. | ||
17 | * | ||
18 | * Data sheet available (1/2009) at | ||
19 | * http://www.sensirion.ch/en/pdf/product_information/Datasheet-humidity-sensor-SHT1x.pdf | ||
20 | * | ||
21 | * Regulator supply name = vcc | ||
22 | */ | ||
23 | |||
24 | #include <linux/interrupt.h> | ||
25 | #include <linux/irq.h> | ||
26 | #include <linux/gpio.h> | ||
27 | #include <linux/module.h> | ||
28 | #include <linux/init.h> | ||
29 | #include <linux/hwmon.h> | ||
30 | #include <linux/hwmon-sysfs.h> | ||
31 | #include <linux/mutex.h> | ||
32 | #include <linux/platform_device.h> | ||
33 | #include <linux/delay.h> | ||
34 | #include <linux/jiffies.h> | ||
35 | #include <linux/err.h> | ||
36 | #include <linux/sht15.h> | ||
37 | #include <linux/regulator/consumer.h> | ||
38 | #include <asm/atomic.h> | ||
39 | |||
40 | #define SHT15_MEASURE_TEMP 3 | ||
41 | #define SHT15_MEASURE_RH 5 | ||
42 | |||
43 | #define SHT15_READING_NOTHING 0 | ||
44 | #define SHT15_READING_TEMP 1 | ||
45 | #define SHT15_READING_HUMID 2 | ||
46 | |||
47 | /* Min timings in nsecs */ | ||
48 | #define SHT15_TSCKL 100 /* clock low */ | ||
49 | #define SHT15_TSCKH 100 /* clock high */ | ||
50 | #define SHT15_TSU 150 /* data setup time */ | ||
51 | |||
52 | /** | ||
53 | * struct sht15_temppair - elements of voltage dependent temp calc | ||
54 | * @vdd: supply voltage in microvolts | ||
55 | * @d1: see data sheet | ||
56 | */ | ||
57 | struct sht15_temppair { | ||
58 | int vdd; /* microvolts */ | ||
59 | int d1; | ||
60 | }; | ||
61 | |||
62 | /* Table 9 from data sheet - relates temperature calculation | ||
63 | * to supply voltage. | ||
64 | */ | ||
65 | static const struct sht15_temppair temppoints[] = { | ||
66 | { 2500000, -39400 }, | ||
67 | { 3000000, -39600 }, | ||
68 | { 3500000, -39700 }, | ||
69 | { 4000000, -39800 }, | ||
70 | { 5000000, -40100 }, | ||
71 | }; | ||
72 | |||
73 | /** | ||
74 | * struct sht15_data - device instance specific data | ||
75 | * @pdata: platform data (GPIOs etc) | ||
76 | * @read_work: bh of interrupt handler | ||
77 | * @wait_queue: wait queue for getting values from device | ||
78 | * @val_temp: last temperature value read from device | ||
79 | * @val_humid: last humidity value read from device | ||
80 | * @flag: status flag used to identify what the last request was | ||
81 | * @valid: are the current stored values valid (start condition) | ||
82 | * @last_updat: time of last update | ||
83 | * @read_lock: mutex to ensure only one read in progress | ||
84 | * at a time. | ||
85 | * @dev: associated device structure | ||
86 | * @hwmon_dev: device associated with hwmon subsystem | ||
87 | * @reg: associated regulator (if specified) | ||
88 | * @nb: notifier block to handle notifications of voltage changes | ||
89 | * @supply_uV: local copy of the supply voltage, allowing use of | ||
90 | * the regulator consumer interface when available | ||
91 | * @supply_uV_valid: false until an updated value has been | ||
92 | * obtained from the regulator; any calculations based | ||
93 | * upon it are invalid until then. | ||
94 | * @update_supply_work: work struct that is used to update the supply_uV | ||
95 | * @interrupt_handled: flag used to indicate a handler has been scheduled | ||
96 | */ | ||
97 | struct sht15_data { | ||
98 | struct sht15_platform_data *pdata; | ||
99 | struct work_struct read_work; | ||
100 | wait_queue_head_t wait_queue; | ||
101 | uint16_t val_temp; | ||
102 | uint16_t val_humid; | ||
103 | u8 flag; | ||
104 | u8 valid; | ||
105 | unsigned long last_updat; | ||
106 | struct mutex read_lock; | ||
107 | struct device *dev; | ||
108 | struct device *hwmon_dev; | ||
109 | struct regulator *reg; | ||
110 | struct notifier_block nb; | ||
111 | int supply_uV; | ||
112 | int supply_uV_valid; | ||
113 | struct work_struct update_supply_work; | ||
114 | atomic_t interrupt_handled; | ||
115 | }; | ||
116 | |||
117 | /** | ||
118 | * sht15_connection_reset() - reset the comms interface | ||
119 | * @data: sht15 specific data | ||
120 | * | ||
121 | * This implements section 3.4 of the data sheet | ||
122 | */ | ||
123 | static void sht15_connection_reset(struct sht15_data *data) | ||
124 | { | ||
125 | int i; | ||
126 | gpio_direction_output(data->pdata->gpio_data, 1); | ||
127 | ndelay(SHT15_TSCKL); | ||
128 | gpio_set_value(data->pdata->gpio_sck, 0); | ||
129 | ndelay(SHT15_TSCKL); | ||
130 | for (i = 0; i < 9; ++i) { | ||
131 | gpio_set_value(data->pdata->gpio_sck, 1); | ||
132 | ndelay(SHT15_TSCKH); | ||
133 | gpio_set_value(data->pdata->gpio_sck, 0); | ||
134 | ndelay(SHT15_TSCKL); | ||
135 | } | ||
136 | } | ||
137 | /** | ||
138 | * sht15_send_bit() - send an individual bit to the device | ||
139 | * @data: device state data | ||
140 | * @val: value of bit to be sent | ||
141 | **/ | ||
142 | static inline void sht15_send_bit(struct sht15_data *data, int val) | ||
143 | { | ||
144 | |||
145 | gpio_set_value(data->pdata->gpio_data, val); | ||
146 | ndelay(SHT15_TSU); | ||
147 | gpio_set_value(data->pdata->gpio_sck, 1); | ||
148 | ndelay(SHT15_TSCKH); | ||
149 | gpio_set_value(data->pdata->gpio_sck, 0); | ||
150 | ndelay(SHT15_TSCKL); /* clock low time */ | ||
151 | } | ||
152 | |||
153 | /** | ||
154 | * sht15_transmission_start() - specific sequence for new transmission | ||
155 | * | ||
156 | * @data: device state data | ||
157 | * Timings for this are not documented on the data sheet, so very | ||
158 | * conservative ones are used in the implementation. This implements | ||
159 | * figure 12 on the data sheet. | ||
160 | **/ | ||
161 | static void sht15_transmission_start(struct sht15_data *data) | ||
162 | { | ||
163 | /* ensure data is high and output */ | ||
164 | gpio_direction_output(data->pdata->gpio_data, 1); | ||
165 | ndelay(SHT15_TSU); | ||
166 | gpio_set_value(data->pdata->gpio_sck, 0); | ||
167 | ndelay(SHT15_TSCKL); | ||
168 | gpio_set_value(data->pdata->gpio_sck, 1); | ||
169 | ndelay(SHT15_TSCKH); | ||
170 | gpio_set_value(data->pdata->gpio_data, 0); | ||
171 | ndelay(SHT15_TSU); | ||
172 | gpio_set_value(data->pdata->gpio_sck, 0); | ||
173 | ndelay(SHT15_TSCKL); | ||
174 | gpio_set_value(data->pdata->gpio_sck, 1); | ||
175 | ndelay(SHT15_TSCKH); | ||
176 | gpio_set_value(data->pdata->gpio_data, 1); | ||
177 | ndelay(SHT15_TSU); | ||
178 | gpio_set_value(data->pdata->gpio_sck, 0); | ||
179 | ndelay(SHT15_TSCKL); | ||
180 | } | ||
181 | /** | ||
182 | * sht15_send_byte() - send a single byte to the device | ||
183 | * @data: device state | ||
184 | * @byte: value to be sent | ||
185 | **/ | ||
186 | static void sht15_send_byte(struct sht15_data *data, u8 byte) | ||
187 | { | ||
188 | int i; | ||
189 | for (i = 0; i < 8; i++) { | ||
190 | sht15_send_bit(data, !!(byte & 0x80)); | ||
191 | byte <<= 1; | ||
192 | } | ||
193 | } | ||
194 | /** | ||
195 | * sht15_wait_for_response() - checks for ack from device | ||
196 | * @data: device state | ||
197 | **/ | ||
198 | static int sht15_wait_for_response(struct sht15_data *data) | ||
199 | { | ||
200 | gpio_direction_input(data->pdata->gpio_data); | ||
201 | gpio_set_value(data->pdata->gpio_sck, 1); | ||
202 | ndelay(SHT15_TSCKH); | ||
203 | if (gpio_get_value(data->pdata->gpio_data)) { | ||
204 | gpio_set_value(data->pdata->gpio_sck, 0); | ||
205 | dev_err(data->dev, "Command not acknowledged\n"); | ||
206 | sht15_connection_reset(data); | ||
207 | return -EIO; | ||
208 | } | ||
209 | gpio_set_value(data->pdata->gpio_sck, 0); | ||
210 | ndelay(SHT15_TSCKL); | ||
211 | return 0; | ||
212 | } | ||
213 | |||
214 | /** | ||
215 | * sht15_send_cmd() - Sends a command to the device. | ||
216 | * @data: device state | ||
217 | * @cmd: command byte to be sent | ||
218 | * | ||
219 | * On entry, sck is output low, data is output pull high | ||
220 | * and the interrupt disabled. | ||
221 | **/ | ||
222 | static int sht15_send_cmd(struct sht15_data *data, u8 cmd) | ||
223 | { | ||
224 | int ret = 0; | ||
225 | sht15_transmission_start(data); | ||
226 | sht15_send_byte(data, cmd); | ||
227 | ret = sht15_wait_for_response(data); | ||
228 | return ret; | ||
229 | } | ||
230 | /** | ||
231 | * sht15_update_single_val() - get a new value from device | ||
232 | * @data: device instance specific data | ||
233 | * @command: command sent to request value | ||
234 | * @timeout_msecs: timeout after which comms are assumed | ||
235 | * to have failed and are reset. | ||
236 | **/ | ||
237 | static inline int sht15_update_single_val(struct sht15_data *data, | ||
238 | int command, | ||
239 | int timeout_msecs) | ||
240 | { | ||
241 | int ret; | ||
242 | ret = sht15_send_cmd(data, command); | ||
243 | if (ret) | ||
244 | return ret; | ||
245 | |||
246 | gpio_direction_input(data->pdata->gpio_data); | ||
247 | atomic_set(&data->interrupt_handled, 0); | ||
248 | |||
249 | enable_irq(gpio_to_irq(data->pdata->gpio_data)); | ||
250 | if (gpio_get_value(data->pdata->gpio_data) == 0) { | ||
251 | disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data)); | ||
252 | /* Only relevant if the interrupt hasn't occurred. */ | ||
253 | if (!atomic_read(&data->interrupt_handled)) | ||
254 | schedule_work(&data->read_work); | ||
255 | } | ||
256 | ret = wait_event_timeout(data->wait_queue, | ||
257 | (data->flag == SHT15_READING_NOTHING), | ||
258 | msecs_to_jiffies(timeout_msecs)); | ||
259 | if (ret == 0) {/* timeout occurred */ | ||
260 | disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data)); | ||
261 | sht15_connection_reset(data); | ||
262 | return -ETIME; | ||
263 | } | ||
264 | return 0; | ||
265 | } | ||
266 | |||
267 | /** | ||
268 | * sht15_update_vals() - get updated readings from device if too old | ||
269 | * @data: device state | ||
270 | **/ | ||
271 | static int sht15_update_vals(struct sht15_data *data) | ||
272 | { | ||
273 | int ret = 0; | ||
274 | int timeout = HZ; | ||
275 | |||
276 | mutex_lock(&data->read_lock); | ||
277 | if (time_after(jiffies, data->last_updat + timeout) | ||
278 | || !data->valid) { | ||
279 | data->flag = SHT15_READING_HUMID; | ||
280 | ret = sht15_update_single_val(data, SHT15_MEASURE_RH, 160); | ||
281 | if (ret) | ||
282 | goto error_ret; | ||
283 | data->flag = SHT15_READING_TEMP; | ||
284 | ret = sht15_update_single_val(data, SHT15_MEASURE_TEMP, 400); | ||
285 | if (ret) | ||
286 | goto error_ret; | ||
287 | data->valid = 1; | ||
288 | data->last_updat = jiffies; | ||
289 | } | ||
290 | error_ret: | ||
291 | mutex_unlock(&data->read_lock); | ||
292 | |||
293 | return ret; | ||
294 | } | ||
295 | |||
296 | /** | ||
297 | * sht15_calc_temp() - convert the raw reading to a temperature | ||
298 | * @data: device state | ||
299 | * | ||
300 | * As per section 4.3 of the data sheet. | ||
301 | **/ | ||
302 | static inline int sht15_calc_temp(struct sht15_data *data) | ||
303 | { | ||
304 | int d1 = 0; | ||
305 | int i; | ||
306 | |||
307 | for (i = 1; i < ARRAY_SIZE(temppoints) - 1; i++) | ||
308 | /* Find pointer to interpolate */ | ||
309 | if (data->supply_uV > temppoints[i - 1].vdd) { | ||
310 | d1 = (data->supply_uV - temppoints[i - 1].vdd) | ||
311 | * (temppoints[i].d1 - temppoints[i - 1].d1) | ||
312 | / (temppoints[i].vdd - temppoints[i - 1].vdd) | ||
313 | + temppoints[i - 1].d1; | ||
314 | break; | ||
315 | } | ||
316 | |||
317 | return data->val_temp*10 + d1; | ||
318 | } | ||
319 | |||
320 | /** | ||
321 | * sht15_calc_humid() - using last temperature convert raw to humid | ||
322 | * @data: device state | ||
323 | * | ||
324 | * This is the temperature compensated version as per section 4.2 of | ||
325 | * the data sheet. | ||
326 | **/ | ||
327 | static inline int sht15_calc_humid(struct sht15_data *data) | ||
328 | { | ||
329 | int RHlinear; /* milli percent */ | ||
330 | int temp = sht15_calc_temp(data); | ||
331 | |||
332 | const int c1 = -4; | ||
333 | const int c2 = 40500; /* x 10 ^ -6 */ | ||
334 | const int c3 = 2800; /* x10 ^ -9 */ | ||
335 | |||
336 | RHlinear = c1*1000 | ||
337 | + c2 * data->val_humid/1000 | ||
338 | + (data->val_humid * data->val_humid * c3)/1000000; | ||
339 | return (temp - 25000) * (10000 + 800 * data->val_humid) | ||
340 | / 1000000 + RHlinear; | ||
341 | } | ||
342 | |||
343 | static ssize_t sht15_show_temp(struct device *dev, | ||
344 | struct device_attribute *attr, | ||
345 | char *buf) | ||
346 | { | ||
347 | int ret; | ||
348 | struct sht15_data *data = dev_get_drvdata(dev); | ||
349 | |||
350 | /* Technically no need to read humidity as well */ | ||
351 | ret = sht15_update_vals(data); | ||
352 | |||
353 | return ret ? ret : sprintf(buf, "%d\n", | ||
354 | sht15_calc_temp(data)); | ||
355 | } | ||
356 | |||
357 | static ssize_t sht15_show_humidity(struct device *dev, | ||
358 | struct device_attribute *attr, | ||
359 | char *buf) | ||
360 | { | ||
361 | int ret; | ||
362 | struct sht15_data *data = dev_get_drvdata(dev); | ||
363 | |||
364 | ret = sht15_update_vals(data); | ||
365 | |||
366 | return ret ? ret : sprintf(buf, "%d\n", sht15_calc_humid(data)); | ||
367 | |||
368 | }; | ||
369 | static ssize_t show_name(struct device *dev, | ||
370 | struct device_attribute *attr, | ||
371 | char *buf) | ||
372 | { | ||
373 | struct platform_device *pdev = to_platform_device(dev); | ||
374 | return sprintf(buf, "%s\n", pdev->name); | ||
375 | } | ||
376 | |||
377 | static SENSOR_DEVICE_ATTR(temp1_input, | ||
378 | S_IRUGO, sht15_show_temp, | ||
379 | NULL, 0); | ||
380 | static SENSOR_DEVICE_ATTR(humidity1_input, | ||
381 | S_IRUGO, sht15_show_humidity, | ||
382 | NULL, 0); | ||
383 | static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); | ||
384 | static struct attribute *sht15_attrs[] = { | ||
385 | &sensor_dev_attr_temp1_input.dev_attr.attr, | ||
386 | &sensor_dev_attr_humidity1_input.dev_attr.attr, | ||
387 | &dev_attr_name.attr, | ||
388 | NULL, | ||
389 | }; | ||
390 | |||
391 | static const struct attribute_group sht15_attr_group = { | ||
392 | .attrs = sht15_attrs, | ||
393 | }; | ||
394 | |||
395 | static irqreturn_t sht15_interrupt_fired(int irq, void *d) | ||
396 | { | ||
397 | struct sht15_data *data = d; | ||
398 | /* First disable the interrupt */ | ||
399 | disable_irq_nosync(irq); | ||
400 | atomic_inc(&data->interrupt_handled); | ||
401 | /* Then schedule a reading work struct */ | ||
402 | if (data->flag != SHT15_READING_NOTHING) | ||
403 | schedule_work(&data->read_work); | ||
404 | return IRQ_HANDLED; | ||
405 | } | ||
406 | |||
407 | /* Each byte of data is acknowledged by pulling the data line | ||
408 | * low for one clock pulse. | ||
409 | */ | ||
410 | static void sht15_ack(struct sht15_data *data) | ||
411 | { | ||
412 | gpio_direction_output(data->pdata->gpio_data, 0); | ||
413 | ndelay(SHT15_TSU); | ||
414 | gpio_set_value(data->pdata->gpio_sck, 1); | ||
415 | ndelay(SHT15_TSU); | ||
416 | gpio_set_value(data->pdata->gpio_sck, 0); | ||
417 | ndelay(SHT15_TSU); | ||
418 | gpio_set_value(data->pdata->gpio_data, 1); | ||
419 | |||
420 | gpio_direction_input(data->pdata->gpio_data); | ||
421 | } | ||
422 | /** | ||
423 | * sht15_end_transmission() - notify device of end of transmission | ||
424 | * @data: device state | ||
425 | * | ||
426 | * This is basically a NAK. (single clock pulse, data high) | ||
427 | **/ | ||
428 | static void sht15_end_transmission(struct sht15_data *data) | ||
429 | { | ||
430 | gpio_direction_output(data->pdata->gpio_data, 1); | ||
431 | ndelay(SHT15_TSU); | ||
432 | gpio_set_value(data->pdata->gpio_sck, 1); | ||
433 | ndelay(SHT15_TSCKH); | ||
434 | gpio_set_value(data->pdata->gpio_sck, 0); | ||
435 | ndelay(SHT15_TSCKL); | ||
436 | } | ||
437 | |||
438 | static void sht15_bh_read_data(struct work_struct *work_s) | ||
439 | { | ||
440 | int i; | ||
441 | uint16_t val = 0; | ||
442 | struct sht15_data *data | ||
443 | = container_of(work_s, struct sht15_data, | ||
444 | read_work); | ||
445 | /* Firstly, verify the line is low */ | ||
446 | if (gpio_get_value(data->pdata->gpio_data)) { | ||
447 | /* If not, re-enable the interrupt - take care | ||
448 | here as the line could have gone low in the meantime, so | ||
449 | verify it hasn't! | ||
450 | */ | ||
451 | atomic_set(&data->interrupt_handled, 0); | ||
452 | enable_irq(gpio_to_irq(data->pdata->gpio_data)); | ||
453 | /* If it still hasn't occurred, or another handler has been scheduled */ | ||
454 | if (gpio_get_value(data->pdata->gpio_data) | ||
455 | || atomic_read(&data->interrupt_handled)) | ||
456 | return; | ||
457 | } | ||
458 | /* Read the data back from the device */ | ||
459 | for (i = 0; i < 16; ++i) { | ||
460 | val <<= 1; | ||
461 | gpio_set_value(data->pdata->gpio_sck, 1); | ||
462 | ndelay(SHT15_TSCKH); | ||
463 | val |= !!gpio_get_value(data->pdata->gpio_data); | ||
464 | gpio_set_value(data->pdata->gpio_sck, 0); | ||
465 | ndelay(SHT15_TSCKL); | ||
466 | if (i == 7) | ||
467 | sht15_ack(data); | ||
468 | } | ||
469 | /* Tell the device we are done */ | ||
470 | sht15_end_transmission(data); | ||
471 | |||
472 | switch (data->flag) { | ||
473 | case SHT15_READING_TEMP: | ||
474 | data->val_temp = val; | ||
475 | break; | ||
476 | case SHT15_READING_HUMID: | ||
477 | data->val_humid = val; | ||
478 | break; | ||
479 | } | ||
480 | |||
481 | data->flag = SHT15_READING_NOTHING; | ||
482 | wake_up(&data->wait_queue); | ||
483 | } | ||
484 | |||
485 | static void sht15_update_voltage(struct work_struct *work_s) | ||
486 | { | ||
487 | struct sht15_data *data | ||
488 | = container_of(work_s, struct sht15_data, | ||
489 | update_supply_work); | ||
490 | data->supply_uV = regulator_get_voltage(data->reg); | ||
491 | } | ||
492 | |||
493 | /** | ||
494 | * sht15_invalidate_voltage() - mark supply voltage invalid when notified by reg | ||
495 | * @nb: associated notification structure | ||
496 | * @event: voltage regulator state change event code | ||
497 | * @ignored: function parameter - ignored here | ||
498 | * | ||
499 | * Note that as the notification code holds the regulator lock, we have | ||
500 | * to schedule an update of the supply voltage rather than getting it directly. | ||
501 | **/ | ||
502 | static int sht15_invalidate_voltage(struct notifier_block *nb, | ||
503 | unsigned long event, | ||
504 | void *ignored) | ||
505 | { | ||
506 | struct sht15_data *data = container_of(nb, struct sht15_data, nb); | ||
507 | |||
508 | if (event == REGULATOR_EVENT_VOLTAGE_CHANGE) | ||
509 | data->supply_uV_valid = false; | ||
510 | schedule_work(&data->update_supply_work); | ||
511 | |||
512 | return NOTIFY_OK; | ||
513 | } | ||
514 | |||
515 | static int __devinit sht15_probe(struct platform_device *pdev) | ||
516 | { | ||
517 | int ret = 0; | ||
518 | struct sht15_data *data = kzalloc(sizeof(*data), GFP_KERNEL); | ||
519 | |||
520 | if (!data) { | ||
521 | ret = -ENOMEM; | ||
522 | dev_err(&pdev->dev, "kzalloc failed"); | ||
523 | goto error_ret; | ||
524 | } | ||
525 | |||
526 | INIT_WORK(&data->read_work, sht15_bh_read_data); | ||
527 | INIT_WORK(&data->update_supply_work, sht15_update_voltage); | ||
528 | platform_set_drvdata(pdev, data); | ||
529 | mutex_init(&data->read_lock); | ||
530 | data->dev = &pdev->dev; | ||
531 | init_waitqueue_head(&data->wait_queue); | ||
532 | |||
533 | if (pdev->dev.platform_data == NULL) { | ||
534 | dev_err(&pdev->dev, "no platform data supplied"); | ||
535 | goto err_free_data; | ||
536 | } | ||
537 | data->pdata = pdev->dev.platform_data; | ||
538 | data->supply_uV = data->pdata->supply_mv*1000; | ||
539 | |||
540 | /* If a regulator is available, query what the supply voltage actually is! */ | ||
541 | data->reg = regulator_get(data->dev, "vcc"); | ||
542 | if (!IS_ERR(data->reg)) { | ||
543 | data->supply_uV = regulator_get_voltage(data->reg); | ||
544 | regulator_enable(data->reg); | ||
545 | /* setup a notifier block to update this if another device | ||
546 | * causes the voltage to change */ | ||
547 | data->nb.notifier_call = &sht15_invalidate_voltage; | ||
548 | ret = regulator_register_notifier(data->reg, &data->nb); | ||
549 | } | ||
550 | /* Try requesting the GPIOs */ | ||
551 | ret = gpio_request(data->pdata->gpio_sck, "SHT15 sck"); | ||
552 | if (ret) { | ||
553 | dev_err(&pdev->dev, "gpio request failed"); | ||
554 | goto err_free_data; | ||
555 | } | ||
556 | gpio_direction_output(data->pdata->gpio_sck, 0); | ||
557 | ret = gpio_request(data->pdata->gpio_data, "SHT15 data"); | ||
558 | if (ret) { | ||
559 | dev_err(&pdev->dev, "gpio request failed"); | ||
560 | goto err_release_gpio_sck; | ||
561 | } | ||
562 | ret = sysfs_create_group(&pdev->dev.kobj, &sht15_attr_group); | ||
563 | if (ret) { | ||
564 | dev_err(&pdev->dev, "sysfs create failed"); | ||
565 | goto err_release_gpio_data; | ||
566 | } | ||
567 | |||
568 | ret = request_irq(gpio_to_irq(data->pdata->gpio_data), | ||
569 | sht15_interrupt_fired, | ||
570 | IRQF_TRIGGER_FALLING, | ||
571 | "sht15 data", | ||
572 | data); | ||
573 | if (ret) { | ||
574 | dev_err(&pdev->dev, "failed to get irq for data line"); | ||
575 | goto err_release_gpio_data; | ||
576 | } | ||
577 | disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data)); | ||
578 | sht15_connection_reset(data); | ||
579 | sht15_send_cmd(data, 0x1E); | ||
580 | |||
581 | data->hwmon_dev = hwmon_device_register(data->dev); | ||
582 | if (IS_ERR(data->hwmon_dev)) { | ||
583 | ret = PTR_ERR(data->hwmon_dev); | ||
584 | goto err_release_gpio_data; | ||
585 | } | ||
586 | return 0; | ||
587 | |||
588 | err_release_gpio_data: | ||
589 | gpio_free(data->pdata->gpio_data); | ||
590 | err_release_gpio_sck: | ||
591 | gpio_free(data->pdata->gpio_sck); | ||
592 | err_free_data: | ||
593 | kfree(data); | ||
594 | error_ret: | ||
595 | |||
596 | return ret; | ||
597 | } | ||
598 | |||
599 | static int __devexit sht15_remove(struct platform_device *pdev) | ||
600 | { | ||
601 | struct sht15_data *data = platform_get_drvdata(pdev); | ||
602 | |||
603 | /* Make sure any reads from the device are done and | ||
604 | * prevent new ones beginning */ | ||
605 | mutex_lock(&data->read_lock); | ||
606 | hwmon_device_unregister(data->hwmon_dev); | ||
607 | sysfs_remove_group(&pdev->dev.kobj, &sht15_attr_group); | ||
608 | if (!IS_ERR(data->reg)) { | ||
609 | regulator_unregister_notifier(data->reg, &data->nb); | ||
610 | regulator_disable(data->reg); | ||
611 | regulator_put(data->reg); | ||
612 | } | ||
613 | |||
614 | free_irq(gpio_to_irq(data->pdata->gpio_data), data); | ||
615 | gpio_free(data->pdata->gpio_data); | ||
616 | gpio_free(data->pdata->gpio_sck); | ||
617 | mutex_unlock(&data->read_lock); | ||
618 | kfree(data); | ||
619 | return 0; | ||
620 | } | ||
621 | |||
622 | |||
623 | static struct platform_driver sht_drivers[] = { | ||
624 | { | ||
625 | .driver = { | ||
626 | .name = "sht10", | ||
627 | .owner = THIS_MODULE, | ||
628 | }, | ||
629 | .probe = sht15_probe, | ||
630 | .remove = sht15_remove, | ||
631 | }, { | ||
632 | .driver = { | ||
633 | .name = "sht11", | ||
634 | .owner = THIS_MODULE, | ||
635 | }, | ||
636 | .probe = sht15_probe, | ||
637 | .remove = sht15_remove, | ||
638 | }, { | ||
639 | .driver = { | ||
640 | .name = "sht15", | ||
641 | .owner = THIS_MODULE, | ||
642 | }, | ||
643 | .probe = sht15_probe, | ||
644 | .remove = sht15_remove, | ||
645 | }, { | ||
646 | .driver = { | ||
647 | .name = "sht71", | ||
648 | .owner = THIS_MODULE, | ||
649 | }, | ||
650 | .probe = sht15_probe, | ||
651 | .remove = sht15_remove, | ||
652 | }, { | ||
653 | .driver = { | ||
654 | .name = "sht75", | ||
655 | .owner = THIS_MODULE, | ||
656 | }, | ||
657 | .probe = sht15_probe, | ||
658 | .remove = sht15_remove, | ||
659 | }, | ||
660 | }; | ||
661 | |||
662 | |||
663 | static int __init sht15_init(void) | ||
664 | { | ||
665 | int ret; | ||
666 | int i; | ||
667 | |||
668 | for (i = 0; i < ARRAY_SIZE(sht_drivers); i++) { | ||
669 | ret = platform_driver_register(&sht_drivers[i]); | ||
670 | if (ret) | ||
671 | goto error_unreg; | ||
672 | } | ||
673 | |||
674 | return 0; | ||
675 | |||
676 | error_unreg: | ||
677 | while (--i >= 0) | ||
678 | platform_driver_unregister(&sht_drivers[i]); | ||
679 | |||
680 | return ret; | ||
681 | } | ||
682 | module_init(sht15_init); | ||
683 | |||
684 | static void __exit sht15_exit(void) | ||
685 | { | ||
686 | int i; | ||
687 | for (i = ARRAY_SIZE(sht_drivers) - 1; i >= 0; i--) | ||
688 | platform_driver_unregister(&sht_drivers[i]); | ||
689 | } | ||
690 | module_exit(sht15_exit); | ||
691 | |||
692 | MODULE_LICENSE("GPL"); | ||
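[Aside, not part of the patch above] The new driver relies on board code to register a platform device carrying a struct sht15_platform_data with the two GPIO numbers and a nominal supply voltage. A sketch of such a registration, assuming only the field names the driver actually dereferences (gpio_data, gpio_sck, supply_mv); the GPIO numbers and device placement are invented for illustration:

#include <linux/platform_device.h>
#include <linux/sht15.h>

/* Hypothetical board-support fragment; GPIO numbers are examples only. */
static struct sht15_platform_data board_sht15_pdata = {
	.gpio_data = 100,	/* DATA line */
	.gpio_sck  = 101,	/* SCK line */
	.supply_mv = 3300,	/* fallback when no "vcc" regulator exists */
};

static struct platform_device board_sht15_device = {
	.name	= "sht15",	/* matches one of the driver names above */
	.id	= -1,
	.dev	= {
		.platform_data = &board_sht15_pdata,
	},
};

/* Registered from the board init code, e.g.:
 *	platform_device_register(&board_sht15_device);
 */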
diff --git a/drivers/i2c/algos/i2c-algo-pca.c b/drivers/i2c/algos/i2c-algo-pca.c index f68e5f8e23ee..6318f7ddc1d4 100644 --- a/drivers/i2c/algos/i2c-algo-pca.c +++ b/drivers/i2c/algos/i2c-algo-pca.c | |||
@@ -190,7 +190,7 @@ static int pca_xfer(struct i2c_adapter *i2c_adap, | |||
190 | int completed = 1; | 190 | int completed = 1; |
191 | unsigned long timeout = jiffies + i2c_adap->timeout; | 191 | unsigned long timeout = jiffies + i2c_adap->timeout; |
192 | 192 | ||
193 | while (pca_status(adap) != 0xf8) { | 193 | while ((state = pca_status(adap)) != 0xf8) { |
194 | if (time_before(jiffies, timeout)) { | 194 | if (time_before(jiffies, timeout)) { |
195 | msleep(10); | 195 | msleep(10); |
196 | } else { | 196 | } else { |
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 94eae5c3cbc7..a48c8aee0218 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig | |||
@@ -604,12 +604,14 @@ comment "Graphics adapter I2C/DDC channel drivers" | |||
604 | depends on PCI | 604 | depends on PCI |
605 | 605 | ||
606 | config I2C_VOODOO3 | 606 | config I2C_VOODOO3 |
607 | tristate "Voodoo 3" | 607 | tristate "Voodoo 3 (DEPRECATED)" |
608 | depends on PCI | 608 | depends on PCI |
609 | select I2C_ALGOBIT | 609 | select I2C_ALGOBIT |
610 | help | 610 | help |
611 | If you say yes to this option, support will be included for the | 611 | If you say yes to this option, support will be included for the |
612 | Voodoo 3 I2C interface. | 612 | Voodoo 3 I2C interface. This driver is deprecated and you should |
613 | use the tdfxfb driver instead, which additionally provides | ||
614 | framebuffer support. | ||
613 | 615 | ||
614 | This driver can also be built as a module. If so, the module | 616 | This driver can also be built as a module. If so, the module |
615 | will be called i2c-voodoo3. | 617 | will be called i2c-voodoo3. |
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index b6f3a0de6ca2..85e2e919d1cd 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c | |||
@@ -716,8 +716,7 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver) | |||
716 | 716 | ||
717 | /* new style driver methods can't mix with legacy ones */ | 717 | /* new style driver methods can't mix with legacy ones */ |
718 | if (is_newstyle_driver(driver)) { | 718 | if (is_newstyle_driver(driver)) { |
719 | if (driver->attach_adapter || driver->detach_adapter | 719 | if (driver->detach_adapter || driver->detach_client) { |
720 | || driver->detach_client) { | ||
721 | printk(KERN_WARNING | 720 | printk(KERN_WARNING |
722 | "i2c-core: driver [%s] is confused\n", | 721 | "i2c-core: driver [%s] is confused\n", |
723 | driver->driver.name); | 722 | driver->driver.name); |
diff --git a/drivers/ide/at91_ide.c b/drivers/ide/at91_ide.c index 8eda552326e9..403d0e4265db 100644 --- a/drivers/ide/at91_ide.c +++ b/drivers/ide/at91_ide.c | |||
@@ -20,7 +20,6 @@ | |||
20 | * | 20 | * |
21 | */ | 21 | */ |
22 | 22 | ||
23 | #include <linux/version.h> | ||
24 | #include <linux/kernel.h> | 23 | #include <linux/kernel.h> |
25 | #include <linux/module.h> | 24 | #include <linux/module.h> |
26 | #include <linux/clk.h> | 25 | #include <linux/clk.h> |
@@ -175,90 +174,6 @@ static void at91_ide_output_data(ide_drive_t *drive, struct ide_cmd *cmd, | |||
175 | leave_16bit(chipselect, mode); | 174 | leave_16bit(chipselect, mode); |
176 | } | 175 | } |
177 | 176 | ||
178 | static u8 ide_mm_inb(unsigned long port) | ||
179 | { | ||
180 | return readb((void __iomem *) port); | ||
181 | } | ||
182 | |||
183 | static void ide_mm_outb(u8 value, unsigned long port) | ||
184 | { | ||
185 | writeb(value, (void __iomem *) port); | ||
186 | } | ||
187 | |||
188 | static void at91_ide_tf_load(ide_drive_t *drive, struct ide_cmd *cmd) | ||
189 | { | ||
190 | ide_hwif_t *hwif = drive->hwif; | ||
191 | struct ide_io_ports *io_ports = &hwif->io_ports; | ||
192 | struct ide_taskfile *tf = &cmd->tf; | ||
193 | u8 HIHI = (cmd->tf_flags & IDE_TFLAG_LBA48) ? 0xE0 : 0xEF; | ||
194 | |||
195 | if (cmd->ftf_flags & IDE_FTFLAG_FLAGGED) | ||
196 | HIHI = 0xFF; | ||
197 | |||
198 | if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE) | ||
199 | ide_mm_outb(tf->hob_feature, io_ports->feature_addr); | ||
200 | if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_NSECT) | ||
201 | ide_mm_outb(tf->hob_nsect, io_ports->nsect_addr); | ||
202 | if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_LBAL) | ||
203 | ide_mm_outb(tf->hob_lbal, io_ports->lbal_addr); | ||
204 | if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_LBAM) | ||
205 | ide_mm_outb(tf->hob_lbam, io_ports->lbam_addr); | ||
206 | if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_LBAH) | ||
207 | ide_mm_outb(tf->hob_lbah, io_ports->lbah_addr); | ||
208 | |||
209 | if (cmd->tf_flags & IDE_TFLAG_OUT_FEATURE) | ||
210 | ide_mm_outb(tf->feature, io_ports->feature_addr); | ||
211 | if (cmd->tf_flags & IDE_TFLAG_OUT_NSECT) | ||
212 | ide_mm_outb(tf->nsect, io_ports->nsect_addr); | ||
213 | if (cmd->tf_flags & IDE_TFLAG_OUT_LBAL) | ||
214 | ide_mm_outb(tf->lbal, io_ports->lbal_addr); | ||
215 | if (cmd->tf_flags & IDE_TFLAG_OUT_LBAM) | ||
216 | ide_mm_outb(tf->lbam, io_ports->lbam_addr); | ||
217 | if (cmd->tf_flags & IDE_TFLAG_OUT_LBAH) | ||
218 | ide_mm_outb(tf->lbah, io_ports->lbah_addr); | ||
219 | |||
220 | if (cmd->tf_flags & IDE_TFLAG_OUT_DEVICE) | ||
221 | ide_mm_outb((tf->device & HIHI) | drive->select, io_ports->device_addr); | ||
222 | } | ||
223 | |||
224 | static void at91_ide_tf_read(ide_drive_t *drive, struct ide_cmd *cmd) | ||
225 | { | ||
226 | ide_hwif_t *hwif = drive->hwif; | ||
227 | struct ide_io_ports *io_ports = &hwif->io_ports; | ||
228 | struct ide_taskfile *tf = &cmd->tf; | ||
229 | |||
230 | /* be sure we're looking at the low order bits */ | ||
231 | ide_mm_outb(ATA_DEVCTL_OBS, io_ports->ctl_addr); | ||
232 | |||
233 | if (cmd->tf_flags & IDE_TFLAG_IN_ERROR) | ||
234 | tf->error = ide_mm_inb(io_ports->feature_addr); | ||
235 | if (cmd->tf_flags & IDE_TFLAG_IN_NSECT) | ||
236 | tf->nsect = ide_mm_inb(io_ports->nsect_addr); | ||
237 | if (cmd->tf_flags & IDE_TFLAG_IN_LBAL) | ||
238 | tf->lbal = ide_mm_inb(io_ports->lbal_addr); | ||
239 | if (cmd->tf_flags & IDE_TFLAG_IN_LBAM) | ||
240 | tf->lbam = ide_mm_inb(io_ports->lbam_addr); | ||
241 | if (cmd->tf_flags & IDE_TFLAG_IN_LBAH) | ||
242 | tf->lbah = ide_mm_inb(io_ports->lbah_addr); | ||
243 | if (cmd->tf_flags & IDE_TFLAG_IN_DEVICE) | ||
244 | tf->device = ide_mm_inb(io_ports->device_addr); | ||
245 | |||
246 | if (cmd->tf_flags & IDE_TFLAG_LBA48) { | ||
247 | ide_mm_outb(ATA_HOB | ATA_DEVCTL_OBS, io_ports->ctl_addr); | ||
248 | |||
249 | if (cmd->tf_flags & IDE_TFLAG_IN_HOB_ERROR) | ||
250 | tf->hob_error = ide_mm_inb(io_ports->feature_addr); | ||
251 | if (cmd->tf_flags & IDE_TFLAG_IN_HOB_NSECT) | ||
252 | tf->hob_nsect = ide_mm_inb(io_ports->nsect_addr); | ||
253 | if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAL) | ||
254 | tf->hob_lbal = ide_mm_inb(io_ports->lbal_addr); | ||
255 | if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAM) | ||
256 | tf->hob_lbam = ide_mm_inb(io_ports->lbam_addr); | ||
257 | if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAH) | ||
258 | tf->hob_lbah = ide_mm_inb(io_ports->lbah_addr); | ||
259 | } | ||
260 | } | ||
261 | |||
262 | static void at91_ide_set_pio_mode(ide_drive_t *drive, const u8 pio) | 177 | static void at91_ide_set_pio_mode(ide_drive_t *drive, const u8 pio) |
263 | { | 178 | { |
264 | struct ide_timing *timing; | 179 | struct ide_timing *timing; |
@@ -284,8 +199,8 @@ static const struct ide_tp_ops at91_ide_tp_ops = { | |||
284 | .write_devctl = ide_write_devctl, | 199 | .write_devctl = ide_write_devctl, |
285 | 200 | ||
286 | .dev_select = ide_dev_select, | 201 | .dev_select = ide_dev_select, |
287 | .tf_load = at91_ide_tf_load, | 202 | .tf_load = ide_tf_load, |
288 | .tf_read = at91_ide_tf_read, | 203 | .tf_read = ide_tf_read, |
289 | 204 | ||
290 | .input_data = at91_ide_input_data, | 205 | .input_data = at91_ide_input_data, |
291 | .output_data = at91_ide_output_data, | 206 | .output_data = at91_ide_output_data, |
@@ -300,7 +215,7 @@ static const struct ide_port_info at91_ide_port_info __initdata = { | |||
300 | .tp_ops = &at91_ide_tp_ops, | 215 | .tp_ops = &at91_ide_tp_ops, |
301 | .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA | IDE_HFLAG_SINGLE | | 216 | .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA | IDE_HFLAG_SINGLE | |
302 | IDE_HFLAG_NO_IO_32BIT | IDE_HFLAG_UNMASK_IRQS, | 217 | IDE_HFLAG_NO_IO_32BIT | IDE_HFLAG_UNMASK_IRQS, |
303 | .pio_mask = ATA_PIO5, | 218 | .pio_mask = ATA_PIO6, |
304 | }; | 219 | }; |
305 | 220 | ||
306 | /* | 221 | /* |
diff --git a/drivers/ide/cs5536.c b/drivers/ide/cs5536.c index 353a35bbba63..0332a95eefd4 100644 --- a/drivers/ide/cs5536.c +++ b/drivers/ide/cs5536.c | |||
@@ -236,6 +236,7 @@ static const struct ide_dma_ops cs5536_dma_ops = { | |||
236 | .dma_test_irq = ide_dma_test_irq, | 236 | .dma_test_irq = ide_dma_test_irq, |
237 | .dma_lost_irq = ide_dma_lost_irq, | 237 | .dma_lost_irq = ide_dma_lost_irq, |
238 | .dma_timer_expiry = ide_dma_sff_timer_expiry, | 238 | .dma_timer_expiry = ide_dma_sff_timer_expiry, |
239 | .dma_sff_read_status = ide_dma_sff_read_status, | ||
239 | }; | 240 | }; |
240 | 241 | ||
241 | static const struct ide_port_info cs5536_info = { | 242 | static const struct ide_port_info cs5536_info = { |
diff --git a/drivers/ide/falconide.c b/drivers/ide/falconide.c index afa2af9a362b..0e2df6755ec9 100644 --- a/drivers/ide/falconide.c +++ b/drivers/ide/falconide.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <asm/atarihw.h> | 20 | #include <asm/atarihw.h> |
21 | #include <asm/atariints.h> | 21 | #include <asm/atariints.h> |
22 | #include <asm/atari_stdma.h> | 22 | #include <asm/atari_stdma.h> |
23 | #include <asm/ide.h> | ||
23 | 24 | ||
24 | #define DRV_NAME "falconide" | 25 | #define DRV_NAME "falconide" |
25 | 26 | ||
@@ -67,8 +68,10 @@ static void falconide_input_data(ide_drive_t *drive, struct ide_cmd *cmd, | |||
67 | { | 68 | { |
68 | unsigned long data_addr = drive->hwif->io_ports.data_addr; | 69 | unsigned long data_addr = drive->hwif->io_ports.data_addr; |
69 | 70 | ||
70 | if (drive->media == ide_disk && cmd && (cmd->tf_flags & IDE_TFLAG_FS)) | 71 | if (drive->media == ide_disk && cmd && (cmd->tf_flags & IDE_TFLAG_FS)) { |
71 | return insw(data_addr, buf, (len + 1) / 2); | 72 | __ide_mm_insw(data_addr, buf, (len + 1) / 2); |
73 | return; | ||
74 | } | ||
72 | 75 | ||
73 | raw_insw_swapw((u16 *)data_addr, buf, (len + 1) / 2); | 76 | raw_insw_swapw((u16 *)data_addr, buf, (len + 1) / 2); |
74 | } | 77 | } |
@@ -78,8 +81,10 @@ static void falconide_output_data(ide_drive_t *drive, struct ide_cmd *cmd, | |||
78 | { | 81 | { |
79 | unsigned long data_addr = drive->hwif->io_ports.data_addr; | 82 | unsigned long data_addr = drive->hwif->io_ports.data_addr; |
80 | 83 | ||
81 | if (drive->media == ide_disk && cmd && (cmd->tf_flags & IDE_TFLAG_FS)) | 84 | if (drive->media == ide_disk && cmd && (cmd->tf_flags & IDE_TFLAG_FS)) { |
82 | return outsw(data_addr, buf, (len + 1) / 2); | 85 | __ide_mm_outsw(data_addr, buf, (len + 1) / 2); |
86 | return; | ||
87 | } | ||
83 | 88 | ||
84 | raw_outsw_swapw((u16 *)data_addr, buf, (len + 1) / 2); | 89 | raw_outsw_swapw((u16 *)data_addr, buf, (len + 1) / 2); |
85 | } | 90 | } |
diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c index a0eb87f59134..0feb66c720e1 100644 --- a/drivers/ide/hpt366.c +++ b/drivers/ide/hpt366.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * Portions Copyright (C) 2001 Sun Microsystems, Inc. | 3 | * Portions Copyright (C) 2001 Sun Microsystems, Inc. |
4 | * Portions Copyright (C) 2003 Red Hat Inc | 4 | * Portions Copyright (C) 2003 Red Hat Inc |
5 | * Portions Copyright (C) 2007 Bartlomiej Zolnierkiewicz | 5 | * Portions Copyright (C) 2007 Bartlomiej Zolnierkiewicz |
6 | * Portions Copyright (C) 2005-2008 MontaVista Software, Inc. | 6 | * Portions Copyright (C) 2005-2009 MontaVista Software, Inc. |
7 | * | 7 | * |
8 | * Thanks to HighPoint Technologies for their assistance, and hardware. | 8 | * Thanks to HighPoint Technologies for their assistance, and hardware. |
9 | * Special Thanks to Jon Burchmore in SanDiego for the deep pockets, his | 9 | * Special Thanks to Jon Burchmore in SanDiego for the deep pockets, his |
@@ -114,6 +114,8 @@ | |||
114 | * the register setting lists into the table indexed by the clock selected | 114 | * the register setting lists into the table indexed by the clock selected |
115 | * - set the correct hwif->ultra_mask for each individual chip | 115 | * - set the correct hwif->ultra_mask for each individual chip |
116 | * - add Ultra and MW DMA mode filtering for the HPT37[24] based SATA cards | 116 | * - add Ultra and MW DMA mode filtering for the HPT37[24] based SATA cards |
117 | * - stop resetting HPT370's state machine before each DMA transfer as that has | ||
118 | * caused more harm than good | ||
117 | * Sergei Shtylyov, <sshtylyov@ru.mvista.com> or <source@mvista.com> | 119 | * Sergei Shtylyov, <sshtylyov@ru.mvista.com> or <source@mvista.com> |
118 | */ | 120 | */ |
119 | 121 | ||
@@ -133,7 +135,7 @@ | |||
133 | #define DRV_NAME "hpt366" | 135 | #define DRV_NAME "hpt366" |
134 | 136 | ||
135 | /* various tuning parameters */ | 137 | /* various tuning parameters */ |
136 | #define HPT_RESET_STATE_ENGINE | 138 | #undef HPT_RESET_STATE_ENGINE |
137 | #undef HPT_DELAY_INTERRUPT | 139 | #undef HPT_DELAY_INTERRUPT |
138 | 140 | ||
139 | static const char *quirk_drives[] = { | 141 | static const char *quirk_drives[] = { |
@@ -808,7 +810,7 @@ static void hpt370_irq_timeout(ide_drive_t *drive) | |||
808 | /* get DMA command mode */ | 810 | /* get DMA command mode */ |
809 | dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD); | 811 | dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD); |
810 | /* stop DMA */ | 812 | /* stop DMA */ |
811 | outb(dma_cmd & ~0x1, hwif->dma_base + ATA_DMA_CMD); | 813 | outb(dma_cmd & ~ATA_DMA_START, hwif->dma_base + ATA_DMA_CMD); |
812 | hpt370_clear_engine(drive); | 814 | hpt370_clear_engine(drive); |
813 | } | 815 | } |
814 | 816 | ||
@@ -825,11 +827,11 @@ static int hpt370_dma_end(ide_drive_t *drive) | |||
825 | ide_hwif_t *hwif = drive->hwif; | 827 | ide_hwif_t *hwif = drive->hwif; |
826 | u8 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS); | 828 | u8 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS); |
827 | 829 | ||
828 | if (dma_stat & 0x01) { | 830 | if (dma_stat & ATA_DMA_ACTIVE) { |
829 | /* wait a little */ | 831 | /* wait a little */ |
830 | udelay(20); | 832 | udelay(20); |
831 | dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS); | 833 | dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS); |
832 | if (dma_stat & 0x01) | 834 | if (dma_stat & ATA_DMA_ACTIVE) |
833 | hpt370_irq_timeout(drive); | 835 | hpt370_irq_timeout(drive); |
834 | } | 836 | } |
835 | return ide_dma_end(drive); | 837 | return ide_dma_end(drive); |
@@ -851,7 +853,7 @@ static int hpt374_dma_test_irq(ide_drive_t *drive) | |||
851 | 853 | ||
852 | dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS); | 854 | dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS); |
853 | /* return 1 if INTR asserted */ | 855 | /* return 1 if INTR asserted */ |
854 | if (dma_stat & 4) | 856 | if (dma_stat & ATA_DMA_INTR) |
855 | return 1; | 857 | return 1; |
856 | 858 | ||
857 | return 0; | 859 | return 0; |
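The hpt366.c hunks above replace the bare magic values 0x1, 0x01 and 4 with the named ATA_DMA_START, ATA_DMA_ACTIVE and ATA_DMA_INTR bits when driving the BMIDE command and status registers. A minimal sketch of the resulting idiom, using only the registers and bit names visible in this diff (the helper name bmdma_intr_pending and the surrounding ide_hwif_t context are assumptions for illustration):

	/* sketch: has the BMIDE engine finished and raised its interrupt? */
	static int bmdma_intr_pending(ide_hwif_t *hwif)
	{
		u8 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);

		if (dma_stat & ATA_DMA_ACTIVE)	/* transfer still in flight */
			return 0;

		return (dma_stat & ATA_DMA_INTR) ? 1 : 0;
	}

Naming the bits keeps the behaviour identical while making each status test self-describing.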
diff --git a/drivers/ide/ide-acpi.c b/drivers/ide/ide-acpi.c index 12f436951bff..77f79d26b264 100644 --- a/drivers/ide/ide-acpi.c +++ b/drivers/ide/ide-acpi.c | |||
@@ -318,8 +318,9 @@ static int do_drive_set_taskfiles(ide_drive_t *drive, | |||
318 | 318 | ||
319 | /* convert GTF to taskfile */ | 319 | /* convert GTF to taskfile */ |
320 | memset(&cmd, 0, sizeof(cmd)); | 320 | memset(&cmd, 0, sizeof(cmd)); |
321 | memcpy(&cmd.tf_array[7], gtf, REGS_PER_GTF); | 321 | memcpy(&cmd.tf.feature, gtf, REGS_PER_GTF); |
322 | cmd.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; | 322 | cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE; |
323 | cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE; | ||
323 | 324 | ||
324 | err = ide_no_data_taskfile(drive, &cmd); | 325 | err = ide_no_data_taskfile(drive, &cmd); |
325 | if (err) { | 326 | if (err) { |
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c index 3e43b889dd64..7201b176d75b 100644 --- a/drivers/ide/ide-atapi.c +++ b/drivers/ide/ide-atapi.c | |||
@@ -254,16 +254,13 @@ EXPORT_SYMBOL_GPL(ide_cd_get_xferlen); | |||
254 | 254 | ||
255 | void ide_read_bcount_and_ireason(ide_drive_t *drive, u16 *bcount, u8 *ireason) | 255 | void ide_read_bcount_and_ireason(ide_drive_t *drive, u16 *bcount, u8 *ireason) |
256 | { | 256 | { |
257 | struct ide_cmd cmd; | 257 | struct ide_taskfile tf; |
258 | 258 | ||
259 | memset(&cmd, 0, sizeof(cmd)); | 259 | drive->hwif->tp_ops->tf_read(drive, &tf, IDE_VALID_NSECT | |
260 | cmd.tf_flags = IDE_TFLAG_IN_LBAH | IDE_TFLAG_IN_LBAM | | 260 | IDE_VALID_LBAM | IDE_VALID_LBAH); |
261 | IDE_TFLAG_IN_NSECT; | ||
262 | 261 | ||
263 | drive->hwif->tp_ops->tf_read(drive, &cmd); | 262 | *bcount = (tf.lbah << 8) | tf.lbam; |
264 | 263 | *ireason = tf.nsect & 3; | |
265 | *bcount = (cmd.tf.lbah << 8) | cmd.tf.lbam; | ||
266 | *ireason = cmd.tf.nsect & 3; | ||
267 | } | 264 | } |
268 | EXPORT_SYMBOL_GPL(ide_read_bcount_and_ireason); | 265 | EXPORT_SYMBOL_GPL(ide_read_bcount_and_ireason); |
269 | 266 | ||
@@ -439,12 +436,12 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive) | |||
439 | return ide_started; | 436 | return ide_started; |
440 | } | 437 | } |
441 | 438 | ||
442 | static void ide_init_packet_cmd(struct ide_cmd *cmd, u32 tf_flags, | 439 | static void ide_init_packet_cmd(struct ide_cmd *cmd, u8 valid_tf, |
443 | u16 bcount, u8 dma) | 440 | u16 bcount, u8 dma) |
444 | { | 441 | { |
445 | cmd->protocol = dma ? ATAPI_PROT_DMA : ATAPI_PROT_PIO; | 442 | cmd->protocol = dma ? ATAPI_PROT_DMA : ATAPI_PROT_PIO; |
446 | cmd->tf_flags |= IDE_TFLAG_OUT_LBAH | IDE_TFLAG_OUT_LBAM | | 443 | cmd->valid.out.tf = IDE_VALID_LBAH | IDE_VALID_LBAM | |
447 | IDE_TFLAG_OUT_FEATURE | tf_flags; | 444 | IDE_VALID_FEATURE | valid_tf; |
448 | cmd->tf.command = ATA_CMD_PACKET; | 445 | cmd->tf.command = ATA_CMD_PACKET; |
449 | cmd->tf.feature = dma; /* Use PIO/DMA */ | 446 | cmd->tf.feature = dma; /* Use PIO/DMA */ |
450 | cmd->tf.lbam = bcount & 0xff; | 447 | cmd->tf.lbam = bcount & 0xff; |
@@ -453,14 +450,11 @@ static void ide_init_packet_cmd(struct ide_cmd *cmd, u32 tf_flags, | |||
453 | 450 | ||
454 | static u8 ide_read_ireason(ide_drive_t *drive) | 451 | static u8 ide_read_ireason(ide_drive_t *drive) |
455 | { | 452 | { |
456 | struct ide_cmd cmd; | 453 | struct ide_taskfile tf; |
457 | |||
458 | memset(&cmd, 0, sizeof(cmd)); | ||
459 | cmd.tf_flags = IDE_TFLAG_IN_NSECT; | ||
460 | 454 | ||
461 | drive->hwif->tp_ops->tf_read(drive, &cmd); | 455 | drive->hwif->tp_ops->tf_read(drive, &tf, IDE_VALID_NSECT); |
462 | 456 | ||
463 | return cmd.tf.nsect & 3; | 457 | return tf.nsect & 3; |
464 | } | 458 | } |
465 | 459 | ||
466 | static u8 ide_wait_ireason(ide_drive_t *drive, u8 ireason) | 460 | static u8 ide_wait_ireason(ide_drive_t *drive, u8 ireason) |
@@ -588,12 +582,12 @@ ide_startstop_t ide_issue_pc(ide_drive_t *drive, struct ide_cmd *cmd) | |||
588 | ide_expiry_t *expiry = NULL; | 582 | ide_expiry_t *expiry = NULL; |
589 | struct request *rq = hwif->rq; | 583 | struct request *rq = hwif->rq; |
590 | unsigned int timeout; | 584 | unsigned int timeout; |
591 | u32 tf_flags; | ||
592 | u16 bcount; | 585 | u16 bcount; |
586 | u8 valid_tf; | ||
593 | u8 drq_int = !!(drive->atapi_flags & IDE_AFLAG_DRQ_INTERRUPT); | 587 | u8 drq_int = !!(drive->atapi_flags & IDE_AFLAG_DRQ_INTERRUPT); |
594 | 588 | ||
595 | if (dev_is_idecd(drive)) { | 589 | if (dev_is_idecd(drive)) { |
596 | tf_flags = IDE_TFLAG_OUT_NSECT | IDE_TFLAG_OUT_LBAL; | 590 | valid_tf = IDE_VALID_NSECT | IDE_VALID_LBAL; |
597 | bcount = ide_cd_get_xferlen(rq); | 591 | bcount = ide_cd_get_xferlen(rq); |
598 | expiry = ide_cd_expiry; | 592 | expiry = ide_cd_expiry; |
599 | timeout = ATAPI_WAIT_PC; | 593 | timeout = ATAPI_WAIT_PC; |
@@ -607,7 +601,7 @@ ide_startstop_t ide_issue_pc(ide_drive_t *drive, struct ide_cmd *cmd) | |||
607 | pc->xferred = 0; | 601 | pc->xferred = 0; |
608 | pc->cur_pos = pc->buf; | 602 | pc->cur_pos = pc->buf; |
609 | 603 | ||
610 | tf_flags = IDE_TFLAG_OUT_DEVICE; | 604 | valid_tf = IDE_VALID_DEVICE; |
611 | bcount = ((drive->media == ide_tape) ? | 605 | bcount = ((drive->media == ide_tape) ? |
612 | pc->req_xfer : | 606 | pc->req_xfer : |
613 | min(pc->req_xfer, 63 * 1024)); | 607 | min(pc->req_xfer, 63 * 1024)); |
@@ -627,7 +621,7 @@ ide_startstop_t ide_issue_pc(ide_drive_t *drive, struct ide_cmd *cmd) | |||
627 | : WAIT_TAPE_CMD; | 621 | : WAIT_TAPE_CMD; |
628 | } | 622 | } |
629 | 623 | ||
630 | ide_init_packet_cmd(cmd, tf_flags, bcount, drive->dma); | 624 | ide_init_packet_cmd(cmd, valid_tf, bcount, drive->dma); |
631 | 625 | ||
632 | (void)do_rw_taskfile(drive, cmd); | 626 | (void)do_rw_taskfile(drive, cmd); |
633 | 627 | ||
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c index 35729a47f797..3aec19d1fdfc 100644 --- a/drivers/ide/ide-cd.c +++ b/drivers/ide/ide-cd.c | |||
@@ -265,35 +265,62 @@ static void ide_cd_complete_failed_rq(ide_drive_t *drive, struct request *rq) | |||
265 | cdrom_analyze_sense_data(drive, NULL, sense); | 265 | cdrom_analyze_sense_data(drive, NULL, sense); |
266 | } | 266 | } |
267 | 267 | ||
268 | |||
268 | /* | 269 | /* |
270 | * Allow the drive 5 seconds to recover; some devices will return NOT_READY | ||
271 | * while flushing data from cache. | ||
272 | * | ||
273 | * returns: 0 failed (write timeout expired) | ||
274 | * 1 success | ||
275 | */ | ||
276 | static int ide_cd_breathe(ide_drive_t *drive, struct request *rq) | ||
277 | { | ||
278 | |||
279 | struct cdrom_info *info = drive->driver_data; | ||
280 | |||
281 | if (!rq->errors) | ||
282 | info->write_timeout = jiffies + ATAPI_WAIT_WRITE_BUSY; | ||
283 | |||
284 | rq->errors = 1; | ||
285 | |||
286 | if (time_after(jiffies, info->write_timeout)) | ||
287 | return 0; | ||
288 | else { | ||
289 | struct request_queue *q = drive->queue; | ||
290 | unsigned long flags; | ||
291 | |||
292 | /* | ||
293 | * take a breather relying on the unplug timer to kick us again | ||
294 | */ | ||
295 | |||
296 | spin_lock_irqsave(q->queue_lock, flags); | ||
297 | blk_plug_device(q); | ||
298 | spin_unlock_irqrestore(q->queue_lock, flags); | ||
299 | |||
300 | return 1; | ||
301 | } | ||
302 | } | ||
303 | |||
304 | /** | ||
269 | * Returns: | 305 | * Returns: |
270 | * 0: if the request should be continued. | 306 | * 0: if the request should be continued. |
271 | * 1: if the request will be going through error recovery. | 307 | * 1: if the request will be going through error recovery. |
272 | * 2: if the request should be ended. | 308 | * 2: if the request should be ended. |
273 | */ | 309 | */ |
274 | static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret) | 310 | static int cdrom_decode_status(ide_drive_t *drive, u8 stat) |
275 | { | 311 | { |
276 | ide_hwif_t *hwif = drive->hwif; | 312 | ide_hwif_t *hwif = drive->hwif; |
277 | struct request *rq = hwif->rq; | 313 | struct request *rq = hwif->rq; |
278 | int stat, err, sense_key; | 314 | int err, sense_key, do_end_request = 0; |
279 | 315 | u8 quiet = rq->cmd_flags & REQ_QUIET; | |
280 | /* check for errors */ | ||
281 | stat = hwif->tp_ops->read_status(hwif); | ||
282 | |||
283 | if (stat_ret) | ||
284 | *stat_ret = stat; | ||
285 | |||
286 | if (OK_STAT(stat, good_stat, BAD_R_STAT)) | ||
287 | return 0; | ||
288 | 316 | ||
289 | /* get the IDE error register */ | 317 | /* get the IDE error register */ |
290 | err = ide_read_error(drive); | 318 | err = ide_read_error(drive); |
291 | sense_key = err >> 4; | 319 | sense_key = err >> 4; |
292 | 320 | ||
293 | ide_debug_log(IDE_DBG_RQ, "stat: 0x%x, good_stat: 0x%x, cmd[0]: 0x%x, " | 321 | ide_debug_log(IDE_DBG_RQ, "cmd: 0x%x, rq->cmd_type: 0x%x, err: 0x%x, " |
294 | "rq->cmd_type: 0x%x, err: 0x%x", | 322 | "stat 0x%x", |
295 | stat, good_stat, rq->cmd[0], rq->cmd_type, | 323 | rq->cmd[0], rq->cmd_type, err, stat); |
296 | err); | ||
297 | 324 | ||
298 | if (blk_sense_request(rq)) { | 325 | if (blk_sense_request(rq)) { |
299 | /* | 326 | /* |
@@ -303,151 +330,108 @@ static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret) | |||
303 | */ | 330 | */ |
304 | rq->cmd_flags |= REQ_FAILED; | 331 | rq->cmd_flags |= REQ_FAILED; |
305 | return 2; | 332 | return 2; |
306 | } else if (blk_pc_request(rq) || rq->cmd_type == REQ_TYPE_ATA_PC) { | 333 | } |
307 | /* All other functions, except for READ. */ | ||
308 | 334 | ||
309 | /* | 335 | /* if we have an error, pass CHECK_CONDITION as the SCSI status byte */ |
310 | * if we have an error, pass back CHECK_CONDITION as the | 336 | if (blk_pc_request(rq) && !rq->errors) |
311 | * scsi status byte | 337 | rq->errors = SAM_STAT_CHECK_CONDITION; |
312 | */ | ||
313 | if (blk_pc_request(rq) && !rq->errors) | ||
314 | rq->errors = SAM_STAT_CHECK_CONDITION; | ||
315 | 338 | ||
316 | /* check for tray open */ | 339 | if (blk_noretry_request(rq)) |
317 | if (sense_key == NOT_READY) { | 340 | do_end_request = 1; |
318 | cdrom_saw_media_change(drive); | 341 | |
319 | } else if (sense_key == UNIT_ATTENTION) { | 342 | switch (sense_key) { |
320 | /* check for media change */ | 343 | case NOT_READY: |
344 | if (blk_fs_request(rq) && rq_data_dir(rq) == WRITE) { | ||
345 | if (ide_cd_breathe(drive, rq)) | ||
346 | return 1; | ||
347 | } else { | ||
321 | cdrom_saw_media_change(drive); | 348 | cdrom_saw_media_change(drive); |
322 | return 0; | 349 | |
323 | } else if (sense_key == ILLEGAL_REQUEST && | 350 | if (blk_fs_request(rq) && !quiet) |
324 | rq->cmd[0] == GPCMD_START_STOP_UNIT) { | 351 | printk(KERN_ERR PFX "%s: tray open\n", |
325 | /* | 352 | drive->name); |
326 | * Don't print error message for this condition-- | ||
327 | * SFF8090i indicates that 5/24/00 is the correct | ||
328 | * response to a request to close the tray if the | ||
329 | * drive doesn't have that capability. | ||
330 | * cdrom_log_sense() knows this! | ||
331 | */ | ||
332 | } else if (!(rq->cmd_flags & REQ_QUIET)) { | ||
333 | /* otherwise, print an error */ | ||
334 | ide_dump_status(drive, "packet command error", stat); | ||
335 | } | 353 | } |
354 | do_end_request = 1; | ||
355 | break; | ||
356 | case UNIT_ATTENTION: | ||
357 | cdrom_saw_media_change(drive); | ||
336 | 358 | ||
337 | rq->cmd_flags |= REQ_FAILED; | 359 | if (blk_fs_request(rq) == 0) |
360 | return 0; | ||
338 | 361 | ||
339 | /* | 362 | /* |
340 | * instead of playing games with moving completions around, | 363 | * Arrange to retry the request but be sure to give up if we've |
341 | * remove failed request completely and end it when the | 364 | * retried too many times. |
342 | * request sense has completed | ||
343 | */ | 365 | */ |
344 | goto end_request; | 366 | if (++rq->errors > ERROR_MAX) |
345 | |||
346 | } else if (blk_fs_request(rq)) { | ||
347 | int do_end_request = 0; | ||
348 | |||
349 | /* handle errors from READ and WRITE requests */ | ||
350 | |||
351 | if (blk_noretry_request(rq)) | ||
352 | do_end_request = 1; | 367 | do_end_request = 1; |
353 | 368 | break; | |
354 | if (sense_key == NOT_READY) { | 369 | case ILLEGAL_REQUEST: |
355 | /* tray open */ | 370 | /* |
356 | if (rq_data_dir(rq) == READ) { | 371 | * Don't print error message for this condition -- SFF8090i |
357 | cdrom_saw_media_change(drive); | 372 | * indicates that 5/24/00 is the correct response to a request |
358 | 373 | * to close the tray if the drive doesn't have that capability. | |
359 | /* fail the request */ | 374 | * |
360 | printk(KERN_ERR PFX "%s: tray open\n", | 375 | * cdrom_log_sense() knows this! |
361 | drive->name); | 376 | */ |
362 | do_end_request = 1; | 377 | if (rq->cmd[0] == GPCMD_START_STOP_UNIT) |
363 | } else { | 378 | break; |
364 | struct cdrom_info *info = drive->driver_data; | 379 | /* fall-through */ |
365 | 380 | case DATA_PROTECT: | |
366 | /* | 381 | /* |
367 | * Allow the drive 5 seconds to recover, some | 382 | * No point in retrying after an illegal request or data |
368 | * devices will return this error while flushing | 383 | * protect error. |
369 | * data from cache. | 384 | */ |
370 | */ | 385 | if (!quiet) |
371 | if (!rq->errors) | ||
372 | info->write_timeout = jiffies + | ||
373 | ATAPI_WAIT_WRITE_BUSY; | ||
374 | rq->errors = 1; | ||
375 | if (time_after(jiffies, info->write_timeout)) | ||
376 | do_end_request = 1; | ||
377 | else { | ||
378 | struct request_queue *q = drive->queue; | ||
379 | unsigned long flags; | ||
380 | |||
381 | /* | ||
382 | * take a breather relying on the unplug | ||
383 | * timer to kick us again | ||
384 | */ | ||
385 | spin_lock_irqsave(q->queue_lock, flags); | ||
386 | blk_plug_device(q); | ||
387 | spin_unlock_irqrestore(q->queue_lock, flags); | ||
388 | |||
389 | return 1; | ||
390 | } | ||
391 | } | ||
392 | } else if (sense_key == UNIT_ATTENTION) { | ||
393 | /* media change */ | ||
394 | cdrom_saw_media_change(drive); | ||
395 | |||
396 | /* | ||
397 | * Arrange to retry the request but be sure to give up | ||
398 | * if we've retried too many times. | ||
399 | */ | ||
400 | if (++rq->errors > ERROR_MAX) | ||
401 | do_end_request = 1; | ||
402 | } else if (sense_key == ILLEGAL_REQUEST || | ||
403 | sense_key == DATA_PROTECT) { | ||
404 | /* | ||
405 | * No point in retrying after an illegal request or data | ||
406 | * protect error. | ||
407 | */ | ||
408 | ide_dump_status(drive, "command error", stat); | 386 | ide_dump_status(drive, "command error", stat); |
409 | do_end_request = 1; | 387 | do_end_request = 1; |
410 | } else if (sense_key == MEDIUM_ERROR) { | 388 | break; |
411 | /* | 389 | case MEDIUM_ERROR: |
412 | * No point in re-trying a zillion times on a bad | 390 | /* |
413 | * sector. If we got here the error is not correctable. | 391 | * No point in re-trying a zillion times on a bad sector. |
414 | */ | 392 | * If we got here the error is not correctable. |
415 | ide_dump_status(drive, "media error (bad sector)", | 393 | */ |
394 | if (!quiet) | ||
395 | ide_dump_status(drive, "media error " | ||
396 | "(bad sector)", stat); | ||
397 | do_end_request = 1; | ||
398 | break; | ||
399 | case BLANK_CHECK: | ||
400 | /* disk appears blank? */ | ||
401 | if (!quiet) | ||
402 | ide_dump_status(drive, "media error (blank)", | ||
416 | stat); | 403 | stat); |
417 | do_end_request = 1; | 404 | do_end_request = 1; |
418 | } else if (sense_key == BLANK_CHECK) { | 405 | break; |
419 | /* disk appears blank ?? */ | 406 | default: |
420 | ide_dump_status(drive, "media error (blank)", stat); | 407 | if (blk_fs_request(rq) == 0) |
421 | do_end_request = 1; | 408 | break; |
422 | } else if ((err & ~ATA_ABORTED) != 0) { | 409 | if (err & ~ATA_ABORTED) { |
423 | /* go to the default handler for other errors */ | 410 | /* go to the default handler for other errors */ |
424 | ide_error(drive, "cdrom_decode_status", stat); | 411 | ide_error(drive, "cdrom_decode_status", stat); |
425 | return 1; | 412 | return 1; |
426 | } else if ((++rq->errors > ERROR_MAX)) { | 413 | } else if (++rq->errors > ERROR_MAX) |
427 | /* we've racked up too many retries, abort */ | 414 | /* we've racked up too many retries, abort */ |
428 | do_end_request = 1; | 415 | do_end_request = 1; |
429 | } | 416 | } |
430 | |||
431 | /* | ||
432 | * End a request through request sense analysis when we have | ||
433 | * sense data. We need this in order to perform end of media | ||
434 | * processing. | ||
435 | */ | ||
436 | if (do_end_request) | ||
437 | goto end_request; | ||
438 | 417 | ||
439 | /* | 418 | if (blk_fs_request(rq) == 0) { |
440 | * If we got a CHECK_CONDITION status, queue | 419 | rq->cmd_flags |= REQ_FAILED; |
441 | * a request sense command. | 420 | do_end_request = 1; |
442 | */ | ||
443 | if (stat & ATA_ERR) | ||
444 | cdrom_queue_request_sense(drive, NULL, NULL); | ||
445 | return 1; | ||
446 | } else { | ||
447 | blk_dump_rq_flags(rq, PFX "bad rq"); | ||
448 | return 2; | ||
449 | } | 421 | } |
450 | 422 | ||
423 | /* | ||
424 | * End a request through request sense analysis when we have sense data. | ||
425 | * We need this in order to perform end of media processing. | ||
426 | */ | ||
427 | if (do_end_request) | ||
428 | goto end_request; | ||
429 | |||
430 | /* if we got a CHECK_CONDITION status, queue a request sense command */ | ||
431 | if (stat & ATA_ERR) | ||
432 | cdrom_queue_request_sense(drive, NULL, NULL); | ||
433 | return 1; | ||
434 | |||
451 | end_request: | 435 | end_request: |
452 | if (stat & ATA_ERR) { | 436 | if (stat & ATA_ERR) { |
453 | struct request_queue *q = drive->queue; | 437 | struct request_queue *q = drive->queue; |
@@ -624,15 +608,14 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) | |||
624 | struct ide_cmd *cmd = &hwif->cmd; | 608 | struct ide_cmd *cmd = &hwif->cmd; |
625 | struct request *rq = hwif->rq; | 609 | struct request *rq = hwif->rq; |
626 | ide_expiry_t *expiry = NULL; | 610 | ide_expiry_t *expiry = NULL; |
627 | int dma_error = 0, dma, stat, thislen, uptodate = 0; | 611 | int dma_error = 0, dma, thislen, uptodate = 0; |
628 | int write = (rq_data_dir(rq) == WRITE) ? 1 : 0, rc, nsectors; | 612 | int write = (rq_data_dir(rq) == WRITE) ? 1 : 0, rc, nsectors; |
629 | int sense = blk_sense_request(rq); | 613 | int sense = blk_sense_request(rq); |
630 | unsigned int timeout; | 614 | unsigned int timeout; |
631 | u16 len; | 615 | u16 len; |
632 | u8 ireason; | 616 | u8 ireason, stat; |
633 | 617 | ||
634 | ide_debug_log(IDE_DBG_PC, "cmd[0]: 0x%x, write: 0x%x", | 618 | ide_debug_log(IDE_DBG_PC, "cmd: 0x%x, write: 0x%x", rq->cmd[0], write); |
635 | rq->cmd[0], write); | ||
636 | 619 | ||
637 | /* check for errors */ | 620 | /* check for errors */ |
638 | dma = drive->dma; | 621 | dma = drive->dma; |
@@ -648,11 +631,16 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) | |||
648 | } | 631 | } |
649 | } | 632 | } |
650 | 633 | ||
651 | rc = cdrom_decode_status(drive, 0, &stat); | 634 | /* check status */ |
652 | if (rc) { | 635 | stat = hwif->tp_ops->read_status(hwif); |
653 | if (rc == 2) | 636 | |
654 | goto out_end; | 637 | if (!OK_STAT(stat, 0, BAD_R_STAT)) { |
655 | return ide_stopped; | 638 | rc = cdrom_decode_status(drive, stat); |
639 | if (rc) { | ||
640 | if (rc == 2) | ||
641 | goto out_end; | ||
642 | return ide_stopped; | ||
643 | } | ||
656 | } | 644 | } |
657 | 645 | ||
658 | /* using dma, transfer is complete now */ | 646 | /* using dma, transfer is complete now */ |
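After the restructuring above, cdrom_decode_status() no longer reads the status register itself: the interrupt handler reads it once, calls the decoder only when the status is bad, and then dispatches on the familiar 0/1/2 return codes. A minimal sketch of that caller pattern, reduced from the cdrom_newpc_intr() hunk above (drive, hwif and the out_end label are the ones used there):

	u8 stat = hwif->tp_ops->read_status(hwif);

	if (!OK_STAT(stat, 0, BAD_R_STAT)) {
		int rc = cdrom_decode_status(drive, stat);

		if (rc == 2)		/* request should be ended */
			goto out_end;
		if (rc)			/* error recovery has taken over */
			return ide_stopped;
	}
	/* status was good, or rc == 0: keep servicing the request */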
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c index c998cf8e971a..a9fbe2c31210 100644 --- a/drivers/ide/ide-disk.c +++ b/drivers/ide/ide-disk.c | |||
@@ -97,35 +97,38 @@ static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq, | |||
97 | } | 97 | } |
98 | 98 | ||
99 | memset(&cmd, 0, sizeof(cmd)); | 99 | memset(&cmd, 0, sizeof(cmd)); |
100 | cmd.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; | 100 | cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE; |
101 | cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE; | ||
101 | 102 | ||
102 | if (drive->dev_flags & IDE_DFLAG_LBA) { | 103 | if (drive->dev_flags & IDE_DFLAG_LBA) { |
103 | if (lba48) { | 104 | if (lba48) { |
104 | pr_debug("%s: LBA=0x%012llx\n", drive->name, | 105 | pr_debug("%s: LBA=0x%012llx\n", drive->name, |
105 | (unsigned long long)block); | 106 | (unsigned long long)block); |
106 | 107 | ||
107 | tf->hob_nsect = (nsectors >> 8) & 0xff; | ||
108 | tf->hob_lbal = (u8)(block >> 24); | ||
109 | if (sizeof(block) != 4) { | ||
110 | tf->hob_lbam = (u8)((u64)block >> 32); | ||
111 | tf->hob_lbah = (u8)((u64)block >> 40); | ||
112 | } | ||
113 | |||
114 | tf->nsect = nsectors & 0xff; | 108 | tf->nsect = nsectors & 0xff; |
115 | tf->lbal = (u8) block; | 109 | tf->lbal = (u8) block; |
116 | tf->lbam = (u8)(block >> 8); | 110 | tf->lbam = (u8)(block >> 8); |
117 | tf->lbah = (u8)(block >> 16); | 111 | tf->lbah = (u8)(block >> 16); |
112 | tf->device = ATA_LBA; | ||
118 | 113 | ||
119 | cmd.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_HOB); | 114 | tf = &cmd.hob; |
115 | tf->nsect = (nsectors >> 8) & 0xff; | ||
116 | tf->lbal = (u8)(block >> 24); | ||
117 | if (sizeof(block) != 4) { | ||
118 | tf->lbam = (u8)((u64)block >> 32); | ||
119 | tf->lbah = (u8)((u64)block >> 40); | ||
120 | } | ||
121 | |||
122 | cmd.valid.out.hob = IDE_VALID_OUT_HOB; | ||
123 | cmd.valid.in.hob = IDE_VALID_IN_HOB; | ||
124 | cmd.tf_flags |= IDE_TFLAG_LBA48; | ||
120 | } else { | 125 | } else { |
121 | tf->nsect = nsectors & 0xff; | 126 | tf->nsect = nsectors & 0xff; |
122 | tf->lbal = block; | 127 | tf->lbal = block; |
123 | tf->lbam = block >>= 8; | 128 | tf->lbam = block >>= 8; |
124 | tf->lbah = block >>= 8; | 129 | tf->lbah = block >>= 8; |
125 | tf->device = (block >> 8) & 0xf; | 130 | tf->device = ((block >> 8) & 0xf) | ATA_LBA; |
126 | } | 131 | } |
127 | |||
128 | tf->device |= ATA_LBA; | ||
129 | } else { | 132 | } else { |
130 | unsigned int sect, head, cyl, track; | 133 | unsigned int sect, head, cyl, track; |
131 | 134 | ||
@@ -220,15 +223,19 @@ static u64 idedisk_read_native_max_address(ide_drive_t *drive, int lba48) | |||
220 | tf->command = ATA_CMD_READ_NATIVE_MAX; | 223 | tf->command = ATA_CMD_READ_NATIVE_MAX; |
221 | tf->device = ATA_LBA; | 224 | tf->device = ATA_LBA; |
222 | 225 | ||
223 | cmd.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; | 226 | cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE; |
224 | if (lba48) | 227 | cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE; |
225 | cmd.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_HOB); | 228 | if (lba48) { |
229 | cmd.valid.out.hob = IDE_VALID_OUT_HOB; | ||
230 | cmd.valid.in.hob = IDE_VALID_IN_HOB; | ||
231 | cmd.tf_flags = IDE_TFLAG_LBA48; | ||
232 | } | ||
226 | 233 | ||
227 | ide_no_data_taskfile(drive, &cmd); | 234 | ide_no_data_taskfile(drive, &cmd); |
228 | 235 | ||
229 | /* if OK, compute maximum address value */ | 236 | /* if OK, compute maximum address value */ |
230 | if (!(tf->status & ATA_ERR)) | 237 | if (!(tf->status & ATA_ERR)) |
231 | addr = ide_get_lba_addr(tf, lba48) + 1; | 238 | addr = ide_get_lba_addr(&cmd, lba48) + 1; |
232 | 239 | ||
233 | return addr; | 240 | return addr; |
234 | } | 241 | } |
@@ -250,9 +257,9 @@ static u64 idedisk_set_max_address(ide_drive_t *drive, u64 addr_req, int lba48) | |||
250 | tf->lbam = (addr_req >>= 8) & 0xff; | 257 | tf->lbam = (addr_req >>= 8) & 0xff; |
251 | tf->lbah = (addr_req >>= 8) & 0xff; | 258 | tf->lbah = (addr_req >>= 8) & 0xff; |
252 | if (lba48) { | 259 | if (lba48) { |
253 | tf->hob_lbal = (addr_req >>= 8) & 0xff; | 260 | cmd.hob.lbal = (addr_req >>= 8) & 0xff; |
254 | tf->hob_lbam = (addr_req >>= 8) & 0xff; | 261 | cmd.hob.lbam = (addr_req >>= 8) & 0xff; |
255 | tf->hob_lbah = (addr_req >>= 8) & 0xff; | 262 | cmd.hob.lbah = (addr_req >>= 8) & 0xff; |
256 | tf->command = ATA_CMD_SET_MAX_EXT; | 263 | tf->command = ATA_CMD_SET_MAX_EXT; |
257 | } else { | 264 | } else { |
258 | tf->device = (addr_req >>= 8) & 0x0f; | 265 | tf->device = (addr_req >>= 8) & 0x0f; |
@@ -260,15 +267,19 @@ static u64 idedisk_set_max_address(ide_drive_t *drive, u64 addr_req, int lba48) | |||
260 | } | 267 | } |
261 | tf->device |= ATA_LBA; | 268 | tf->device |= ATA_LBA; |
262 | 269 | ||
263 | cmd.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; | 270 | cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE; |
264 | if (lba48) | 271 | cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE; |
265 | cmd.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_HOB); | 272 | if (lba48) { |
273 | cmd.valid.out.hob = IDE_VALID_OUT_HOB; | ||
274 | cmd.valid.in.hob = IDE_VALID_IN_HOB; | ||
275 | cmd.tf_flags = IDE_TFLAG_LBA48; | ||
276 | } | ||
266 | 277 | ||
267 | ide_no_data_taskfile(drive, &cmd); | 278 | ide_no_data_taskfile(drive, &cmd); |
268 | 279 | ||
269 | /* if OK, compute maximum address value */ | 280 | /* if OK, compute maximum address value */ |
270 | if (!(tf->status & ATA_ERR)) | 281 | if (!(tf->status & ATA_ERR)) |
271 | addr_set = ide_get_lba_addr(tf, lba48) + 1; | 282 | addr_set = ide_get_lba_addr(&cmd, lba48) + 1; |
272 | 283 | ||
273 | return addr_set; | 284 | return addr_set; |
274 | } | 285 | } |
@@ -395,8 +406,8 @@ static void idedisk_prepare_flush(struct request_queue *q, struct request *rq) | |||
395 | cmd->tf.command = ATA_CMD_FLUSH_EXT; | 406 | cmd->tf.command = ATA_CMD_FLUSH_EXT; |
396 | else | 407 | else |
397 | cmd->tf.command = ATA_CMD_FLUSH; | 408 | cmd->tf.command = ATA_CMD_FLUSH; |
398 | cmd->tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE | | 409 | cmd->valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE; |
399 | IDE_TFLAG_DYN; | 410 | cmd->tf_flags = IDE_TFLAG_DYN; |
400 | cmd->protocol = ATA_PROT_NODATA; | 411 | cmd->protocol = ATA_PROT_NODATA; |
401 | 412 | ||
402 | rq->cmd_type = REQ_TYPE_ATA_TASKFILE; | 413 | rq->cmd_type = REQ_TYPE_ATA_TASKFILE; |
@@ -457,7 +468,8 @@ static int ide_do_setfeature(ide_drive_t *drive, u8 feature, u8 nsect) | |||
457 | cmd.tf.feature = feature; | 468 | cmd.tf.feature = feature; |
458 | cmd.tf.nsect = nsect; | 469 | cmd.tf.nsect = nsect; |
459 | cmd.tf.command = ATA_CMD_SET_FEATURES; | 470 | cmd.tf.command = ATA_CMD_SET_FEATURES; |
460 | cmd.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; | 471 | cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE; |
472 | cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE; | ||
461 | 473 | ||
462 | return ide_no_data_taskfile(drive, &cmd); | 474 | return ide_no_data_taskfile(drive, &cmd); |
463 | } | 475 | } |
@@ -533,7 +545,8 @@ static int do_idedisk_flushcache(ide_drive_t *drive) | |||
533 | cmd.tf.command = ATA_CMD_FLUSH_EXT; | 545 | cmd.tf.command = ATA_CMD_FLUSH_EXT; |
534 | else | 546 | else |
535 | cmd.tf.command = ATA_CMD_FLUSH; | 547 | cmd.tf.command = ATA_CMD_FLUSH; |
536 | cmd.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; | 548 | cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE; |
549 | cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE; | ||
537 | 550 | ||
538 | return ide_no_data_taskfile(drive, &cmd); | 551 | return ide_no_data_taskfile(drive, &cmd); |
539 | } | 552 | } |
@@ -715,7 +728,8 @@ static int ide_disk_set_doorlock(ide_drive_t *drive, struct gendisk *disk, | |||
715 | 728 | ||
716 | memset(&cmd, 0, sizeof(cmd)); | 729 | memset(&cmd, 0, sizeof(cmd)); |
717 | cmd.tf.command = on ? ATA_CMD_MEDIA_LOCK : ATA_CMD_MEDIA_UNLOCK; | 730 | cmd.tf.command = on ? ATA_CMD_MEDIA_LOCK : ATA_CMD_MEDIA_UNLOCK; |
718 | cmd.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; | 731 | cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE; |
732 | cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE; | ||
719 | 733 | ||
720 | ret = ide_no_data_taskfile(drive, &cmd); | 734 | ret = ide_no_data_taskfile(drive, &cmd); |
721 | 735 | ||
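With the HOB fields split out of struct ide_taskfile, a 48-bit address is now spread across cmd.tf (low bytes) and cmd.hob (high bytes), with separate valid.out/valid.in masks and IDE_TFLAG_LBA48 selecting the extended protocol. A condensed sketch of the __ide_do_rw_disk() conversion above, assuming the same block and nsectors variables from that context:

	/* low 24 bits of the LBA and the low byte of the sector count */
	cmd.tf.nsect  = nsectors & 0xff;
	cmd.tf.lbal   = (u8) block;
	cmd.tf.lbam   = (u8)(block >> 8);
	cmd.tf.lbah   = (u8)(block >> 16);
	cmd.tf.device = ATA_LBA;

	/* bits 24..47 and the high count byte go into the HOB copy */
	cmd.hob.nsect = (nsectors >> 8) & 0xff;
	cmd.hob.lbal  = (u8)(block >> 24);
	cmd.hob.lbam  = (u8)((u64)block >> 32);
	cmd.hob.lbah  = (u8)((u64)block >> 40);

	cmd.valid.out.hob = IDE_VALID_OUT_HOB;
	cmd.valid.in.hob  = IDE_VALID_IN_HOB;
	cmd.tf_flags     |= IDE_TFLAG_LBA48;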
diff --git a/drivers/ide/ide-disk_proc.c b/drivers/ide/ide-disk_proc.c index eaea3bef2073..19f263bf0a9e 100644 --- a/drivers/ide/ide-disk_proc.c +++ b/drivers/ide/ide-disk_proc.c | |||
@@ -13,7 +13,8 @@ static int smart_enable(ide_drive_t *drive) | |||
13 | tf->lbam = ATA_SMART_LBAM_PASS; | 13 | tf->lbam = ATA_SMART_LBAM_PASS; |
14 | tf->lbah = ATA_SMART_LBAH_PASS; | 14 | tf->lbah = ATA_SMART_LBAH_PASS; |
15 | tf->command = ATA_CMD_SMART; | 15 | tf->command = ATA_CMD_SMART; |
16 | cmd.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; | 16 | cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE; |
17 | cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE; | ||
17 | 18 | ||
18 | return ide_no_data_taskfile(drive, &cmd); | 19 | return ide_no_data_taskfile(drive, &cmd); |
19 | } | 20 | } |
@@ -29,7 +30,8 @@ static int get_smart_data(ide_drive_t *drive, u8 *buf, u8 sub_cmd) | |||
29 | tf->lbam = ATA_SMART_LBAM_PASS; | 30 | tf->lbam = ATA_SMART_LBAM_PASS; |
30 | tf->lbah = ATA_SMART_LBAH_PASS; | 31 | tf->lbah = ATA_SMART_LBAH_PASS; |
31 | tf->command = ATA_CMD_SMART; | 32 | tf->command = ATA_CMD_SMART; |
32 | cmd.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; | 33 | cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE; |
34 | cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE; | ||
33 | cmd.protocol = ATA_PROT_PIO; | 35 | cmd.protocol = ATA_PROT_PIO; |
34 | 36 | ||
35 | return ide_raw_taskfile(drive, &cmd, buf, 1); | 37 | return ide_raw_taskfile(drive, &cmd, buf, 1); |
diff --git a/drivers/ide/ide-dma-sff.c b/drivers/ide/ide-dma-sff.c index 16fc46edc32d..e4cdf78cc3e9 100644 --- a/drivers/ide/ide-dma-sff.c +++ b/drivers/ide/ide-dma-sff.c | |||
@@ -277,8 +277,6 @@ void ide_dma_start(ide_drive_t *drive) | |||
277 | dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD); | 277 | dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD); |
278 | outb(dma_cmd | ATA_DMA_START, hwif->dma_base + ATA_DMA_CMD); | 278 | outb(dma_cmd | ATA_DMA_START, hwif->dma_base + ATA_DMA_CMD); |
279 | } | 279 | } |
280 | |||
281 | wmb(); | ||
282 | } | 280 | } |
283 | EXPORT_SYMBOL_GPL(ide_dma_start); | 281 | EXPORT_SYMBOL_GPL(ide_dma_start); |
284 | 282 | ||
@@ -286,7 +284,7 @@ EXPORT_SYMBOL_GPL(ide_dma_start); | |||
286 | int ide_dma_end(ide_drive_t *drive) | 284 | int ide_dma_end(ide_drive_t *drive) |
287 | { | 285 | { |
288 | ide_hwif_t *hwif = drive->hwif; | 286 | ide_hwif_t *hwif = drive->hwif; |
289 | u8 dma_stat = 0, dma_cmd = 0, mask; | 287 | u8 dma_stat = 0, dma_cmd = 0; |
290 | 288 | ||
291 | /* stop DMA */ | 289 | /* stop DMA */ |
292 | if (hwif->host_flags & IDE_HFLAG_MMIO) { | 290 | if (hwif->host_flags & IDE_HFLAG_MMIO) { |
@@ -304,11 +302,10 @@ int ide_dma_end(ide_drive_t *drive) | |||
304 | /* clear INTR & ERROR bits */ | 302 | /* clear INTR & ERROR bits */ |
305 | ide_dma_sff_write_status(hwif, dma_stat | ATA_DMA_ERR | ATA_DMA_INTR); | 303 | ide_dma_sff_write_status(hwif, dma_stat | ATA_DMA_ERR | ATA_DMA_INTR); |
306 | 304 | ||
307 | wmb(); | 305 | #define CHECK_DMA_MASK (ATA_DMA_ACTIVE | ATA_DMA_ERR | ATA_DMA_INTR) |
308 | 306 | ||
309 | /* verify good DMA status */ | 307 | /* verify good DMA status */ |
310 | mask = ATA_DMA_ACTIVE | ATA_DMA_ERR | ATA_DMA_INTR; | 308 | if ((dma_stat & CHECK_DMA_MASK) != ATA_DMA_INTR) |
311 | if ((dma_stat & mask) != ATA_DMA_INTR) | ||
312 | return 0x10 | dma_stat; | 309 | return 0x10 | dma_stat; |
313 | return 0; | 310 | return 0; |
314 | } | 311 | } |
diff --git a/drivers/ide/ide-h8300.c b/drivers/ide/ide-h8300.c index dac9a6d44963..c06ebdc4a130 100644 --- a/drivers/ide/ide-h8300.c +++ b/drivers/ide/ide-h8300.c | |||
@@ -22,103 +22,6 @@ | |||
22 | (r); \ | 22 | (r); \ |
23 | }) | 23 | }) |
24 | 24 | ||
25 | static void mm_outw(u16 d, unsigned long a) | ||
26 | { | ||
27 | __asm__("mov.b %w0,r2h\n\t" | ||
28 | "mov.b %x0,r2l\n\t" | ||
29 | "mov.w r2,@%1" | ||
30 | : | ||
31 | :"r"(d),"r"(a) | ||
32 | :"er2"); | ||
33 | } | ||
34 | |||
35 | static u16 mm_inw(unsigned long a) | ||
36 | { | ||
37 | register u16 r __asm__("er0"); | ||
38 | __asm__("mov.w @%1,r2\n\t" | ||
39 | "mov.b r2l,%x0\n\t" | ||
40 | "mov.b r2h,%w0" | ||
41 | :"=r"(r) | ||
42 | :"r"(a) | ||
43 | :"er2"); | ||
44 | return r; | ||
45 | } | ||
46 | |||
47 | static void h8300_tf_load(ide_drive_t *drive, struct ide_cmd *cmd) | ||
48 | { | ||
49 | ide_hwif_t *hwif = drive->hwif; | ||
50 | struct ide_io_ports *io_ports = &hwif->io_ports; | ||
51 | struct ide_taskfile *tf = &cmd->tf; | ||
52 | u8 HIHI = (cmd->tf_flags & IDE_TFLAG_LBA48) ? 0xE0 : 0xEF; | ||
53 | |||
54 | if (cmd->ftf_flags & IDE_FTFLAG_FLAGGED) | ||
55 | HIHI = 0xFF; | ||
56 | |||
57 | if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE) | ||
58 | outb(tf->hob_feature, io_ports->feature_addr); | ||
59 | if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_NSECT) | ||
60 | outb(tf->hob_nsect, io_ports->nsect_addr); | ||
61 | if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_LBAL) | ||
62 | outb(tf->hob_lbal, io_ports->lbal_addr); | ||
63 | if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_LBAM) | ||
64 | outb(tf->hob_lbam, io_ports->lbam_addr); | ||
65 | if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_LBAH) | ||
66 | outb(tf->hob_lbah, io_ports->lbah_addr); | ||
67 | |||
68 | if (cmd->tf_flags & IDE_TFLAG_OUT_FEATURE) | ||
69 | outb(tf->feature, io_ports->feature_addr); | ||
70 | if (cmd->tf_flags & IDE_TFLAG_OUT_NSECT) | ||
71 | outb(tf->nsect, io_ports->nsect_addr); | ||
72 | if (cmd->tf_flags & IDE_TFLAG_OUT_LBAL) | ||
73 | outb(tf->lbal, io_ports->lbal_addr); | ||
74 | if (cmd->tf_flags & IDE_TFLAG_OUT_LBAM) | ||
75 | outb(tf->lbam, io_ports->lbam_addr); | ||
76 | if (cmd->tf_flags & IDE_TFLAG_OUT_LBAH) | ||
77 | outb(tf->lbah, io_ports->lbah_addr); | ||
78 | |||
79 | if (cmd->tf_flags & IDE_TFLAG_OUT_DEVICE) | ||
80 | outb((tf->device & HIHI) | drive->select, | ||
81 | io_ports->device_addr); | ||
82 | } | ||
83 | |||
84 | static void h8300_tf_read(ide_drive_t *drive, struct ide_cmd *cmd) | ||
85 | { | ||
86 | ide_hwif_t *hwif = drive->hwif; | ||
87 | struct ide_io_ports *io_ports = &hwif->io_ports; | ||
88 | struct ide_taskfile *tf = &cmd->tf; | ||
89 | |||
90 | /* be sure we're looking at the low order bits */ | ||
91 | outb(ATA_DEVCTL_OBS, io_ports->ctl_addr); | ||
92 | |||
93 | if (cmd->tf_flags & IDE_TFLAG_IN_ERROR) | ||
94 | tf->error = inb(io_ports->feature_addr); | ||
95 | if (cmd->tf_flags & IDE_TFLAG_IN_NSECT) | ||
96 | tf->nsect = inb(io_ports->nsect_addr); | ||
97 | if (cmd->tf_flags & IDE_TFLAG_IN_LBAL) | ||
98 | tf->lbal = inb(io_ports->lbal_addr); | ||
99 | if (cmd->tf_flags & IDE_TFLAG_IN_LBAM) | ||
100 | tf->lbam = inb(io_ports->lbam_addr); | ||
101 | if (cmd->tf_flags & IDE_TFLAG_IN_LBAH) | ||
102 | tf->lbah = inb(io_ports->lbah_addr); | ||
103 | if (cmd->tf_flags & IDE_TFLAG_IN_DEVICE) | ||
104 | tf->device = inb(io_ports->device_addr); | ||
105 | |||
106 | if (cmd->tf_flags & IDE_TFLAG_LBA48) { | ||
107 | outb(ATA_HOB | ATA_DEVCTL_OBS, io_ports->ctl_addr); | ||
108 | |||
109 | if (cmd->tf_flags & IDE_TFLAG_IN_HOB_ERROR) | ||
110 | tf->hob_error = inb(io_ports->feature_addr); | ||
111 | if (cmd->tf_flags & IDE_TFLAG_IN_HOB_NSECT) | ||
112 | tf->hob_nsect = inb(io_ports->nsect_addr); | ||
113 | if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAL) | ||
114 | tf->hob_lbal = inb(io_ports->lbal_addr); | ||
115 | if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAM) | ||
116 | tf->hob_lbam = inb(io_ports->lbam_addr); | ||
117 | if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAH) | ||
118 | tf->hob_lbah = inb(io_ports->lbah_addr); | ||
119 | } | ||
120 | } | ||
121 | |||
122 | static void mm_outsw(unsigned long addr, void *buf, u32 len) | 25 | static void mm_outsw(unsigned long addr, void *buf, u32 len) |
123 | { | 26 | { |
124 | unsigned short *bp = (unsigned short *)buf; | 27 | unsigned short *bp = (unsigned short *)buf; |
@@ -152,8 +55,8 @@ static const struct ide_tp_ops h8300_tp_ops = { | |||
152 | .write_devctl = ide_write_devctl, | 55 | .write_devctl = ide_write_devctl, |
153 | 56 | ||
154 | .dev_select = ide_dev_select, | 57 | .dev_select = ide_dev_select, |
155 | .tf_load = h8300_tf_load, | 58 | .tf_load = ide_tf_load, |
156 | .tf_read = h8300_tf_read, | 59 | .tf_read = ide_tf_read, |
157 | 60 | ||
158 | .input_data = h8300_input_data, | 61 | .input_data = h8300_input_data, |
159 | .output_data = h8300_output_data, | 62 | .output_data = h8300_output_data, |
diff --git a/drivers/ide/ide-io-std.c b/drivers/ide/ide-io-std.c index 9cac281d82c4..46721c454518 100644 --- a/drivers/ide/ide-io-std.c +++ b/drivers/ide/ide-io-std.c | |||
@@ -85,98 +85,57 @@ void ide_dev_select(ide_drive_t *drive) | |||
85 | } | 85 | } |
86 | EXPORT_SYMBOL_GPL(ide_dev_select); | 86 | EXPORT_SYMBOL_GPL(ide_dev_select); |
87 | 87 | ||
88 | void ide_tf_load(ide_drive_t *drive, struct ide_cmd *cmd) | 88 | void ide_tf_load(ide_drive_t *drive, struct ide_taskfile *tf, u8 valid) |
89 | { | 89 | { |
90 | ide_hwif_t *hwif = drive->hwif; | 90 | ide_hwif_t *hwif = drive->hwif; |
91 | struct ide_io_ports *io_ports = &hwif->io_ports; | 91 | struct ide_io_ports *io_ports = &hwif->io_ports; |
92 | struct ide_taskfile *tf = &cmd->tf; | ||
93 | void (*tf_outb)(u8 addr, unsigned long port); | 92 | void (*tf_outb)(u8 addr, unsigned long port); |
94 | u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0; | 93 | u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0; |
95 | u8 HIHI = (cmd->tf_flags & IDE_TFLAG_LBA48) ? 0xE0 : 0xEF; | ||
96 | 94 | ||
97 | if (mmio) | 95 | if (mmio) |
98 | tf_outb = ide_mm_outb; | 96 | tf_outb = ide_mm_outb; |
99 | else | 97 | else |
100 | tf_outb = ide_outb; | 98 | tf_outb = ide_outb; |
101 | 99 | ||
102 | if (cmd->ftf_flags & IDE_FTFLAG_FLAGGED) | 100 | if (valid & IDE_VALID_FEATURE) |
103 | HIHI = 0xFF; | ||
104 | |||
105 | if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE) | ||
106 | tf_outb(tf->hob_feature, io_ports->feature_addr); | ||
107 | if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_NSECT) | ||
108 | tf_outb(tf->hob_nsect, io_ports->nsect_addr); | ||
109 | if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_LBAL) | ||
110 | tf_outb(tf->hob_lbal, io_ports->lbal_addr); | ||
111 | if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_LBAM) | ||
112 | tf_outb(tf->hob_lbam, io_ports->lbam_addr); | ||
113 | if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_LBAH) | ||
114 | tf_outb(tf->hob_lbah, io_ports->lbah_addr); | ||
115 | |||
116 | if (cmd->tf_flags & IDE_TFLAG_OUT_FEATURE) | ||
117 | tf_outb(tf->feature, io_ports->feature_addr); | 101 | tf_outb(tf->feature, io_ports->feature_addr); |
118 | if (cmd->tf_flags & IDE_TFLAG_OUT_NSECT) | 102 | if (valid & IDE_VALID_NSECT) |
119 | tf_outb(tf->nsect, io_ports->nsect_addr); | 103 | tf_outb(tf->nsect, io_ports->nsect_addr); |
120 | if (cmd->tf_flags & IDE_TFLAG_OUT_LBAL) | 104 | if (valid & IDE_VALID_LBAL) |
121 | tf_outb(tf->lbal, io_ports->lbal_addr); | 105 | tf_outb(tf->lbal, io_ports->lbal_addr); |
122 | if (cmd->tf_flags & IDE_TFLAG_OUT_LBAM) | 106 | if (valid & IDE_VALID_LBAM) |
123 | tf_outb(tf->lbam, io_ports->lbam_addr); | 107 | tf_outb(tf->lbam, io_ports->lbam_addr); |
124 | if (cmd->tf_flags & IDE_TFLAG_OUT_LBAH) | 108 | if (valid & IDE_VALID_LBAH) |
125 | tf_outb(tf->lbah, io_ports->lbah_addr); | 109 | tf_outb(tf->lbah, io_ports->lbah_addr); |
126 | 110 | if (valid & IDE_VALID_DEVICE) | |
127 | if (cmd->tf_flags & IDE_TFLAG_OUT_DEVICE) | 111 | tf_outb(tf->device, io_ports->device_addr); |
128 | tf_outb((tf->device & HIHI) | drive->select, | ||
129 | io_ports->device_addr); | ||
130 | } | 112 | } |
131 | EXPORT_SYMBOL_GPL(ide_tf_load); | 113 | EXPORT_SYMBOL_GPL(ide_tf_load); |
132 | 114 | ||
133 | void ide_tf_read(ide_drive_t *drive, struct ide_cmd *cmd) | 115 | void ide_tf_read(ide_drive_t *drive, struct ide_taskfile *tf, u8 valid) |
134 | { | 116 | { |
135 | ide_hwif_t *hwif = drive->hwif; | 117 | ide_hwif_t *hwif = drive->hwif; |
136 | struct ide_io_ports *io_ports = &hwif->io_ports; | 118 | struct ide_io_ports *io_ports = &hwif->io_ports; |
137 | struct ide_taskfile *tf = &cmd->tf; | ||
138 | void (*tf_outb)(u8 addr, unsigned long port); | ||
139 | u8 (*tf_inb)(unsigned long port); | 119 | u8 (*tf_inb)(unsigned long port); |
140 | u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0; | 120 | u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0; |
141 | 121 | ||
142 | if (mmio) { | 122 | if (mmio) |
143 | tf_outb = ide_mm_outb; | ||
144 | tf_inb = ide_mm_inb; | 123 | tf_inb = ide_mm_inb; |
145 | } else { | 124 | else |
146 | tf_outb = ide_outb; | ||
147 | tf_inb = ide_inb; | 125 | tf_inb = ide_inb; |
148 | } | ||
149 | |||
150 | /* be sure we're looking at the low order bits */ | ||
151 | tf_outb(ATA_DEVCTL_OBS, io_ports->ctl_addr); | ||
152 | 126 | ||
153 | if (cmd->tf_flags & IDE_TFLAG_IN_ERROR) | 127 | if (valid & IDE_VALID_ERROR) |
154 | tf->error = tf_inb(io_ports->feature_addr); | 128 | tf->error = tf_inb(io_ports->feature_addr); |
155 | if (cmd->tf_flags & IDE_TFLAG_IN_NSECT) | 129 | if (valid & IDE_VALID_NSECT) |
156 | tf->nsect = tf_inb(io_ports->nsect_addr); | 130 | tf->nsect = tf_inb(io_ports->nsect_addr); |
157 | if (cmd->tf_flags & IDE_TFLAG_IN_LBAL) | 131 | if (valid & IDE_VALID_LBAL) |
158 | tf->lbal = tf_inb(io_ports->lbal_addr); | 132 | tf->lbal = tf_inb(io_ports->lbal_addr); |
159 | if (cmd->tf_flags & IDE_TFLAG_IN_LBAM) | 133 | if (valid & IDE_VALID_LBAM) |
160 | tf->lbam = tf_inb(io_ports->lbam_addr); | 134 | tf->lbam = tf_inb(io_ports->lbam_addr); |
161 | if (cmd->tf_flags & IDE_TFLAG_IN_LBAH) | 135 | if (valid & IDE_VALID_LBAH) |
162 | tf->lbah = tf_inb(io_ports->lbah_addr); | 136 | tf->lbah = tf_inb(io_ports->lbah_addr); |
163 | if (cmd->tf_flags & IDE_TFLAG_IN_DEVICE) | 137 | if (valid & IDE_VALID_DEVICE) |
164 | tf->device = tf_inb(io_ports->device_addr); | 138 | tf->device = tf_inb(io_ports->device_addr); |
165 | |||
166 | if (cmd->tf_flags & IDE_TFLAG_LBA48) { | ||
167 | tf_outb(ATA_HOB | ATA_DEVCTL_OBS, io_ports->ctl_addr); | ||
168 | |||
169 | if (cmd->tf_flags & IDE_TFLAG_IN_HOB_ERROR) | ||
170 | tf->hob_error = tf_inb(io_ports->feature_addr); | ||
171 | if (cmd->tf_flags & IDE_TFLAG_IN_HOB_NSECT) | ||
172 | tf->hob_nsect = tf_inb(io_ports->nsect_addr); | ||
173 | if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAL) | ||
174 | tf->hob_lbal = tf_inb(io_ports->lbal_addr); | ||
175 | if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAM) | ||
176 | tf->hob_lbam = tf_inb(io_ports->lbam_addr); | ||
177 | if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAH) | ||
178 | tf->hob_lbah = tf_inb(io_ports->lbah_addr); | ||
179 | } | ||
180 | } | 139 | } |
181 | EXPORT_SYMBOL_GPL(ide_tf_read); | 140 | EXPORT_SYMBOL_GPL(ide_tf_read); |
182 | 141 | ||
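The generic accessors now take a bare struct ide_taskfile plus an IDE_VALID_* mask, so a caller touches only the registers it names; the HOB half is no longer handled here (full readback goes through ide_tf_readback(), used in the ide-io.c and ide-lib.c hunks below). A minimal usage sketch matching the ide_config_drive_speed() and ide_read_error() conversions later in this diff (tp_ops, drive and speed are taken from that context):

	struct ide_taskfile tf;

	memset(&tf, 0, sizeof(tf));
	tf.feature = SETFEATURES_XFER;
	tf.nsect   = speed;

	/* write only the Feature and Sector Count registers */
	tp_ops->tf_load(drive, &tf, IDE_VALID_FEATURE | IDE_VALID_NSECT);

	/* ... issue the command and wait for it to complete ... */

	/* read back a single register the same way */
	tp_ops->tf_read(drive, &tf, IDE_VALID_ERROR);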
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c index 1deb6d29b186..35dc38d3b2c5 100644 --- a/drivers/ide/ide-io.c +++ b/drivers/ide/ide-io.c | |||
@@ -86,27 +86,30 @@ void ide_complete_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat, u8 err) | |||
86 | 86 | ||
87 | tp_ops->input_data(drive, cmd, data, 2); | 87 | tp_ops->input_data(drive, cmd, data, 2); |
88 | 88 | ||
89 | tf->data = data[0]; | 89 | cmd->tf.data = data[0]; |
90 | tf->hob_data = data[1]; | 90 | cmd->hob.data = data[1]; |
91 | } | 91 | } |
92 | 92 | ||
93 | tp_ops->tf_read(drive, cmd); | 93 | ide_tf_readback(drive, cmd); |
94 | 94 | ||
95 | if ((cmd->tf_flags & IDE_TFLAG_CUSTOM_HANDLER) && | 95 | if ((cmd->tf_flags & IDE_TFLAG_CUSTOM_HANDLER) && |
96 | tf_cmd == ATA_CMD_IDLEIMMEDIATE) { | 96 | tf_cmd == ATA_CMD_IDLEIMMEDIATE) { |
97 | if (tf->lbal != 0xc4) { | 97 | if (tf->lbal != 0xc4) { |
98 | printk(KERN_ERR "%s: head unload failed!\n", | 98 | printk(KERN_ERR "%s: head unload failed!\n", |
99 | drive->name); | 99 | drive->name); |
100 | ide_tf_dump(drive->name, tf); | 100 | ide_tf_dump(drive->name, cmd); |
101 | } else | 101 | } else |
102 | drive->dev_flags |= IDE_DFLAG_PARKED; | 102 | drive->dev_flags |= IDE_DFLAG_PARKED; |
103 | } | 103 | } |
104 | 104 | ||
105 | if (rq && rq->cmd_type == REQ_TYPE_ATA_TASKFILE) | 105 | if (rq && rq->cmd_type == REQ_TYPE_ATA_TASKFILE) { |
106 | memcpy(rq->special, cmd, sizeof(*cmd)); | 106 | struct ide_cmd *orig_cmd = rq->special; |
107 | 107 | ||
108 | if (cmd->tf_flags & IDE_TFLAG_DYN) | 108 | if (cmd->tf_flags & IDE_TFLAG_DYN) |
109 | kfree(cmd); | 109 | kfree(orig_cmd); |
110 | else | ||
111 | memcpy(orig_cmd, cmd, sizeof(*cmd)); | ||
112 | } | ||
110 | } | 113 | } |
111 | 114 | ||
112 | /* obsolete, blk_rq_bytes() should be used instead */ | 115 | /* obsolete, blk_rq_bytes() should be used instead */ |
@@ -205,8 +208,9 @@ static ide_startstop_t ide_disk_special(ide_drive_t *drive) | |||
205 | return ide_stopped; | 208 | return ide_stopped; |
206 | } | 209 | } |
207 | 210 | ||
208 | cmd.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE | | 211 | cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE; |
209 | IDE_TFLAG_CUSTOM_HANDLER; | 212 | cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE; |
213 | cmd.tf_flags = IDE_TFLAG_CUSTOM_HANDLER; | ||
210 | 214 | ||
211 | do_rw_taskfile(drive, &cmd); | 215 | do_rw_taskfile(drive, &cmd); |
212 | 216 | ||
diff --git a/drivers/ide/ide-ioctls.c b/drivers/ide/ide-ioctls.c index 770142767437..c1c25ebbaa1f 100644 --- a/drivers/ide/ide-ioctls.c +++ b/drivers/ide/ide-ioctls.c | |||
@@ -141,11 +141,12 @@ static int ide_cmd_ioctl(ide_drive_t *drive, unsigned long arg) | |||
141 | tf->lbal = args[1]; | 141 | tf->lbal = args[1]; |
142 | tf->lbam = 0x4f; | 142 | tf->lbam = 0x4f; |
143 | tf->lbah = 0xc2; | 143 | tf->lbah = 0xc2; |
144 | cmd.tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_IN_NSECT; | 144 | cmd.valid.out.tf = IDE_VALID_OUT_TF; |
145 | cmd.valid.in.tf = IDE_VALID_NSECT; | ||
145 | } else { | 146 | } else { |
146 | tf->nsect = args[1]; | 147 | tf->nsect = args[1]; |
147 | cmd.tf_flags = IDE_TFLAG_OUT_FEATURE | IDE_TFLAG_OUT_NSECT | | 148 | cmd.valid.out.tf = IDE_VALID_FEATURE | IDE_VALID_NSECT; |
148 | IDE_TFLAG_IN_NSECT; | 149 | cmd.valid.in.tf = IDE_VALID_NSECT; |
149 | } | 150 | } |
150 | tf->command = args[0]; | 151 | tf->command = args[0]; |
151 | cmd.protocol = args[3] ? ATA_PROT_PIO : ATA_PROT_NODATA; | 152 | cmd.protocol = args[3] ? ATA_PROT_PIO : ATA_PROT_NODATA; |
@@ -205,14 +206,15 @@ static int ide_task_ioctl(ide_drive_t *drive, unsigned long arg) | |||
205 | return -EFAULT; | 206 | return -EFAULT; |
206 | 207 | ||
207 | memset(&cmd, 0, sizeof(cmd)); | 208 | memset(&cmd, 0, sizeof(cmd)); |
208 | memcpy(&cmd.tf_array[7], &args[1], 6); | 209 | memcpy(&cmd.tf.feature, &args[1], 6); |
209 | cmd.tf.command = args[0]; | 210 | cmd.tf.command = args[0]; |
210 | cmd.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; | 211 | cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE; |
212 | cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE; | ||
211 | 213 | ||
212 | err = ide_no_data_taskfile(drive, &cmd); | 214 | err = ide_no_data_taskfile(drive, &cmd); |
213 | 215 | ||
214 | args[0] = cmd.tf.command; | 216 | args[0] = cmd.tf.command; |
215 | memcpy(&args[1], &cmd.tf_array[7], 6); | 217 | memcpy(&args[1], &cmd.tf.feature, 6); |
216 | 218 | ||
217 | if (copy_to_user(p, args, 7)) | 219 | if (copy_to_user(p, args, 7)) |
218 | err = -EFAULT; | 220 | err = -EFAULT; |
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c index 27bb70ddd459..c19a221b1e18 100644 --- a/drivers/ide/ide-iops.c +++ b/drivers/ide/ide-iops.c | |||
@@ -37,14 +37,11 @@ void SELECT_MASK(ide_drive_t *drive, int mask) | |||
37 | 37 | ||
38 | u8 ide_read_error(ide_drive_t *drive) | 38 | u8 ide_read_error(ide_drive_t *drive) |
39 | { | 39 | { |
40 | struct ide_cmd cmd; | 40 | struct ide_taskfile tf; |
41 | 41 | ||
42 | memset(&cmd, 0, sizeof(cmd)); | 42 | drive->hwif->tp_ops->tf_read(drive, &tf, IDE_VALID_ERROR); |
43 | cmd.tf_flags = IDE_TFLAG_IN_ERROR; | ||
44 | 43 | ||
45 | drive->hwif->tp_ops->tf_read(drive, &cmd); | 44 | return tf.error; |
46 | |||
47 | return cmd.tf.error; | ||
48 | } | 45 | } |
49 | EXPORT_SYMBOL_GPL(ide_read_error); | 46 | EXPORT_SYMBOL_GPL(ide_read_error); |
50 | 47 | ||
@@ -312,10 +309,10 @@ int ide_config_drive_speed(ide_drive_t *drive, u8 speed) | |||
312 | { | 309 | { |
313 | ide_hwif_t *hwif = drive->hwif; | 310 | ide_hwif_t *hwif = drive->hwif; |
314 | const struct ide_tp_ops *tp_ops = hwif->tp_ops; | 311 | const struct ide_tp_ops *tp_ops = hwif->tp_ops; |
312 | struct ide_taskfile tf; | ||
315 | u16 *id = drive->id, i; | 313 | u16 *id = drive->id, i; |
316 | int error = 0; | 314 | int error = 0; |
317 | u8 stat; | 315 | u8 stat; |
318 | struct ide_cmd cmd; | ||
319 | 316 | ||
320 | #ifdef CONFIG_BLK_DEV_IDEDMA | 317 | #ifdef CONFIG_BLK_DEV_IDEDMA |
321 | if (hwif->dma_ops) /* check if host supports DMA */ | 318 | if (hwif->dma_ops) /* check if host supports DMA */ |
@@ -347,12 +344,11 @@ int ide_config_drive_speed(ide_drive_t *drive, u8 speed) | |||
347 | udelay(1); | 344 | udelay(1); |
348 | tp_ops->write_devctl(hwif, ATA_NIEN | ATA_DEVCTL_OBS); | 345 | tp_ops->write_devctl(hwif, ATA_NIEN | ATA_DEVCTL_OBS); |
349 | 346 | ||
350 | memset(&cmd, 0, sizeof(cmd)); | 347 | memset(&tf, 0, sizeof(tf)); |
351 | cmd.tf_flags = IDE_TFLAG_OUT_FEATURE | IDE_TFLAG_OUT_NSECT; | 348 | tf.feature = SETFEATURES_XFER; |
352 | cmd.tf.feature = SETFEATURES_XFER; | 349 | tf.nsect = speed; |
353 | cmd.tf.nsect = speed; | ||
354 | 350 | ||
355 | tp_ops->tf_load(drive, &cmd); | 351 | tp_ops->tf_load(drive, &tf, IDE_VALID_FEATURE | IDE_VALID_NSECT); |
356 | 352 | ||
357 | tp_ops->exec_command(hwif, ATA_CMD_SET_FEATURES); | 353 | tp_ops->exec_command(hwif, ATA_CMD_SET_FEATURES); |
358 | 354 | ||
diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c index 217b7fdf2b17..56ff8c46c7d1 100644 --- a/drivers/ide/ide-lib.c +++ b/drivers/ide/ide-lib.c | |||
@@ -49,16 +49,17 @@ static void ide_dump_opcode(ide_drive_t *drive) | |||
49 | printk(KERN_CONT "0x%02x\n", cmd->tf.command); | 49 | printk(KERN_CONT "0x%02x\n", cmd->tf.command); |
50 | } | 50 | } |
51 | 51 | ||
52 | u64 ide_get_lba_addr(struct ide_taskfile *tf, int lba48) | 52 | u64 ide_get_lba_addr(struct ide_cmd *cmd, int lba48) |
53 | { | 53 | { |
54 | struct ide_taskfile *tf = &cmd->tf; | ||
54 | u32 high, low; | 55 | u32 high, low; |
55 | 56 | ||
56 | if (lba48) | ||
57 | high = (tf->hob_lbah << 16) | (tf->hob_lbam << 8) | | ||
58 | tf->hob_lbal; | ||
59 | else | ||
60 | high = tf->device & 0xf; | ||
61 | low = (tf->lbah << 16) | (tf->lbam << 8) | tf->lbal; | 57 | low = (tf->lbah << 16) | (tf->lbam << 8) | tf->lbal; |
58 | if (lba48) { | ||
59 | tf = &cmd->hob; | ||
60 | high = (tf->lbah << 16) | (tf->lbam << 8) | tf->lbal; | ||
61 | } else | ||
62 | high = tf->device & 0xf; | ||
62 | 63 | ||
63 | return ((u64)high << 24) | low; | 64 | return ((u64)high << 24) | low; |
64 | } | 65 | } |
@@ -71,17 +72,18 @@ static void ide_dump_sector(ide_drive_t *drive) | |||
71 | u8 lba48 = !!(drive->dev_flags & IDE_DFLAG_LBA48); | 72 | u8 lba48 = !!(drive->dev_flags & IDE_DFLAG_LBA48); |
72 | 73 | ||
73 | memset(&cmd, 0, sizeof(cmd)); | 74 | memset(&cmd, 0, sizeof(cmd)); |
74 | if (lba48) | 75 | if (lba48) { |
75 | cmd.tf_flags = IDE_TFLAG_IN_LBA | IDE_TFLAG_IN_HOB_LBA | | 76 | cmd.valid.in.tf = IDE_VALID_LBA; |
76 | IDE_TFLAG_LBA48; | 77 | cmd.valid.in.hob = IDE_VALID_LBA; |
77 | else | 78 | cmd.tf_flags = IDE_TFLAG_LBA48; |
78 | cmd.tf_flags = IDE_TFLAG_IN_LBA | IDE_TFLAG_IN_DEVICE; | 79 | } else |
80 | cmd.valid.in.tf = IDE_VALID_LBA | IDE_VALID_DEVICE; | ||
79 | 81 | ||
80 | drive->hwif->tp_ops->tf_read(drive, &cmd); | 82 | ide_tf_readback(drive, &cmd); |
81 | 83 | ||
82 | if (lba48 || (tf->device & ATA_LBA)) | 84 | if (lba48 || (tf->device & ATA_LBA)) |
83 | printk(KERN_CONT ", LBAsect=%llu", | 85 | printk(KERN_CONT ", LBAsect=%llu", |
84 | (unsigned long long)ide_get_lba_addr(tf, lba48)); | 86 | (unsigned long long)ide_get_lba_addr(&cmd, lba48)); |
85 | else | 87 | else |
86 | printk(KERN_CONT ", CHS=%d/%d/%d", (tf->lbah << 8) + tf->lbam, | 88 | printk(KERN_CONT ", CHS=%d/%d/%d", (tf->lbah << 8) + tf->lbam, |
87 | tf->device & 0xf, tf->lbal); | 89 | tf->device & 0xf, tf->lbal); |
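[Editor's note, not part of the patch: for reference, ide_get_lba_addr() above assembles the address from the HOB and low-order taskfile bytes. A worked restatement in plain C (u8/u32/u64 as in <linux/types.h>; the helper name is hypothetical):

	/* 48-bit LBA = (hob.lbah:hob.lbam:hob.lbal) << 24 | (tf.lbah:tf.lbam:tf.lbal);
	 * the 28-bit form instead takes its top nibble from the device register. */
	static inline u64 lba48_from_bytes(u8 hob_lbah, u8 hob_lbam, u8 hob_lbal,
					   u8 lbah, u8 lbam, u8 lbal)
	{
		u32 high = (hob_lbah << 16) | (hob_lbam << 8) | hob_lbal;
		u32 low  = (lbah << 16) | (lbam << 8) | lbal;

		return ((u64)high << 24) | low;
	}
]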
diff --git a/drivers/ide/ide-park.c b/drivers/ide/ide-park.c index 9490b446519f..310d03f2b5b7 100644 --- a/drivers/ide/ide-park.c +++ b/drivers/ide/ide-park.c | |||
@@ -74,7 +74,8 @@ ide_startstop_t ide_do_park_unpark(ide_drive_t *drive, struct request *rq) | |||
74 | tf->lbal = 0x4c; | 74 | tf->lbal = 0x4c; |
75 | tf->lbam = 0x4e; | 75 | tf->lbam = 0x4e; |
76 | tf->lbah = 0x55; | 76 | tf->lbah = 0x55; |
77 | cmd.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; | 77 | cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE; |
78 | cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE; | ||
78 | } else /* cmd == REQ_UNPARK_HEADS */ | 79 | } else /* cmd == REQ_UNPARK_HEADS */ |
79 | tf->command = ATA_CMD_CHK_POWER; | 80 | tf->command = ATA_CMD_CHK_POWER; |
80 | 81 | ||
diff --git a/drivers/ide/ide-pm.c b/drivers/ide/ide-pm.c index bb7858ebb7d1..0d8a151c0a01 100644 --- a/drivers/ide/ide-pm.c +++ b/drivers/ide/ide-pm.c | |||
@@ -163,7 +163,8 @@ ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq) | |||
163 | return ide_stopped; | 163 | return ide_stopped; |
164 | 164 | ||
165 | out_do_tf: | 165 | out_do_tf: |
166 | cmd->tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; | 166 | cmd->valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE; |
167 | cmd->valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE; | ||
167 | cmd->protocol = ATA_PROT_NODATA; | 168 | cmd->protocol = ATA_PROT_NODATA; |
168 | 169 | ||
169 | return do_rw_taskfile(drive, cmd); | 170 | return do_rw_taskfile(drive, cmd); |
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index d8c1c3e735bb..7f264ed1141b 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c | |||
@@ -283,13 +283,11 @@ int ide_dev_read_id(ide_drive_t *drive, u8 cmd, u16 *id) | |||
283 | * identify command to be sure of reply | 283 | * identify command to be sure of reply |
284 | */ | 284 | */ |
285 | if (cmd == ATA_CMD_ID_ATAPI) { | 285 | if (cmd == ATA_CMD_ID_ATAPI) { |
286 | struct ide_cmd cmd; | 286 | struct ide_taskfile tf; |
287 | 287 | ||
288 | memset(&cmd, 0, sizeof(cmd)); | 288 | memset(&tf, 0, sizeof(tf)); |
289 | /* disable DMA & overlap */ | 289 | /* disable DMA & overlap */ |
290 | cmd.tf_flags = IDE_TFLAG_OUT_FEATURE; | 290 | tp_ops->tf_load(drive, &tf, IDE_VALID_FEATURE); |
291 | |||
292 | tp_ops->tf_load(drive, &cmd); | ||
293 | } | 291 | } |
294 | 292 | ||
295 | /* ask drive for ID */ | 293 | /* ask drive for ID */ |
@@ -337,14 +335,11 @@ int ide_busy_sleep(ide_hwif_t *hwif, unsigned long timeout, int altstatus) | |||
337 | 335 | ||
338 | static u8 ide_read_device(ide_drive_t *drive) | 336 | static u8 ide_read_device(ide_drive_t *drive) |
339 | { | 337 | { |
340 | struct ide_cmd cmd; | 338 | struct ide_taskfile tf; |
341 | |||
342 | memset(&cmd, 0, sizeof(cmd)); | ||
343 | cmd.tf_flags = IDE_TFLAG_IN_DEVICE; | ||
344 | 339 | ||
345 | drive->hwif->tp_ops->tf_read(drive, &cmd); | 340 | drive->hwif->tp_ops->tf_read(drive, &tf, IDE_VALID_DEVICE); |
346 | 341 | ||
347 | return cmd.tf.device; | 342 | return tf.device; |
348 | } | 343 | } |
349 | 344 | ||
350 | /** | 345 | /** |
@@ -1314,6 +1309,7 @@ struct ide_host *ide_host_alloc(const struct ide_port_info *d, hw_regs_t **hws) | |||
1314 | host->get_lock = d->get_lock; | 1309 | host->get_lock = d->get_lock; |
1315 | host->release_lock = d->release_lock; | 1310 | host->release_lock = d->release_lock; |
1316 | host->host_flags = d->host_flags; | 1311 | host->host_flags = d->host_flags; |
1312 | host->irq_flags = d->irq_flags; | ||
1317 | } | 1313 | } |
1318 | 1314 | ||
1319 | return host; | 1315 | return host; |
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c index 10a88bf3eefa..3242698832a4 100644 --- a/drivers/ide/ide-proc.c +++ b/drivers/ide/ide-proc.c | |||
@@ -204,8 +204,8 @@ static int set_xfer_rate (ide_drive_t *drive, int arg) | |||
204 | cmd.tf.command = ATA_CMD_SET_FEATURES; | 204 | cmd.tf.command = ATA_CMD_SET_FEATURES; |
205 | cmd.tf.feature = SETFEATURES_XFER; | 205 | cmd.tf.feature = SETFEATURES_XFER; |
206 | cmd.tf.nsect = (u8)arg; | 206 | cmd.tf.nsect = (u8)arg; |
207 | cmd.tf_flags = IDE_TFLAG_OUT_FEATURE | IDE_TFLAG_OUT_NSECT | | 207 | cmd.valid.out.tf = IDE_VALID_FEATURE | IDE_VALID_NSECT; |
208 | IDE_TFLAG_IN_NSECT; | 208 | cmd.valid.in.tf = IDE_VALID_NSECT; |
209 | 209 | ||
210 | err = ide_no_data_taskfile(drive, &cmd); | 210 | err = ide_no_data_taskfile(drive, &cmd); |
211 | 211 | ||
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c index 243421ce40d0..4aa6223c11be 100644 --- a/drivers/ide/ide-taskfile.c +++ b/drivers/ide/ide-taskfile.c | |||
@@ -23,17 +23,33 @@ | |||
23 | #include <asm/uaccess.h> | 23 | #include <asm/uaccess.h> |
24 | #include <asm/io.h> | 24 | #include <asm/io.h> |
25 | 25 | ||
26 | void ide_tf_dump(const char *s, struct ide_taskfile *tf) | 26 | void ide_tf_readback(ide_drive_t *drive, struct ide_cmd *cmd) |
27 | { | ||
28 | ide_hwif_t *hwif = drive->hwif; | ||
29 | const struct ide_tp_ops *tp_ops = hwif->tp_ops; | ||
30 | |||
31 | /* Be sure we're looking at the low order bytes */ | ||
32 | tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS); | ||
33 | |||
34 | tp_ops->tf_read(drive, &cmd->tf, cmd->valid.in.tf); | ||
35 | |||
36 | if (cmd->tf_flags & IDE_TFLAG_LBA48) { | ||
37 | tp_ops->write_devctl(hwif, ATA_HOB | ATA_DEVCTL_OBS); | ||
38 | |||
39 | tp_ops->tf_read(drive, &cmd->hob, cmd->valid.in.hob); | ||
40 | } | ||
41 | } | ||
42 | |||
43 | void ide_tf_dump(const char *s, struct ide_cmd *cmd) | ||
27 | { | 44 | { |
28 | #ifdef DEBUG | 45 | #ifdef DEBUG |
29 | printk("%s: tf: feat 0x%02x nsect 0x%02x lbal 0x%02x " | 46 | printk("%s: tf: feat 0x%02x nsect 0x%02x lbal 0x%02x " |
30 | "lbam 0x%02x lbah 0x%02x dev 0x%02x cmd 0x%02x\n", | 47 | "lbam 0x%02x lbah 0x%02x dev 0x%02x cmd 0x%02x\n", |
31 | s, tf->feature, tf->nsect, tf->lbal, | 48 | s, cmd->tf.feature, cmd->tf.nsect, |
32 | tf->lbam, tf->lbah, tf->device, tf->command); | 49 | cmd->tf.lbal, cmd->tf.lbam, cmd->tf.lbah, |
33 | printk("%s: hob: nsect 0x%02x lbal 0x%02x " | 50 | cmd->tf.device, cmd->tf.command); |
34 | "lbam 0x%02x lbah 0x%02x\n", | 51 | printk("%s: hob: nsect 0x%02x lbal 0x%02x lbam 0x%02x lbah 0x%02x\n", |
35 | s, tf->hob_nsect, tf->hob_lbal, | 52 | s, cmd->hob.nsect, cmd->hob.lbal, cmd->hob.lbam, cmd->hob.lbah); |
36 | tf->hob_lbam, tf->hob_lbah); | ||
37 | #endif | 53 | #endif |
38 | } | 54 | } |
39 | 55 | ||
@@ -47,7 +63,8 @@ int taskfile_lib_get_identify (ide_drive_t *drive, u8 *buf) | |||
47 | cmd.tf.command = ATA_CMD_ID_ATA; | 63 | cmd.tf.command = ATA_CMD_ID_ATA; |
48 | else | 64 | else |
49 | cmd.tf.command = ATA_CMD_ID_ATAPI; | 65 | cmd.tf.command = ATA_CMD_ID_ATAPI; |
50 | cmd.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; | 66 | cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE; |
67 | cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE; | ||
51 | cmd.protocol = ATA_PROT_PIO; | 68 | cmd.protocol = ATA_PROT_PIO; |
52 | 69 | ||
53 | return ide_raw_taskfile(drive, &cmd, buf, 1); | 70 | return ide_raw_taskfile(drive, &cmd, buf, 1); |
@@ -79,16 +96,27 @@ ide_startstop_t do_rw_taskfile(ide_drive_t *drive, struct ide_cmd *orig_cmd) | |||
79 | memcpy(cmd, orig_cmd, sizeof(*cmd)); | 96 | memcpy(cmd, orig_cmd, sizeof(*cmd)); |
80 | 97 | ||
81 | if ((cmd->tf_flags & IDE_TFLAG_DMA_PIO_FALLBACK) == 0) { | 98 | if ((cmd->tf_flags & IDE_TFLAG_DMA_PIO_FALLBACK) == 0) { |
82 | ide_tf_dump(drive->name, tf); | 99 | ide_tf_dump(drive->name, cmd); |
83 | tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS); | 100 | tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS); |
84 | SELECT_MASK(drive, 0); | 101 | SELECT_MASK(drive, 0); |
85 | 102 | ||
86 | if (cmd->ftf_flags & IDE_FTFLAG_OUT_DATA) { | 103 | if (cmd->ftf_flags & IDE_FTFLAG_OUT_DATA) { |
87 | u8 data[2] = { tf->data, tf->hob_data }; | 104 | u8 data[2] = { cmd->tf.data, cmd->hob.data }; |
88 | 105 | ||
89 | tp_ops->output_data(drive, cmd, data, 2); | 106 | tp_ops->output_data(drive, cmd, data, 2); |
90 | } | 107 | } |
91 | tp_ops->tf_load(drive, cmd); | 108 | |
109 | if (cmd->valid.out.tf & IDE_VALID_DEVICE) { | ||
110 | u8 HIHI = (cmd->tf_flags & IDE_TFLAG_LBA48) ? | ||
111 | 0xE0 : 0xEF; | ||
112 | |||
113 | if (!(cmd->ftf_flags & IDE_FTFLAG_FLAGGED)) | ||
114 | cmd->tf.device &= HIHI; | ||
115 | cmd->tf.device |= drive->select; | ||
116 | } | ||
117 | |||
118 | tp_ops->tf_load(drive, &cmd->hob, cmd->valid.out.hob); | ||
119 | tp_ops->tf_load(drive, &cmd->tf, cmd->valid.out.tf); | ||
92 | } | 120 | } |
93 | 121 | ||
94 | switch (cmd->protocol) { | 122 | switch (cmd->protocol) { |
@@ -489,16 +517,17 @@ int ide_taskfile_ioctl(ide_drive_t *drive, unsigned long arg) | |||
489 | 517 | ||
490 | memset(&cmd, 0, sizeof(cmd)); | 518 | memset(&cmd, 0, sizeof(cmd)); |
491 | 519 | ||
492 | memcpy(&cmd.tf_array[0], req_task->hob_ports, | 520 | memcpy(&cmd.hob, req_task->hob_ports, HDIO_DRIVE_HOB_HDR_SIZE - 2); |
493 | HDIO_DRIVE_HOB_HDR_SIZE - 2); | 521 | memcpy(&cmd.tf, req_task->io_ports, HDIO_DRIVE_TASK_HDR_SIZE); |
494 | memcpy(&cmd.tf_array[6], req_task->io_ports, | ||
495 | HDIO_DRIVE_TASK_HDR_SIZE); | ||
496 | 522 | ||
497 | cmd.tf_flags = IDE_TFLAG_IO_16BIT | IDE_TFLAG_DEVICE | | 523 | cmd.valid.out.tf = IDE_VALID_DEVICE; |
498 | IDE_TFLAG_IN_TF; | 524 | cmd.valid.in.tf = IDE_VALID_DEVICE | IDE_VALID_IN_TF; |
525 | cmd.tf_flags = IDE_TFLAG_IO_16BIT; | ||
499 | 526 | ||
500 | if (drive->dev_flags & IDE_DFLAG_LBA48) | 527 | if (drive->dev_flags & IDE_DFLAG_LBA48) { |
501 | cmd.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_IN_HOB); | 528 | cmd.tf_flags |= IDE_TFLAG_LBA48; |
529 | cmd.valid.in.hob = IDE_VALID_IN_HOB; | ||
530 | } | ||
502 | 531 | ||
503 | if (req_task->out_flags.all) { | 532 | if (req_task->out_flags.all) { |
504 | cmd.ftf_flags |= IDE_FTFLAG_FLAGGED; | 533 | cmd.ftf_flags |= IDE_FTFLAG_FLAGGED; |
@@ -507,28 +536,28 @@ int ide_taskfile_ioctl(ide_drive_t *drive, unsigned long arg) | |||
507 | cmd.ftf_flags |= IDE_FTFLAG_OUT_DATA; | 536 | cmd.ftf_flags |= IDE_FTFLAG_OUT_DATA; |
508 | 537 | ||
509 | if (req_task->out_flags.b.nsector_hob) | 538 | if (req_task->out_flags.b.nsector_hob) |
510 | cmd.tf_flags |= IDE_TFLAG_OUT_HOB_NSECT; | 539 | cmd.valid.out.hob |= IDE_VALID_NSECT; |
511 | if (req_task->out_flags.b.sector_hob) | 540 | if (req_task->out_flags.b.sector_hob) |
512 | cmd.tf_flags |= IDE_TFLAG_OUT_HOB_LBAL; | 541 | cmd.valid.out.hob |= IDE_VALID_LBAL; |
513 | if (req_task->out_flags.b.lcyl_hob) | 542 | if (req_task->out_flags.b.lcyl_hob) |
514 | cmd.tf_flags |= IDE_TFLAG_OUT_HOB_LBAM; | 543 | cmd.valid.out.hob |= IDE_VALID_LBAM; |
515 | if (req_task->out_flags.b.hcyl_hob) | 544 | if (req_task->out_flags.b.hcyl_hob) |
516 | cmd.tf_flags |= IDE_TFLAG_OUT_HOB_LBAH; | 545 | cmd.valid.out.hob |= IDE_VALID_LBAH; |
517 | 546 | ||
518 | if (req_task->out_flags.b.error_feature) | 547 | if (req_task->out_flags.b.error_feature) |
519 | cmd.tf_flags |= IDE_TFLAG_OUT_FEATURE; | 548 | cmd.valid.out.tf |= IDE_VALID_FEATURE; |
520 | if (req_task->out_flags.b.nsector) | 549 | if (req_task->out_flags.b.nsector) |
521 | cmd.tf_flags |= IDE_TFLAG_OUT_NSECT; | 550 | cmd.valid.out.tf |= IDE_VALID_NSECT; |
522 | if (req_task->out_flags.b.sector) | 551 | if (req_task->out_flags.b.sector) |
523 | cmd.tf_flags |= IDE_TFLAG_OUT_LBAL; | 552 | cmd.valid.out.tf |= IDE_VALID_LBAL; |
524 | if (req_task->out_flags.b.lcyl) | 553 | if (req_task->out_flags.b.lcyl) |
525 | cmd.tf_flags |= IDE_TFLAG_OUT_LBAM; | 554 | cmd.valid.out.tf |= IDE_VALID_LBAM; |
526 | if (req_task->out_flags.b.hcyl) | 555 | if (req_task->out_flags.b.hcyl) |
527 | cmd.tf_flags |= IDE_TFLAG_OUT_LBAH; | 556 | cmd.valid.out.tf |= IDE_VALID_LBAH; |
528 | } else { | 557 | } else { |
529 | cmd.tf_flags |= IDE_TFLAG_OUT_TF; | 558 | cmd.valid.out.tf |= IDE_VALID_OUT_TF; |
530 | if (cmd.tf_flags & IDE_TFLAG_LBA48) | 559 | if (cmd.tf_flags & IDE_TFLAG_LBA48) |
531 | cmd.tf_flags |= IDE_TFLAG_OUT_HOB; | 560 | cmd.valid.out.hob |= IDE_VALID_OUT_HOB; |
532 | } | 561 | } |
533 | 562 | ||
534 | if (req_task->in_flags.b.data) | 563 | if (req_task->in_flags.b.data) |
@@ -594,7 +623,7 @@ int ide_taskfile_ioctl(ide_drive_t *drive, unsigned long arg) | |||
594 | if (req_task->req_cmd == IDE_DRIVE_TASK_NO_DATA) | 623 | if (req_task->req_cmd == IDE_DRIVE_TASK_NO_DATA) |
595 | nsect = 0; | 624 | nsect = 0; |
596 | else if (!nsect) { | 625 | else if (!nsect) { |
597 | nsect = (cmd.tf.hob_nsect << 8) | cmd.tf.nsect; | 626 | nsect = (cmd.hob.nsect << 8) | cmd.tf.nsect; |
598 | 627 | ||
599 | if (!nsect) { | 628 | if (!nsect) { |
600 | printk(KERN_ERR "%s: in/out command without data\n", | 629 | printk(KERN_ERR "%s: in/out command without data\n", |
@@ -606,10 +635,8 @@ int ide_taskfile_ioctl(ide_drive_t *drive, unsigned long arg) | |||
606 | 635 | ||
607 | err = ide_raw_taskfile(drive, &cmd, data_buf, nsect); | 636 | err = ide_raw_taskfile(drive, &cmd, data_buf, nsect); |
608 | 637 | ||
609 | memcpy(req_task->hob_ports, &cmd.tf_array[0], | 638 | memcpy(req_task->hob_ports, &cmd.hob, HDIO_DRIVE_HOB_HDR_SIZE - 2); |
610 | HDIO_DRIVE_HOB_HDR_SIZE - 2); | 639 | memcpy(req_task->io_ports, &cmd.tf, HDIO_DRIVE_TASK_HDR_SIZE); |
611 | memcpy(req_task->io_ports, &cmd.tf_array[6], | ||
612 | HDIO_DRIVE_TASK_HDR_SIZE); | ||
613 | 640 | ||
614 | if ((cmd.ftf_flags & IDE_FTFLAG_SET_IN_FLAGS) && | 641 | if ((cmd.ftf_flags & IDE_FTFLAG_SET_IN_FLAGS) && |
615 | req_task->in_flags.all == 0) { | 642 | req_task->in_flags.all == 0) { |
diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c index 71a39fb3856f..95327a2c2422 100644 --- a/drivers/ide/ns87415.c +++ b/drivers/ide/ns87415.c | |||
@@ -61,41 +61,23 @@ static u8 superio_dma_sff_read_status(ide_hwif_t *hwif) | |||
61 | return superio_ide_inb(hwif->dma_base + ATA_DMA_STATUS); | 61 | return superio_ide_inb(hwif->dma_base + ATA_DMA_STATUS); |
62 | } | 62 | } |
63 | 63 | ||
64 | static void superio_tf_read(ide_drive_t *drive, struct ide_cmd *cmd) | 64 | static void superio_tf_read(ide_drive_t *drive, struct ide_taskfile *tf, |
65 | u8 valid) | ||
65 | { | 66 | { |
66 | struct ide_io_ports *io_ports = &drive->hwif->io_ports; | 67 | struct ide_io_ports *io_ports = &drive->hwif->io_ports; |
67 | struct ide_taskfile *tf = &cmd->tf; | ||
68 | 68 | ||
69 | /* be sure we're looking at the low order bits */ | 69 | if (valid & IDE_VALID_ERROR) |
70 | outb(ATA_DEVCTL_OBS, io_ports->ctl_addr); | ||
71 | |||
72 | if (cmd->tf_flags & IDE_TFLAG_IN_ERROR) | ||
73 | tf->error = inb(io_ports->feature_addr); | 70 | tf->error = inb(io_ports->feature_addr); |
74 | if (cmd->tf_flags & IDE_TFLAG_IN_NSECT) | 71 | if (valid & IDE_VALID_NSECT) |
75 | tf->nsect = inb(io_ports->nsect_addr); | 72 | tf->nsect = inb(io_ports->nsect_addr); |
76 | if (cmd->tf_flags & IDE_TFLAG_IN_LBAL) | 73 | if (valid & IDE_VALID_LBAL) |
77 | tf->lbal = inb(io_ports->lbal_addr); | 74 | tf->lbal = inb(io_ports->lbal_addr); |
78 | if (cmd->tf_flags & IDE_TFLAG_IN_LBAM) | 75 | if (valid & IDE_VALID_LBAM) |
79 | tf->lbam = inb(io_ports->lbam_addr); | 76 | tf->lbam = inb(io_ports->lbam_addr); |
80 | if (cmd->tf_flags & IDE_TFLAG_IN_LBAH) | 77 | if (valid & IDE_VALID_LBAH) |
81 | tf->lbah = inb(io_ports->lbah_addr); | 78 | tf->lbah = inb(io_ports->lbah_addr); |
82 | if (cmd->tf_flags & IDE_TFLAG_IN_DEVICE) | 79 | if (valid & IDE_VALID_DEVICE) |
83 | tf->device = superio_ide_inb(io_ports->device_addr); | 80 | tf->device = superio_ide_inb(io_ports->device_addr); |
84 | |||
85 | if (cmd->tf_flags & IDE_TFLAG_LBA48) { | ||
86 | outb(ATA_HOB | ATA_DEVCTL_OBS, io_ports->ctl_addr); | ||
87 | |||
88 | if (cmd->tf_flags & IDE_TFLAG_IN_HOB_ERROR) | ||
89 | tf->hob_error = inb(io_ports->feature_addr); | ||
90 | if (cmd->tf_flags & IDE_TFLAG_IN_HOB_NSECT) | ||
91 | tf->hob_nsect = inb(io_ports->nsect_addr); | ||
92 | if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAL) | ||
93 | tf->hob_lbal = inb(io_ports->lbal_addr); | ||
94 | if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAM) | ||
95 | tf->hob_lbam = inb(io_ports->lbam_addr); | ||
96 | if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAH) | ||
97 | tf->hob_lbah = inb(io_ports->lbah_addr); | ||
98 | } | ||
99 | } | 81 | } |
100 | 82 | ||
101 | static void ns87415_dev_select(ide_drive_t *drive); | 83 | static void ns87415_dev_select(ide_drive_t *drive); |
diff --git a/drivers/ide/pmac.c b/drivers/ide/pmac.c index 052b9bf1f8fb..f76e4e6b408f 100644 --- a/drivers/ide/pmac.c +++ b/drivers/ide/pmac.c | |||
@@ -1682,7 +1682,7 @@ static int __devinit pmac_ide_init_dma(ide_hwif_t *hwif, | |||
1682 | * The +2 is +1 for the stop command and +1 to allow for | 1682 | * The +2 is +1 for the stop command and +1 to allow for |
1683 | * aligning the start address to a multiple of 16 bytes. | 1683 | * aligning the start address to a multiple of 16 bytes. |
1684 | */ | 1684 | */ |
1685 | pmif->dma_table_cpu = (struct dbdma_cmd*)pci_alloc_consistent( | 1685 | pmif->dma_table_cpu = pci_alloc_consistent( |
1686 | dev, | 1686 | dev, |
1687 | (MAX_DCMDS + 2) * sizeof(struct dbdma_cmd), | 1687 | (MAX_DCMDS + 2) * sizeof(struct dbdma_cmd), |
1688 | &hwif->dmatable_dma); | 1688 | &hwif->dmatable_dma); |
diff --git a/drivers/ide/q40ide.c b/drivers/ide/q40ide.c index d007e7f66598..c79346679244 100644 --- a/drivers/ide/q40ide.c +++ b/drivers/ide/q40ide.c | |||
@@ -16,6 +16,8 @@ | |||
16 | #include <linux/blkdev.h> | 16 | #include <linux/blkdev.h> |
17 | #include <linux/ide.h> | 17 | #include <linux/ide.h> |
18 | 18 | ||
19 | #include <asm/ide.h> | ||
20 | |||
19 | /* | 21 | /* |
20 | * Bases of the IDE interfaces | 22 | * Bases of the IDE interfaces |
21 | */ | 23 | */ |
@@ -77,8 +79,10 @@ static void q40ide_input_data(ide_drive_t *drive, struct ide_cmd *cmd, | |||
77 | { | 79 | { |
78 | unsigned long data_addr = drive->hwif->io_ports.data_addr; | 80 | unsigned long data_addr = drive->hwif->io_ports.data_addr; |
79 | 81 | ||
80 | if (drive->media == ide_disk && cmd && (cmd->tf_flags & IDE_TFLAG_FS)) | 82 | if (drive->media == ide_disk && cmd && (cmd->tf_flags & IDE_TFLAG_FS)) { |
81 | return insw(data_addr, buf, (len + 1) / 2); | 83 | __ide_mm_insw(data_addr, buf, (len + 1) / 2); |
84 | return; | ||
85 | } | ||
82 | 86 | ||
83 | raw_insw_swapw((u16 *)data_addr, buf, (len + 1) / 2); | 87 | raw_insw_swapw((u16 *)data_addr, buf, (len + 1) / 2); |
84 | } | 88 | } |
@@ -88,8 +92,10 @@ static void q40ide_output_data(ide_drive_t *drive, struct ide_cmd *cmd, | |||
88 | { | 92 | { |
89 | unsigned long data_addr = drive->hwif->io_ports.data_addr; | 93 | unsigned long data_addr = drive->hwif->io_ports.data_addr; |
90 | 94 | ||
91 | if (drive->media == ide_disk && cmd && (cmd->tf_flags & IDE_TFLAG_FS)) | 95 | if (drive->media == ide_disk && cmd && (cmd->tf_flags & IDE_TFLAG_FS)) { |
92 | return outsw(data_addr, buf, (len + 1) / 2); | 96 | __ide_mm_outsw(data_addr, buf, (len + 1) / 2); |
97 | return; | ||
98 | } | ||
93 | 99 | ||
94 | raw_outsw_swapw((u16 *)data_addr, buf, (len + 1) / 2); | 100 | raw_outsw_swapw((u16 *)data_addr, buf, (len + 1) / 2); |
95 | } | 101 | } |
diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c index 6d8dbd9c10bc..5be41f25204f 100644 --- a/drivers/ide/scc_pata.c +++ b/drivers/ide/scc_pata.c | |||
@@ -337,7 +337,6 @@ static void scc_dma_start(ide_drive_t *drive) | |||
337 | 337 | ||
338 | /* start DMA */ | 338 | /* start DMA */ |
339 | scc_ide_outb(dma_cmd | 1, hwif->dma_base); | 339 | scc_ide_outb(dma_cmd | 1, hwif->dma_base); |
340 | wmb(); | ||
341 | } | 340 | } |
342 | 341 | ||
343 | static int __scc_dma_end(ide_drive_t *drive) | 342 | static int __scc_dma_end(ide_drive_t *drive) |
@@ -354,7 +353,6 @@ static int __scc_dma_end(ide_drive_t *drive) | |||
354 | /* clear the INTR & ERROR bits */ | 353 | /* clear the INTR & ERROR bits */ |
355 | scc_ide_outb(dma_stat | 6, hwif->dma_base + 4); | 354 | scc_ide_outb(dma_stat | 6, hwif->dma_base + 4); |
356 | /* verify good DMA status */ | 355 | /* verify good DMA status */ |
357 | wmb(); | ||
358 | return (dma_stat & 7) != 4 ? (0x10 | dma_stat) : 0; | 356 | return (dma_stat & 7) != 4 ? (0x10 | dma_stat) : 0; |
359 | } | 357 | } |
360 | 358 | ||
@@ -647,77 +645,40 @@ static int __devinit init_setup_scc(struct pci_dev *dev, | |||
647 | return rc; | 645 | return rc; |
648 | } | 646 | } |
649 | 647 | ||
650 | static void scc_tf_load(ide_drive_t *drive, struct ide_cmd *cmd) | 648 | static void scc_tf_load(ide_drive_t *drive, struct ide_taskfile *tf, u8 valid) |
651 | { | 649 | { |
652 | struct ide_io_ports *io_ports = &drive->hwif->io_ports; | 650 | struct ide_io_ports *io_ports = &drive->hwif->io_ports; |
653 | struct ide_taskfile *tf = &cmd->tf; | 651 | |
654 | u8 HIHI = (cmd->tf_flags & IDE_TFLAG_LBA48) ? 0xE0 : 0xEF; | 652 | if (valid & IDE_VALID_FEATURE) |
655 | |||
656 | if (cmd->ftf_flags & IDE_FTFLAG_FLAGGED) | ||
657 | HIHI = 0xFF; | ||
658 | |||
659 | if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE) | ||
660 | scc_ide_outb(tf->hob_feature, io_ports->feature_addr); | ||
661 | if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_NSECT) | ||
662 | scc_ide_outb(tf->hob_nsect, io_ports->nsect_addr); | ||
663 | if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_LBAL) | ||
664 | scc_ide_outb(tf->hob_lbal, io_ports->lbal_addr); | ||
665 | if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_LBAM) | ||
666 | scc_ide_outb(tf->hob_lbam, io_ports->lbam_addr); | ||
667 | if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_LBAH) | ||
668 | scc_ide_outb(tf->hob_lbah, io_ports->lbah_addr); | ||
669 | |||
670 | if (cmd->tf_flags & IDE_TFLAG_OUT_FEATURE) | ||
671 | scc_ide_outb(tf->feature, io_ports->feature_addr); | 653 | scc_ide_outb(tf->feature, io_ports->feature_addr); |
672 | if (cmd->tf_flags & IDE_TFLAG_OUT_NSECT) | 654 | if (valid & IDE_VALID_NSECT) |
673 | scc_ide_outb(tf->nsect, io_ports->nsect_addr); | 655 | scc_ide_outb(tf->nsect, io_ports->nsect_addr); |
674 | if (cmd->tf_flags & IDE_TFLAG_OUT_LBAL) | 656 | if (valid & IDE_VALID_LBAL) |
675 | scc_ide_outb(tf->lbal, io_ports->lbal_addr); | 657 | scc_ide_outb(tf->lbal, io_ports->lbal_addr); |
676 | if (cmd->tf_flags & IDE_TFLAG_OUT_LBAM) | 658 | if (valid & IDE_VALID_LBAM) |
677 | scc_ide_outb(tf->lbam, io_ports->lbam_addr); | 659 | scc_ide_outb(tf->lbam, io_ports->lbam_addr); |
678 | if (cmd->tf_flags & IDE_TFLAG_OUT_LBAH) | 660 | if (valid & IDE_VALID_LBAH) |
679 | scc_ide_outb(tf->lbah, io_ports->lbah_addr); | 661 | scc_ide_outb(tf->lbah, io_ports->lbah_addr); |
680 | 662 | if (valid & IDE_VALID_DEVICE) | |
681 | if (cmd->tf_flags & IDE_TFLAG_OUT_DEVICE) | 663 | scc_ide_outb(tf->device, io_ports->device_addr); |
682 | scc_ide_outb((tf->device & HIHI) | drive->select, | ||
683 | io_ports->device_addr); | ||
684 | } | 664 | } |
685 | 665 | ||
686 | static void scc_tf_read(ide_drive_t *drive, struct ide_cmd *cmd) | 666 | static void scc_tf_read(ide_drive_t *drive, struct ide_taskfile *tf, u8 valid) |
687 | { | 667 | { |
688 | struct ide_io_ports *io_ports = &drive->hwif->io_ports; | 668 | struct ide_io_ports *io_ports = &drive->hwif->io_ports; |
689 | struct ide_taskfile *tf = &cmd->tf; | ||
690 | |||
691 | /* be sure we're looking at the low order bits */ | ||
692 | scc_ide_outb(ATA_DEVCTL_OBS, io_ports->ctl_addr); | ||
693 | 669 | ||
694 | if (cmd->tf_flags & IDE_TFLAG_IN_ERROR) | 670 | if (valid & IDE_VALID_ERROR) |
695 | tf->error = scc_ide_inb(io_ports->feature_addr); | 671 | tf->error = scc_ide_inb(io_ports->feature_addr); |
696 | if (cmd->tf_flags & IDE_TFLAG_IN_NSECT) | 672 | if (valid & IDE_VALID_NSECT) |
697 | tf->nsect = scc_ide_inb(io_ports->nsect_addr); | 673 | tf->nsect = scc_ide_inb(io_ports->nsect_addr); |
698 | if (cmd->tf_flags & IDE_TFLAG_IN_LBAL) | 674 | if (valid & IDE_VALID_LBAL) |
699 | tf->lbal = scc_ide_inb(io_ports->lbal_addr); | 675 | tf->lbal = scc_ide_inb(io_ports->lbal_addr); |
700 | if (cmd->tf_flags & IDE_TFLAG_IN_LBAM) | 676 | if (valid & IDE_VALID_LBAM) |
701 | tf->lbam = scc_ide_inb(io_ports->lbam_addr); | 677 | tf->lbam = scc_ide_inb(io_ports->lbam_addr); |
702 | if (cmd->tf_flags & IDE_TFLAG_IN_LBAH) | 678 | if (valid & IDE_VALID_LBAH) |
703 | tf->lbah = scc_ide_inb(io_ports->lbah_addr); | 679 | tf->lbah = scc_ide_inb(io_ports->lbah_addr); |
704 | if (cmd->tf_flags & IDE_TFLAG_IN_DEVICE) | 680 | if (valid & IDE_VALID_DEVICE) |
705 | tf->device = scc_ide_inb(io_ports->device_addr); | 681 | tf->device = scc_ide_inb(io_ports->device_addr); |
706 | |||
707 | if (cmd->tf_flags & IDE_TFLAG_LBA48) { | ||
708 | scc_ide_outb(ATA_HOB | ATA_DEVCTL_OBS, io_ports->ctl_addr); | ||
709 | |||
710 | if (cmd->tf_flags & IDE_TFLAG_IN_HOB_ERROR) | ||
711 | tf->hob_error = scc_ide_inb(io_ports->feature_addr); | ||
712 | if (cmd->tf_flags & IDE_TFLAG_IN_HOB_NSECT) | ||
713 | tf->hob_nsect = scc_ide_inb(io_ports->nsect_addr); | ||
714 | if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAL) | ||
715 | tf->hob_lbal = scc_ide_inb(io_ports->lbal_addr); | ||
716 | if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAM) | ||
717 | tf->hob_lbam = scc_ide_inb(io_ports->lbam_addr); | ||
718 | if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAH) | ||
719 | tf->hob_lbah = scc_ide_inb(io_ports->lbah_addr); | ||
720 | } | ||
721 | } | 682 | } |
722 | 683 | ||
723 | static void scc_input_data(ide_drive_t *drive, struct ide_cmd *cmd, | 684 | static void scc_input_data(ide_drive_t *drive, struct ide_cmd *cmd, |
diff --git a/drivers/ide/tx4938ide.c b/drivers/ide/tx4938ide.c index 4cb79c4c2604..e33d764e2945 100644 --- a/drivers/ide/tx4938ide.c +++ b/drivers/ide/tx4938ide.c | |||
@@ -72,91 +72,6 @@ static void tx4938ide_set_pio_mode(ide_drive_t *drive, const u8 pio) | |||
72 | #ifdef __BIG_ENDIAN | 72 | #ifdef __BIG_ENDIAN |
73 | 73 | ||
74 | /* custom iops (independent from SWAP_IO_SPACE) */ | 74 | /* custom iops (independent from SWAP_IO_SPACE) */ |
75 | static u8 tx4938ide_inb(unsigned long port) | ||
76 | { | ||
77 | return __raw_readb((void __iomem *)port); | ||
78 | } | ||
79 | |||
80 | static void tx4938ide_outb(u8 value, unsigned long port) | ||
81 | { | ||
82 | __raw_writeb(value, (void __iomem *)port); | ||
83 | } | ||
84 | |||
85 | static void tx4938ide_tf_load(ide_drive_t *drive, struct ide_cmd *cmd) | ||
86 | { | ||
87 | ide_hwif_t *hwif = drive->hwif; | ||
88 | struct ide_io_ports *io_ports = &hwif->io_ports; | ||
89 | struct ide_taskfile *tf = &cmd->tf; | ||
90 | u8 HIHI = cmd->tf_flags & IDE_TFLAG_LBA48 ? 0xE0 : 0xEF; | ||
91 | |||
92 | if (cmd->ftf_flags & IDE_FTFLAG_FLAGGED) | ||
93 | HIHI = 0xFF; | ||
94 | |||
95 | if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE) | ||
96 | tx4938ide_outb(tf->hob_feature, io_ports->feature_addr); | ||
97 | if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_NSECT) | ||
98 | tx4938ide_outb(tf->hob_nsect, io_ports->nsect_addr); | ||
99 | if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_LBAL) | ||
100 | tx4938ide_outb(tf->hob_lbal, io_ports->lbal_addr); | ||
101 | if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_LBAM) | ||
102 | tx4938ide_outb(tf->hob_lbam, io_ports->lbam_addr); | ||
103 | if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_LBAH) | ||
104 | tx4938ide_outb(tf->hob_lbah, io_ports->lbah_addr); | ||
105 | |||
106 | if (cmd->tf_flags & IDE_TFLAG_OUT_FEATURE) | ||
107 | tx4938ide_outb(tf->feature, io_ports->feature_addr); | ||
108 | if (cmd->tf_flags & IDE_TFLAG_OUT_NSECT) | ||
109 | tx4938ide_outb(tf->nsect, io_ports->nsect_addr); | ||
110 | if (cmd->tf_flags & IDE_TFLAG_OUT_LBAL) | ||
111 | tx4938ide_outb(tf->lbal, io_ports->lbal_addr); | ||
112 | if (cmd->tf_flags & IDE_TFLAG_OUT_LBAM) | ||
113 | tx4938ide_outb(tf->lbam, io_ports->lbam_addr); | ||
114 | if (cmd->tf_flags & IDE_TFLAG_OUT_LBAH) | ||
115 | tx4938ide_outb(tf->lbah, io_ports->lbah_addr); | ||
116 | |||
117 | if (cmd->tf_flags & IDE_TFLAG_OUT_DEVICE) | ||
118 | tx4938ide_outb((tf->device & HIHI) | drive->select, | ||
119 | io_ports->device_addr); | ||
120 | } | ||
121 | |||
122 | static void tx4938ide_tf_read(ide_drive_t *drive, struct ide_cmd *cmd) | ||
123 | { | ||
124 | ide_hwif_t *hwif = drive->hwif; | ||
125 | struct ide_io_ports *io_ports = &hwif->io_ports; | ||
126 | struct ide_taskfile *tf = &cmd->tf; | ||
127 | |||
128 | /* be sure we're looking at the low order bits */ | ||
129 | tx4938ide_outb(ATA_DEVCTL_OBS, io_ports->ctl_addr); | ||
130 | |||
131 | if (cmd->tf_flags & IDE_TFLAG_IN_ERROR) | ||
132 | tf->error = tx4938ide_inb(io_ports->feature_addr); | ||
133 | if (cmd->tf_flags & IDE_TFLAG_IN_NSECT) | ||
134 | tf->nsect = tx4938ide_inb(io_ports->nsect_addr); | ||
135 | if (cmd->tf_flags & IDE_TFLAG_IN_LBAL) | ||
136 | tf->lbal = tx4938ide_inb(io_ports->lbal_addr); | ||
137 | if (cmd->tf_flags & IDE_TFLAG_IN_LBAM) | ||
138 | tf->lbam = tx4938ide_inb(io_ports->lbam_addr); | ||
139 | if (cmd->tf_flags & IDE_TFLAG_IN_LBAH) | ||
140 | tf->lbah = tx4938ide_inb(io_ports->lbah_addr); | ||
141 | if (cmd->tf_flags & IDE_TFLAG_IN_DEVICE) | ||
142 | tf->device = tx4938ide_inb(io_ports->device_addr); | ||
143 | |||
144 | if (cmd->tf_flags & IDE_TFLAG_LBA48) { | ||
145 | tx4938ide_outb(ATA_HOB | ATA_DEVCTL_OBS, io_ports->ctl_addr); | ||
146 | |||
147 | if (cmd->tf_flags & IDE_TFLAG_IN_HOB_ERROR) | ||
148 | tf->hob_error = tx4938ide_inb(io_ports->feature_addr); | ||
149 | if (cmd->tf_flags & IDE_TFLAG_IN_HOB_NSECT) | ||
150 | tf->hob_nsect = tx4938ide_inb(io_ports->nsect_addr); | ||
151 | if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAL) | ||
152 | tf->hob_lbal = tx4938ide_inb(io_ports->lbal_addr); | ||
153 | if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAM) | ||
154 | tf->hob_lbam = tx4938ide_inb(io_ports->lbam_addr); | ||
155 | if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAH) | ||
156 | tf->hob_lbah = tx4938ide_inb(io_ports->lbah_addr); | ||
157 | } | ||
158 | } | ||
159 | |||
160 | static void tx4938ide_input_data_swap(ide_drive_t *drive, struct ide_cmd *cmd, | 75 | static void tx4938ide_input_data_swap(ide_drive_t *drive, struct ide_cmd *cmd, |
161 | void *buf, unsigned int len) | 76 | void *buf, unsigned int len) |
162 | { | 77 | { |
@@ -190,8 +105,8 @@ static const struct ide_tp_ops tx4938ide_tp_ops = { | |||
190 | .write_devctl = ide_write_devctl, | 105 | .write_devctl = ide_write_devctl, |
191 | 106 | ||
192 | .dev_select = ide_dev_select, | 107 | .dev_select = ide_dev_select, |
193 | .tf_load = tx4938ide_tf_load, | 108 | .tf_load = ide_tf_load, |
194 | .tf_read = tx4938ide_tf_read, | 109 | .tf_read = ide_tf_read, |
195 | 110 | ||
196 | .input_data = tx4938ide_input_data_swap, | 111 | .input_data = tx4938ide_input_data_swap, |
197 | .output_data = tx4938ide_output_data_swap, | 112 | .output_data = tx4938ide_output_data_swap, |
diff --git a/drivers/ide/tx4939ide.c b/drivers/ide/tx4939ide.c index 0040a9a3e26e..564422d23976 100644 --- a/drivers/ide/tx4939ide.c +++ b/drivers/ide/tx4939ide.c | |||
@@ -327,15 +327,15 @@ static int tx4939ide_dma_end(ide_drive_t *drive) | |||
327 | /* read and clear the INTR & ERROR bits */ | 327 | /* read and clear the INTR & ERROR bits */ |
328 | dma_stat = tx4939ide_clear_dma_status(base); | 328 | dma_stat = tx4939ide_clear_dma_status(base); |
329 | 329 | ||
330 | wmb(); | 330 | #define CHECK_DMA_MASK (ATA_DMA_ACTIVE | ATA_DMA_ERR | ATA_DMA_INTR) |
331 | 331 | ||
332 | /* verify good DMA status */ | 332 | /* verify good DMA status */ |
333 | if ((dma_stat & (ATA_DMA_INTR | ATA_DMA_ERR | ATA_DMA_ACTIVE)) == 0 && | 333 | if ((dma_stat & CHECK_DMA_MASK) == 0 && |
334 | (ctl & (TX4939IDE_INT_XFEREND | TX4939IDE_INT_HOST)) == | 334 | (ctl & (TX4939IDE_INT_XFEREND | TX4939IDE_INT_HOST)) == |
335 | (TX4939IDE_INT_XFEREND | TX4939IDE_INT_HOST)) | 335 | (TX4939IDE_INT_XFEREND | TX4939IDE_INT_HOST)) |
336 | /* INT_IDE lost... bug? */ | 336 | /* INT_IDE lost... bug? */ |
337 | return 0; | 337 | return 0; |
338 | return ((dma_stat & (ATA_DMA_INTR | ATA_DMA_ERR | ATA_DMA_ACTIVE)) != | 338 | return ((dma_stat & CHECK_DMA_MASK) != |
339 | ATA_DMA_INTR) ? 0x10 | dma_stat : 0; | 339 | ATA_DMA_INTR) ? 0x10 | dma_stat : 0; |
340 | } | 340 | } |
341 | 341 | ||
@@ -434,97 +434,19 @@ static void tx4939ide_tf_load_fixup(ide_drive_t *drive) | |||
434 | tx4939ide_writew(sysctl, base, TX4939IDE_Sys_Ctl); | 434 | tx4939ide_writew(sysctl, base, TX4939IDE_Sys_Ctl); |
435 | } | 435 | } |
436 | 436 | ||
437 | #ifdef __BIG_ENDIAN | 437 | static void tx4939ide_tf_load(ide_drive_t *drive, struct ide_taskfile *tf, |
438 | 438 | u8 valid) | |
439 | /* custom iops (independent from SWAP_IO_SPACE) */ | ||
440 | static u8 tx4939ide_inb(unsigned long port) | ||
441 | { | 439 | { |
442 | return __raw_readb((void __iomem *)port); | 440 | ide_tf_load(drive, tf, valid); |
443 | } | ||
444 | 441 | ||
445 | static void tx4939ide_outb(u8 value, unsigned long port) | 442 | if (valid & IDE_VALID_DEVICE) |
446 | { | ||
447 | __raw_writeb(value, (void __iomem *)port); | ||
448 | } | ||
449 | |||
450 | static void tx4939ide_tf_load(ide_drive_t *drive, struct ide_cmd *cmd) | ||
451 | { | ||
452 | ide_hwif_t *hwif = drive->hwif; | ||
453 | struct ide_io_ports *io_ports = &hwif->io_ports; | ||
454 | struct ide_taskfile *tf = &cmd->tf; | ||
455 | u8 HIHI = cmd->tf_flags & IDE_TFLAG_LBA48 ? 0xE0 : 0xEF; | ||
456 | |||
457 | if (cmd->ftf_flags & IDE_FTFLAG_FLAGGED) | ||
458 | HIHI = 0xFF; | ||
459 | |||
460 | if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE) | ||
461 | tx4939ide_outb(tf->hob_feature, io_ports->feature_addr); | ||
462 | if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_NSECT) | ||
463 | tx4939ide_outb(tf->hob_nsect, io_ports->nsect_addr); | ||
464 | if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_LBAL) | ||
465 | tx4939ide_outb(tf->hob_lbal, io_ports->lbal_addr); | ||
466 | if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_LBAM) | ||
467 | tx4939ide_outb(tf->hob_lbam, io_ports->lbam_addr); | ||
468 | if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_LBAH) | ||
469 | tx4939ide_outb(tf->hob_lbah, io_ports->lbah_addr); | ||
470 | |||
471 | if (cmd->tf_flags & IDE_TFLAG_OUT_FEATURE) | ||
472 | tx4939ide_outb(tf->feature, io_ports->feature_addr); | ||
473 | if (cmd->tf_flags & IDE_TFLAG_OUT_NSECT) | ||
474 | tx4939ide_outb(tf->nsect, io_ports->nsect_addr); | ||
475 | if (cmd->tf_flags & IDE_TFLAG_OUT_LBAL) | ||
476 | tx4939ide_outb(tf->lbal, io_ports->lbal_addr); | ||
477 | if (cmd->tf_flags & IDE_TFLAG_OUT_LBAM) | ||
478 | tx4939ide_outb(tf->lbam, io_ports->lbam_addr); | ||
479 | if (cmd->tf_flags & IDE_TFLAG_OUT_LBAH) | ||
480 | tx4939ide_outb(tf->lbah, io_ports->lbah_addr); | ||
481 | |||
482 | if (cmd->tf_flags & IDE_TFLAG_OUT_DEVICE) { | ||
483 | tx4939ide_outb((tf->device & HIHI) | drive->select, | ||
484 | io_ports->device_addr); | ||
485 | tx4939ide_tf_load_fixup(drive); | 443 | tx4939ide_tf_load_fixup(drive); |
486 | } | ||
487 | } | 444 | } |
488 | 445 | ||
489 | static void tx4939ide_tf_read(ide_drive_t *drive, struct ide_cmd *cmd) | 446 | #ifdef __BIG_ENDIAN |
490 | { | ||
491 | ide_hwif_t *hwif = drive->hwif; | ||
492 | struct ide_io_ports *io_ports = &hwif->io_ports; | ||
493 | struct ide_taskfile *tf = &cmd->tf; | ||
494 | |||
495 | /* be sure we're looking at the low order bits */ | ||
496 | tx4939ide_outb(ATA_DEVCTL_OBS, io_ports->ctl_addr); | ||
497 | |||
498 | if (cmd->tf_flags & IDE_TFLAG_IN_ERROR) | ||
499 | tf->error = tx4939ide_inb(io_ports->feature_addr); | ||
500 | if (cmd->tf_flags & IDE_TFLAG_IN_NSECT) | ||
501 | tf->nsect = tx4939ide_inb(io_ports->nsect_addr); | ||
502 | if (cmd->tf_flags & IDE_TFLAG_IN_LBAL) | ||
503 | tf->lbal = tx4939ide_inb(io_ports->lbal_addr); | ||
504 | if (cmd->tf_flags & IDE_TFLAG_IN_LBAM) | ||
505 | tf->lbam = tx4939ide_inb(io_ports->lbam_addr); | ||
506 | if (cmd->tf_flags & IDE_TFLAG_IN_LBAH) | ||
507 | tf->lbah = tx4939ide_inb(io_ports->lbah_addr); | ||
508 | if (cmd->tf_flags & IDE_TFLAG_IN_DEVICE) | ||
509 | tf->device = tx4939ide_inb(io_ports->device_addr); | ||
510 | |||
511 | if (cmd->tf_flags & IDE_TFLAG_LBA48) { | ||
512 | tx4939ide_outb(ATA_HOB | ATA_DEVCTL_OBS, io_ports->ctl_addr); | ||
513 | |||
514 | if (cmd->tf_flags & IDE_TFLAG_IN_HOB_ERROR) | ||
515 | tf->hob_error = tx4939ide_inb(io_ports->feature_addr); | ||
516 | if (cmd->tf_flags & IDE_TFLAG_IN_HOB_NSECT) | ||
517 | tf->hob_nsect = tx4939ide_inb(io_ports->nsect_addr); | ||
518 | if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAL) | ||
519 | tf->hob_lbal = tx4939ide_inb(io_ports->lbal_addr); | ||
520 | if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAM) | ||
521 | tf->hob_lbam = tx4939ide_inb(io_ports->lbam_addr); | ||
522 | if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAH) | ||
523 | tf->hob_lbah = tx4939ide_inb(io_ports->lbah_addr); | ||
524 | } | ||
525 | } | ||
526 | 447 | ||
527 | static void tx4939ide_input_data_swap(ide_drive_t *drive, struct request *rq, | 448 | /* custom iops (independent from SWAP_IO_SPACE) */ |
449 | static void tx4939ide_input_data_swap(ide_drive_t *drive, struct ide_cmd *cmd, | ||
528 | void *buf, unsigned int len) | 450 | void *buf, unsigned int len) |
529 | { | 451 | { |
530 | unsigned long port = drive->hwif->io_ports.data_addr; | 452 | unsigned long port = drive->hwif->io_ports.data_addr; |
@@ -536,7 +458,7 @@ static void tx4939ide_input_data_swap(ide_drive_t *drive, struct request *rq, | |||
536 | __ide_flush_dcache_range((unsigned long)buf, roundup(len, 2)); | 458 | __ide_flush_dcache_range((unsigned long)buf, roundup(len, 2)); |
537 | } | 459 | } |
538 | 460 | ||
539 | static void tx4939ide_output_data_swap(ide_drive_t *drive, struct request *rq, | 461 | static void tx4939ide_output_data_swap(ide_drive_t *drive, struct ide_cmd *cmd, |
540 | void *buf, unsigned int len) | 462 | void *buf, unsigned int len) |
541 | { | 463 | { |
542 | unsigned long port = drive->hwif->io_ports.data_addr; | 464 | unsigned long port = drive->hwif->io_ports.data_addr; |
@@ -558,7 +480,7 @@ static const struct ide_tp_ops tx4939ide_tp_ops = { | |||
558 | 480 | ||
559 | .dev_select = ide_dev_select, | 481 | .dev_select = ide_dev_select, |
560 | .tf_load = tx4939ide_tf_load, | 482 | .tf_load = tx4939ide_tf_load, |
561 | .tf_read = tx4939ide_tf_read, | 483 | .tf_read = ide_tf_read, |
562 | 484 | ||
563 | .input_data = tx4939ide_input_data_swap, | 485 | .input_data = tx4939ide_input_data_swap, |
564 | .output_data = tx4939ide_output_data_swap, | 486 | .output_data = tx4939ide_output_data_swap, |
@@ -566,14 +488,6 @@ static const struct ide_tp_ops tx4939ide_tp_ops = { | |||
566 | 488 | ||
567 | #else /* __LITTLE_ENDIAN */ | 489 | #else /* __LITTLE_ENDIAN */ |
568 | 490 | ||
569 | static void tx4939ide_tf_load(ide_drive_t *drive, struct ide_cmd *cmd) | ||
570 | { | ||
571 | ide_tf_load(drive, cmd); | ||
572 | |||
573 | if (cmd->tf_flags & IDE_TFLAG_OUT_DEVICE) | ||
574 | tx4939ide_tf_load_fixup(drive); | ||
575 | } | ||
576 | |||
577 | static const struct ide_tp_ops tx4939ide_tp_ops = { | 491 | static const struct ide_tp_ops tx4939ide_tp_ops = { |
578 | .exec_command = ide_exec_command, | 492 | .exec_command = ide_exec_command, |
579 | .read_status = ide_read_status, | 493 | .read_status = ide_read_status, |
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 2a2e50871b40..851de83ff455 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c | |||
@@ -297,21 +297,25 @@ static void cma_detach_from_dev(struct rdma_id_private *id_priv) | |||
297 | id_priv->cma_dev = NULL; | 297 | id_priv->cma_dev = NULL; |
298 | } | 298 | } |
299 | 299 | ||
300 | static int cma_set_qkey(struct ib_device *device, u8 port_num, | 300 | static int cma_set_qkey(struct rdma_id_private *id_priv) |
301 | enum rdma_port_space ps, | ||
302 | struct rdma_dev_addr *dev_addr, u32 *qkey) | ||
303 | { | 301 | { |
304 | struct ib_sa_mcmember_rec rec; | 302 | struct ib_sa_mcmember_rec rec; |
305 | int ret = 0; | 303 | int ret = 0; |
306 | 304 | ||
307 | switch (ps) { | 305 | if (id_priv->qkey) |
306 | return 0; | ||
307 | |||
308 | switch (id_priv->id.ps) { | ||
308 | case RDMA_PS_UDP: | 309 | case RDMA_PS_UDP: |
309 | *qkey = RDMA_UDP_QKEY; | 310 | id_priv->qkey = RDMA_UDP_QKEY; |
310 | break; | 311 | break; |
311 | case RDMA_PS_IPOIB: | 312 | case RDMA_PS_IPOIB: |
312 | ib_addr_get_mgid(dev_addr, &rec.mgid); | 313 | ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid); |
313 | ret = ib_sa_get_mcmember_rec(device, port_num, &rec.mgid, &rec); | 314 | ret = ib_sa_get_mcmember_rec(id_priv->id.device, |
314 | *qkey = be32_to_cpu(rec.qkey); | 315 | id_priv->id.port_num, &rec.mgid, |
316 | &rec); | ||
317 | if (!ret) | ||
318 | id_priv->qkey = be32_to_cpu(rec.qkey); | ||
315 | break; | 319 | break; |
316 | default: | 320 | default: |
317 | break; | 321 | break; |
@@ -341,12 +345,7 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv) | |||
341 | ret = ib_find_cached_gid(cma_dev->device, &gid, | 345 | ret = ib_find_cached_gid(cma_dev->device, &gid, |
342 | &id_priv->id.port_num, NULL); | 346 | &id_priv->id.port_num, NULL); |
343 | if (!ret) { | 347 | if (!ret) { |
344 | ret = cma_set_qkey(cma_dev->device, | 348 | cma_attach_to_dev(id_priv, cma_dev); |
345 | id_priv->id.port_num, | ||
346 | id_priv->id.ps, dev_addr, | ||
347 | &id_priv->qkey); | ||
348 | if (!ret) | ||
349 | cma_attach_to_dev(id_priv, cma_dev); | ||
350 | break; | 349 | break; |
351 | } | 350 | } |
352 | } | 351 | } |
@@ -578,6 +577,10 @@ static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv, | |||
578 | *qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT; | 577 | *qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT; |
579 | 578 | ||
580 | if (cma_is_ud_ps(id_priv->id.ps)) { | 579 | if (cma_is_ud_ps(id_priv->id.ps)) { |
580 | ret = cma_set_qkey(id_priv); | ||
581 | if (ret) | ||
582 | return ret; | ||
583 | |||
581 | qp_attr->qkey = id_priv->qkey; | 584 | qp_attr->qkey = id_priv->qkey; |
582 | *qp_attr_mask |= IB_QP_QKEY; | 585 | *qp_attr_mask |= IB_QP_QKEY; |
583 | } else { | 586 | } else { |
@@ -2201,6 +2204,12 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id, | |||
2201 | event.status = ib_event->param.sidr_rep_rcvd.status; | 2204 | event.status = ib_event->param.sidr_rep_rcvd.status; |
2202 | break; | 2205 | break; |
2203 | } | 2206 | } |
2207 | ret = cma_set_qkey(id_priv); | ||
2208 | if (ret) { | ||
2209 | event.event = RDMA_CM_EVENT_ADDR_ERROR; | ||
2210 | event.status = -EINVAL; | ||
2211 | break; | ||
2212 | } | ||
2204 | if (id_priv->qkey != rep->qkey) { | 2213 | if (id_priv->qkey != rep->qkey) { |
2205 | event.event = RDMA_CM_EVENT_UNREACHABLE; | 2214 | event.event = RDMA_CM_EVENT_UNREACHABLE; |
2206 | event.status = -EINVAL; | 2215 | event.status = -EINVAL; |
@@ -2480,10 +2489,14 @@ static int cma_send_sidr_rep(struct rdma_id_private *id_priv, | |||
2480 | const void *private_data, int private_data_len) | 2489 | const void *private_data, int private_data_len) |
2481 | { | 2490 | { |
2482 | struct ib_cm_sidr_rep_param rep; | 2491 | struct ib_cm_sidr_rep_param rep; |
2492 | int ret; | ||
2483 | 2493 | ||
2484 | memset(&rep, 0, sizeof rep); | 2494 | memset(&rep, 0, sizeof rep); |
2485 | rep.status = status; | 2495 | rep.status = status; |
2486 | if (status == IB_SIDR_SUCCESS) { | 2496 | if (status == IB_SIDR_SUCCESS) { |
2497 | ret = cma_set_qkey(id_priv); | ||
2498 | if (ret) | ||
2499 | return ret; | ||
2487 | rep.qp_num = id_priv->qp_num; | 2500 | rep.qp_num = id_priv->qp_num; |
2488 | rep.qkey = id_priv->qkey; | 2501 | rep.qkey = id_priv->qkey; |
2489 | } | 2502 | } |
@@ -2713,6 +2726,10 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv, | |||
2713 | IB_SA_MCMEMBER_REC_FLOW_LABEL | | 2726 | IB_SA_MCMEMBER_REC_FLOW_LABEL | |
2714 | IB_SA_MCMEMBER_REC_TRAFFIC_CLASS; | 2727 | IB_SA_MCMEMBER_REC_TRAFFIC_CLASS; |
2715 | 2728 | ||
2729 | if (id_priv->id.ps == RDMA_PS_IPOIB) | ||
2730 | comp_mask |= IB_SA_MCMEMBER_REC_RATE | | ||
2731 | IB_SA_MCMEMBER_REC_RATE_SELECTOR; | ||
2732 | |||
2716 | mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device, | 2733 | mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device, |
2717 | id_priv->id.port_num, &rec, | 2734 | id_priv->id.port_num, &rec, |
2718 | comp_mask, GFP_KERNEL, | 2735 | comp_mask, GFP_KERNEL, |
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c index a4a82bff7100..8d71086f5a1c 100644 --- a/drivers/infiniband/hw/cxgb3/cxio_hal.c +++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c | |||
@@ -152,7 +152,7 @@ static int cxio_hal_clear_qp_ctx(struct cxio_rdev *rdev_p, u32 qpid) | |||
152 | sge_cmd = qpid << 8 | 3; | 152 | sge_cmd = qpid << 8 | 3; |
153 | wqe->sge_cmd = cpu_to_be64(sge_cmd); | 153 | wqe->sge_cmd = cpu_to_be64(sge_cmd); |
154 | skb->priority = CPL_PRIORITY_CONTROL; | 154 | skb->priority = CPL_PRIORITY_CONTROL; |
155 | return (cxgb3_ofld_send(rdev_p->t3cdev_p, skb)); | 155 | return iwch_cxgb3_ofld_send(rdev_p->t3cdev_p, skb); |
156 | } | 156 | } |
157 | 157 | ||
158 | int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq) | 158 | int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq) |
@@ -571,7 +571,7 @@ static int cxio_hal_init_ctrl_qp(struct cxio_rdev *rdev_p) | |||
571 | (unsigned long long) rdev_p->ctrl_qp.dma_addr, | 571 | (unsigned long long) rdev_p->ctrl_qp.dma_addr, |
572 | rdev_p->ctrl_qp.workq, 1 << T3_CTRL_QP_SIZE_LOG2); | 572 | rdev_p->ctrl_qp.workq, 1 << T3_CTRL_QP_SIZE_LOG2); |
573 | skb->priority = CPL_PRIORITY_CONTROL; | 573 | skb->priority = CPL_PRIORITY_CONTROL; |
574 | return (cxgb3_ofld_send(rdev_p->t3cdev_p, skb)); | 574 | return iwch_cxgb3_ofld_send(rdev_p->t3cdev_p, skb); |
575 | err: | 575 | err: |
576 | kfree_skb(skb); | 576 | kfree_skb(skb); |
577 | return err; | 577 | return err; |
@@ -701,7 +701,7 @@ static int __cxio_tpt_op(struct cxio_rdev *rdev_p, u32 reset_tpt_entry, | |||
701 | u32 stag_idx; | 701 | u32 stag_idx; |
702 | u32 wptr; | 702 | u32 wptr; |
703 | 703 | ||
704 | if (rdev_p->flags) | 704 | if (cxio_fatal_error(rdev_p)) |
705 | return -EIO; | 705 | return -EIO; |
706 | 706 | ||
707 | stag_state = stag_state > 0; | 707 | stag_state = stag_state > 0; |
@@ -858,7 +858,7 @@ int cxio_rdma_init(struct cxio_rdev *rdev_p, struct t3_rdma_init_attr *attr) | |||
858 | wqe->qp_dma_size = cpu_to_be32(attr->qp_dma_size); | 858 | wqe->qp_dma_size = cpu_to_be32(attr->qp_dma_size); |
859 | wqe->irs = cpu_to_be32(attr->irs); | 859 | wqe->irs = cpu_to_be32(attr->irs); |
860 | skb->priority = 0; /* 0=>ToeQ; 1=>CtrlQ */ | 860 | skb->priority = 0; /* 0=>ToeQ; 1=>CtrlQ */ |
861 | return (cxgb3_ofld_send(rdev_p->t3cdev_p, skb)); | 861 | return iwch_cxgb3_ofld_send(rdev_p->t3cdev_p, skb); |
862 | } | 862 | } |
863 | 863 | ||
864 | void cxio_register_ev_cb(cxio_hal_ev_callback_func_t ev_cb) | 864 | void cxio_register_ev_cb(cxio_hal_ev_callback_func_t ev_cb) |
@@ -1041,9 +1041,9 @@ void cxio_rdev_close(struct cxio_rdev *rdev_p) | |||
1041 | cxio_hal_pblpool_destroy(rdev_p); | 1041 | cxio_hal_pblpool_destroy(rdev_p); |
1042 | cxio_hal_rqtpool_destroy(rdev_p); | 1042 | cxio_hal_rqtpool_destroy(rdev_p); |
1043 | list_del(&rdev_p->entry); | 1043 | list_del(&rdev_p->entry); |
1044 | rdev_p->t3cdev_p->ulp = NULL; | ||
1045 | cxio_hal_destroy_ctrl_qp(rdev_p); | 1044 | cxio_hal_destroy_ctrl_qp(rdev_p); |
1046 | cxio_hal_destroy_resource(rdev_p->rscp); | 1045 | cxio_hal_destroy_resource(rdev_p->rscp); |
1046 | rdev_p->t3cdev_p->ulp = NULL; | ||
1047 | } | 1047 | } |
1048 | } | 1048 | } |
1049 | 1049 | ||
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.h b/drivers/infiniband/hw/cxgb3/cxio_hal.h index 094a66d1480c..bfd03bf8be54 100644 --- a/drivers/infiniband/hw/cxgb3/cxio_hal.h +++ b/drivers/infiniband/hw/cxgb3/cxio_hal.h | |||
@@ -115,6 +115,11 @@ struct cxio_rdev { | |||
115 | #define CXIO_ERROR_FATAL 1 | 115 | #define CXIO_ERROR_FATAL 1 |
116 | }; | 116 | }; |
117 | 117 | ||
118 | static inline int cxio_fatal_error(struct cxio_rdev *rdev_p) | ||
119 | { | ||
120 | return rdev_p->flags & CXIO_ERROR_FATAL; | ||
121 | } | ||
122 | |||
118 | static inline int cxio_num_stags(struct cxio_rdev *rdev_p) | 123 | static inline int cxio_num_stags(struct cxio_rdev *rdev_p) |
119 | { | 124 | { |
120 | return min((int)T3_MAX_NUM_STAG, (int)((rdev_p->rnic_info.tpt_top - rdev_p->rnic_info.tpt_base) >> 5)); | 125 | return min((int)T3_MAX_NUM_STAG, (int)((rdev_p->rnic_info.tpt_top - rdev_p->rnic_info.tpt_base) >> 5)); |
@@ -188,6 +193,7 @@ void cxio_count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count); | |||
188 | void cxio_flush_hw_cq(struct t3_cq *cq); | 193 | void cxio_flush_hw_cq(struct t3_cq *cq); |
189 | int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe, | 194 | int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe, |
190 | u8 *cqe_flushed, u64 *cookie, u32 *credit); | 195 | u8 *cqe_flushed, u64 *cookie, u32 *credit); |
196 | int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb); | ||
191 | 197 | ||
192 | #define MOD "iw_cxgb3: " | 198 | #define MOD "iw_cxgb3: " |
193 | #define PDBG(fmt, args...) pr_debug(MOD fmt, ## args) | 199 | #define PDBG(fmt, args...) pr_debug(MOD fmt, ## args) |
diff --git a/drivers/infiniband/hw/cxgb3/iwch.c b/drivers/infiniband/hw/cxgb3/iwch.c index 37a4fc264a07..26fc0a4eaa74 100644 --- a/drivers/infiniband/hw/cxgb3/iwch.c +++ b/drivers/infiniband/hw/cxgb3/iwch.c | |||
@@ -165,12 +165,19 @@ static void close_rnic_dev(struct t3cdev *tdev) | |||
165 | static void iwch_err_handler(struct t3cdev *tdev, u32 status, u32 error) | 165 | static void iwch_err_handler(struct t3cdev *tdev, u32 status, u32 error) |
166 | { | 166 | { |
167 | struct cxio_rdev *rdev = tdev->ulp; | 167 | struct cxio_rdev *rdev = tdev->ulp; |
168 | struct iwch_dev *rnicp = rdev_to_iwch_dev(rdev); | ||
169 | struct ib_event event; | ||
168 | 170 | ||
169 | if (status == OFFLOAD_STATUS_DOWN) | 171 | if (status == OFFLOAD_STATUS_DOWN) { |
170 | rdev->flags = CXIO_ERROR_FATAL; | 172 | rdev->flags = CXIO_ERROR_FATAL; |
171 | 173 | ||
172 | return; | 174 | event.device = &rnicp->ibdev; |
175 | event.event = IB_EVENT_DEVICE_FATAL; | ||
176 | event.element.port_num = 0; | ||
177 | ib_dispatch_event(&event); | ||
178 | } | ||
173 | 179 | ||
180 | return; | ||
174 | } | 181 | } |
175 | 182 | ||
176 | static int __init iwch_init_module(void) | 183 | static int __init iwch_init_module(void) |
diff --git a/drivers/infiniband/hw/cxgb3/iwch.h b/drivers/infiniband/hw/cxgb3/iwch.h index 3773453b2cf0..84735506333f 100644 --- a/drivers/infiniband/hw/cxgb3/iwch.h +++ b/drivers/infiniband/hw/cxgb3/iwch.h | |||
@@ -117,6 +117,11 @@ static inline struct iwch_dev *to_iwch_dev(struct ib_device *ibdev) | |||
117 | return container_of(ibdev, struct iwch_dev, ibdev); | 117 | return container_of(ibdev, struct iwch_dev, ibdev); |
118 | } | 118 | } |
119 | 119 | ||
120 | static inline struct iwch_dev *rdev_to_iwch_dev(struct cxio_rdev *rdev) | ||
121 | { | ||
122 | return container_of(rdev, struct iwch_dev, rdev); | ||
123 | } | ||
124 | |||
120 | static inline int t3b_device(const struct iwch_dev *rhp) | 125 | static inline int t3b_device(const struct iwch_dev *rhp) |
121 | { | 126 | { |
122 | return rhp->rdev.t3cdev_p->type == T3B; | 127 | return rhp->rdev.t3cdev_p->type == T3B; |
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c index 8699947aaf6c..fef3f1ae7225 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_cm.c +++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c | |||
@@ -139,6 +139,38 @@ static void stop_ep_timer(struct iwch_ep *ep) | |||
139 | put_ep(&ep->com); | 139 | put_ep(&ep->com); |
140 | } | 140 | } |
141 | 141 | ||
142 | int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_entry *l2e) | ||
143 | { | ||
144 | int error = 0; | ||
145 | struct cxio_rdev *rdev; | ||
146 | |||
147 | rdev = (struct cxio_rdev *)tdev->ulp; | ||
148 | if (cxio_fatal_error(rdev)) { | ||
149 | kfree_skb(skb); | ||
150 | return -EIO; | ||
151 | } | ||
152 | error = l2t_send(tdev, skb, l2e); | ||
153 | if (error) | ||
154 | kfree_skb(skb); | ||
155 | return error; | ||
156 | } | ||
157 | |||
158 | int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb) | ||
159 | { | ||
160 | int error = 0; | ||
161 | struct cxio_rdev *rdev; | ||
162 | |||
163 | rdev = (struct cxio_rdev *)tdev->ulp; | ||
164 | if (cxio_fatal_error(rdev)) { | ||
165 | kfree_skb(skb); | ||
166 | return -EIO; | ||
167 | } | ||
168 | error = cxgb3_ofld_send(tdev, skb); | ||
169 | if (error) | ||
170 | kfree_skb(skb); | ||
171 | return error; | ||
172 | } | ||
173 | |||
142 | static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb) | 174 | static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb) |
143 | { | 175 | { |
144 | struct cpl_tid_release *req; | 176 | struct cpl_tid_release *req; |
@@ -150,7 +182,7 @@ static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb) | |||
150 | req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); | 182 | req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); |
151 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid)); | 183 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid)); |
152 | skb->priority = CPL_PRIORITY_SETUP; | 184 | skb->priority = CPL_PRIORITY_SETUP; |
153 | cxgb3_ofld_send(tdev, skb); | 185 | iwch_cxgb3_ofld_send(tdev, skb); |
154 | return; | 186 | return; |
155 | } | 187 | } |
156 | 188 | ||
@@ -172,8 +204,7 @@ int iwch_quiesce_tid(struct iwch_ep *ep) | |||
172 | req->val = cpu_to_be64(1 << S_TCB_RX_QUIESCE); | 204 | req->val = cpu_to_be64(1 << S_TCB_RX_QUIESCE); |
173 | 205 | ||
174 | skb->priority = CPL_PRIORITY_DATA; | 206 | skb->priority = CPL_PRIORITY_DATA; |
175 | cxgb3_ofld_send(ep->com.tdev, skb); | 207 | return iwch_cxgb3_ofld_send(ep->com.tdev, skb); |
176 | return 0; | ||
177 | } | 208 | } |
178 | 209 | ||
179 | int iwch_resume_tid(struct iwch_ep *ep) | 210 | int iwch_resume_tid(struct iwch_ep *ep) |
@@ -194,8 +225,7 @@ int iwch_resume_tid(struct iwch_ep *ep) | |||
194 | req->val = 0; | 225 | req->val = 0; |
195 | 226 | ||
196 | skb->priority = CPL_PRIORITY_DATA; | 227 | skb->priority = CPL_PRIORITY_DATA; |
197 | cxgb3_ofld_send(ep->com.tdev, skb); | 228 | return iwch_cxgb3_ofld_send(ep->com.tdev, skb); |
198 | return 0; | ||
199 | } | 229 | } |
200 | 230 | ||
201 | static void set_emss(struct iwch_ep *ep, u16 opt) | 231 | static void set_emss(struct iwch_ep *ep, u16 opt) |
@@ -252,18 +282,22 @@ static void *alloc_ep(int size, gfp_t gfp) | |||
252 | 282 | ||
253 | void __free_ep(struct kref *kref) | 283 | void __free_ep(struct kref *kref) |
254 | { | 284 | { |
255 | struct iwch_ep_common *epc; | 285 | struct iwch_ep *ep; |
256 | epc = container_of(kref, struct iwch_ep_common, kref); | 286 | ep = container_of(container_of(kref, struct iwch_ep_common, kref), |
257 | PDBG("%s ep %p state %s\n", __func__, epc, states[state_read(epc)]); | 287 | struct iwch_ep, com); |
258 | kfree(epc); | 288 | PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]); |
289 | if (ep->com.flags & RELEASE_RESOURCES) { | ||
290 | cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid); | ||
291 | dst_release(ep->dst); | ||
292 | l2t_release(L2DATA(ep->com.tdev), ep->l2t); | ||
293 | } | ||
294 | kfree(ep); | ||
259 | } | 295 | } |
260 | 296 | ||
261 | static void release_ep_resources(struct iwch_ep *ep) | 297 | static void release_ep_resources(struct iwch_ep *ep) |
262 | { | 298 | { |
263 | PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid); | 299 | PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid); |
264 | cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid); | 300 | ep->com.flags |= RELEASE_RESOURCES; |
265 | dst_release(ep->dst); | ||
266 | l2t_release(L2DATA(ep->com.tdev), ep->l2t); | ||
267 | put_ep(&ep->com); | 301 | put_ep(&ep->com); |
268 | } | 302 | } |
269 | 303 | ||
@@ -382,7 +416,7 @@ static void abort_arp_failure(struct t3cdev *dev, struct sk_buff *skb) | |||
382 | 416 | ||
383 | PDBG("%s t3cdev %p\n", __func__, dev); | 417 | PDBG("%s t3cdev %p\n", __func__, dev); |
384 | req->cmd = CPL_ABORT_NO_RST; | 418 | req->cmd = CPL_ABORT_NO_RST; |
385 | cxgb3_ofld_send(dev, skb); | 419 | iwch_cxgb3_ofld_send(dev, skb); |
386 | } | 420 | } |
387 | 421 | ||
388 | static int send_halfclose(struct iwch_ep *ep, gfp_t gfp) | 422 | static int send_halfclose(struct iwch_ep *ep, gfp_t gfp) |
@@ -402,8 +436,7 @@ static int send_halfclose(struct iwch_ep *ep, gfp_t gfp) | |||
402 | req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON)); | 436 | req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON)); |
403 | req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid)); | 437 | req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid)); |
404 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, ep->hwtid)); | 438 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, ep->hwtid)); |
405 | l2t_send(ep->com.tdev, skb, ep->l2t); | 439 | return iwch_l2t_send(ep->com.tdev, skb, ep->l2t); |
406 | return 0; | ||
407 | } | 440 | } |
408 | 441 | ||
409 | static int send_abort(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp) | 442 | static int send_abort(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp) |
@@ -424,8 +457,7 @@ static int send_abort(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp) | |||
424 | req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid)); | 457 | req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid)); |
425 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid)); | 458 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid)); |
426 | req->cmd = CPL_ABORT_SEND_RST; | 459 | req->cmd = CPL_ABORT_SEND_RST; |
427 | l2t_send(ep->com.tdev, skb, ep->l2t); | 460 | return iwch_l2t_send(ep->com.tdev, skb, ep->l2t); |
428 | return 0; | ||
429 | } | 461 | } |
430 | 462 | ||
431 | static int send_connect(struct iwch_ep *ep) | 463 | static int send_connect(struct iwch_ep *ep) |
@@ -469,8 +501,7 @@ static int send_connect(struct iwch_ep *ep) | |||
469 | req->opt0l = htonl(opt0l); | 501 | req->opt0l = htonl(opt0l); |
470 | req->params = 0; | 502 | req->params = 0; |
471 | req->opt2 = htonl(opt2); | 503 | req->opt2 = htonl(opt2); |
472 | l2t_send(ep->com.tdev, skb, ep->l2t); | 504 | return iwch_l2t_send(ep->com.tdev, skb, ep->l2t); |
473 | return 0; | ||
474 | } | 505 | } |
475 | 506 | ||
476 | static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb) | 507 | static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb) |
@@ -527,7 +558,7 @@ static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb) | |||
527 | req->sndseq = htonl(ep->snd_seq); | 558 | req->sndseq = htonl(ep->snd_seq); |
528 | BUG_ON(ep->mpa_skb); | 559 | BUG_ON(ep->mpa_skb); |
529 | ep->mpa_skb = skb; | 560 | ep->mpa_skb = skb; |
530 | l2t_send(ep->com.tdev, skb, ep->l2t); | 561 | iwch_l2t_send(ep->com.tdev, skb, ep->l2t); |
531 | start_ep_timer(ep); | 562 | start_ep_timer(ep); |
532 | state_set(&ep->com, MPA_REQ_SENT); | 563 | state_set(&ep->com, MPA_REQ_SENT); |
533 | return; | 564 | return; |
@@ -578,8 +609,7 @@ static int send_mpa_reject(struct iwch_ep *ep, const void *pdata, u8 plen) | |||
578 | req->sndseq = htonl(ep->snd_seq); | 609 | req->sndseq = htonl(ep->snd_seq); |
579 | BUG_ON(ep->mpa_skb); | 610 | BUG_ON(ep->mpa_skb); |
580 | ep->mpa_skb = skb; | 611 | ep->mpa_skb = skb; |
581 | l2t_send(ep->com.tdev, skb, ep->l2t); | 612 | return iwch_l2t_send(ep->com.tdev, skb, ep->l2t); |
582 | return 0; | ||
583 | } | 613 | } |
584 | 614 | ||
585 | static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen) | 615 | static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen) |
@@ -630,8 +660,7 @@ static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen) | |||
630 | req->sndseq = htonl(ep->snd_seq); | 660 | req->sndseq = htonl(ep->snd_seq); |
631 | ep->mpa_skb = skb; | 661 | ep->mpa_skb = skb; |
632 | state_set(&ep->com, MPA_REP_SENT); | 662 | state_set(&ep->com, MPA_REP_SENT); |
633 | l2t_send(ep->com.tdev, skb, ep->l2t); | 663 | return iwch_l2t_send(ep->com.tdev, skb, ep->l2t); |
634 | return 0; | ||
635 | } | 664 | } |
636 | 665 | ||
637 | static int act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) | 666 | static int act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) |
@@ -795,7 +824,7 @@ static int update_rx_credits(struct iwch_ep *ep, u32 credits) | |||
795 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, ep->hwtid)); | 824 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, ep->hwtid)); |
796 | req->credit_dack = htonl(V_RX_CREDITS(credits) | V_RX_FORCE_ACK(1)); | 825 | req->credit_dack = htonl(V_RX_CREDITS(credits) | V_RX_FORCE_ACK(1)); |
797 | skb->priority = CPL_PRIORITY_ACK; | 826 | skb->priority = CPL_PRIORITY_ACK; |
798 | cxgb3_ofld_send(ep->com.tdev, skb); | 827 | iwch_cxgb3_ofld_send(ep->com.tdev, skb); |
799 | return credits; | 828 | return credits; |
800 | } | 829 | } |
801 | 830 | ||
@@ -1127,8 +1156,8 @@ static int abort_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) | |||
1127 | * We get 2 abort replies from the HW. The first one must | 1156 | * We get 2 abort replies from the HW. The first one must |
1128 | * be ignored except for scribbling that we need one more. | 1157 | * be ignored except for scribbling that we need one more. |
1129 | */ | 1158 | */ |
1130 | if (!(ep->flags & ABORT_REQ_IN_PROGRESS)) { | 1159 | if (!(ep->com.flags & ABORT_REQ_IN_PROGRESS)) { |
1131 | ep->flags |= ABORT_REQ_IN_PROGRESS; | 1160 | ep->com.flags |= ABORT_REQ_IN_PROGRESS; |
1132 | return CPL_RET_BUF_DONE; | 1161 | return CPL_RET_BUF_DONE; |
1133 | } | 1162 | } |
1134 | 1163 | ||
@@ -1203,8 +1232,7 @@ static int listen_start(struct iwch_listen_ep *ep) | |||
1203 | req->opt1 = htonl(V_CONN_POLICY(CPL_CONN_POLICY_ASK)); | 1232 | req->opt1 = htonl(V_CONN_POLICY(CPL_CONN_POLICY_ASK)); |
1204 | 1233 | ||
1205 | skb->priority = 1; | 1234 | skb->priority = 1; |
1206 | cxgb3_ofld_send(ep->com.tdev, skb); | 1235 | return iwch_cxgb3_ofld_send(ep->com.tdev, skb); |
1207 | return 0; | ||
1208 | } | 1236 | } |
1209 | 1237 | ||
1210 | static int pass_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) | 1238 | static int pass_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) |
@@ -1237,8 +1265,7 @@ static int listen_stop(struct iwch_listen_ep *ep) | |||
1237 | req->cpu_idx = 0; | 1265 | req->cpu_idx = 0; |
1238 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, ep->stid)); | 1266 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, ep->stid)); |
1239 | skb->priority = 1; | 1267 | skb->priority = 1; |
1240 | cxgb3_ofld_send(ep->com.tdev, skb); | 1268 | return iwch_cxgb3_ofld_send(ep->com.tdev, skb); |
1241 | return 0; | ||
1242 | } | 1269 | } |
1243 | 1270 | ||
1244 | static int close_listsrv_rpl(struct t3cdev *tdev, struct sk_buff *skb, | 1271 | static int close_listsrv_rpl(struct t3cdev *tdev, struct sk_buff *skb, |
@@ -1286,7 +1313,7 @@ static void accept_cr(struct iwch_ep *ep, __be32 peer_ip, struct sk_buff *skb) | |||
1286 | rpl->opt2 = htonl(opt2); | 1313 | rpl->opt2 = htonl(opt2); |
1287 | rpl->rsvd = rpl->opt2; /* workaround for HW bug */ | 1314 | rpl->rsvd = rpl->opt2; /* workaround for HW bug */ |
1288 | skb->priority = CPL_PRIORITY_SETUP; | 1315 | skb->priority = CPL_PRIORITY_SETUP; |
1289 | l2t_send(ep->com.tdev, skb, ep->l2t); | 1316 | iwch_l2t_send(ep->com.tdev, skb, ep->l2t); |
1290 | 1317 | ||
1291 | return; | 1318 | return; |
1292 | } | 1319 | } |
@@ -1315,7 +1342,7 @@ static void reject_cr(struct t3cdev *tdev, u32 hwtid, __be32 peer_ip, | |||
1315 | rpl->opt0l_status = htonl(CPL_PASS_OPEN_REJECT); | 1342 | rpl->opt0l_status = htonl(CPL_PASS_OPEN_REJECT); |
1316 | rpl->opt2 = 0; | 1343 | rpl->opt2 = 0; |
1317 | rpl->rsvd = rpl->opt2; | 1344 | rpl->rsvd = rpl->opt2; |
1318 | cxgb3_ofld_send(tdev, skb); | 1345 | iwch_cxgb3_ofld_send(tdev, skb); |
1319 | } | 1346 | } |
1320 | } | 1347 | } |
1321 | 1348 | ||
@@ -1534,8 +1561,8 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) | |||
1534 | * We get 2 peer aborts from the HW. The first one must | 1561 | * We get 2 peer aborts from the HW. The first one must |
1535 | * be ignored except for scribbling that we need one more. | 1562 | * be ignored except for scribbling that we need one more. |
1536 | */ | 1563 | */ |
1537 | if (!(ep->flags & PEER_ABORT_IN_PROGRESS)) { | 1564 | if (!(ep->com.flags & PEER_ABORT_IN_PROGRESS)) { |
1538 | ep->flags |= PEER_ABORT_IN_PROGRESS; | 1565 | ep->com.flags |= PEER_ABORT_IN_PROGRESS; |
1539 | return CPL_RET_BUF_DONE; | 1566 | return CPL_RET_BUF_DONE; |
1540 | } | 1567 | } |
1541 | 1568 | ||
@@ -1613,7 +1640,7 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) | |||
1613 | rpl->wr.wr_lo = htonl(V_WR_TID(ep->hwtid)); | 1640 | rpl->wr.wr_lo = htonl(V_WR_TID(ep->hwtid)); |
1614 | OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid)); | 1641 | OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid)); |
1615 | rpl->cmd = CPL_ABORT_NO_RST; | 1642 | rpl->cmd = CPL_ABORT_NO_RST; |
1616 | cxgb3_ofld_send(ep->com.tdev, rpl_skb); | 1643 | iwch_cxgb3_ofld_send(ep->com.tdev, rpl_skb); |
1617 | out: | 1644 | out: |
1618 | if (release) | 1645 | if (release) |
1619 | release_ep_resources(ep); | 1646 | release_ep_resources(ep); |
@@ -2017,8 +2044,11 @@ int iwch_destroy_listen(struct iw_cm_id *cm_id) | |||
2017 | ep->com.rpl_done = 0; | 2044 | ep->com.rpl_done = 0; |
2018 | ep->com.rpl_err = 0; | 2045 | ep->com.rpl_err = 0; |
2019 | err = listen_stop(ep); | 2046 | err = listen_stop(ep); |
2047 | if (err) | ||
2048 | goto done; | ||
2020 | wait_event(ep->com.waitq, ep->com.rpl_done); | 2049 | wait_event(ep->com.waitq, ep->com.rpl_done); |
2021 | cxgb3_free_stid(ep->com.tdev, ep->stid); | 2050 | cxgb3_free_stid(ep->com.tdev, ep->stid); |
2051 | done: | ||
2022 | err = ep->com.rpl_err; | 2052 | err = ep->com.rpl_err; |
2023 | cm_id->rem_ref(cm_id); | 2053 | cm_id->rem_ref(cm_id); |
2024 | put_ep(&ep->com); | 2054 | put_ep(&ep->com); |
@@ -2030,12 +2060,22 @@ int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp) | |||
2030 | int ret=0; | 2060 | int ret=0; |
2031 | unsigned long flags; | 2061 | unsigned long flags; |
2032 | int close = 0; | 2062 | int close = 0; |
2063 | int fatal = 0; | ||
2064 | struct t3cdev *tdev; | ||
2065 | struct cxio_rdev *rdev; | ||
2033 | 2066 | ||
2034 | spin_lock_irqsave(&ep->com.lock, flags); | 2067 | spin_lock_irqsave(&ep->com.lock, flags); |
2035 | 2068 | ||
2036 | PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep, | 2069 | PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep, |
2037 | states[ep->com.state], abrupt); | 2070 | states[ep->com.state], abrupt); |
2038 | 2071 | ||
2072 | tdev = (struct t3cdev *)ep->com.tdev; | ||
2073 | rdev = (struct cxio_rdev *)tdev->ulp; | ||
2074 | if (cxio_fatal_error(rdev)) { | ||
2075 | fatal = 1; | ||
2076 | close_complete_upcall(ep); | ||
2077 | ep->com.state = DEAD; | ||
2078 | } | ||
2039 | switch (ep->com.state) { | 2079 | switch (ep->com.state) { |
2040 | case MPA_REQ_WAIT: | 2080 | case MPA_REQ_WAIT: |
2041 | case MPA_REQ_SENT: | 2081 | case MPA_REQ_SENT: |
@@ -2075,7 +2115,11 @@ int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp) | |||
2075 | ret = send_abort(ep, NULL, gfp); | 2115 | ret = send_abort(ep, NULL, gfp); |
2076 | else | 2116 | else |
2077 | ret = send_halfclose(ep, gfp); | 2117 | ret = send_halfclose(ep, gfp); |
2118 | if (ret) | ||
2119 | fatal = 1; | ||
2078 | } | 2120 | } |
2121 | if (fatal) | ||
2122 | release_ep_resources(ep); | ||
2079 | return ret; | 2123 | return ret; |
2080 | } | 2124 | } |
2081 | 2125 | ||
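The iwch_cm.c hunks above convert the connection-management send paths from fire-and-forget calls (cxgb3_ofld_send()/l2t_send() followed by an unconditional return 0) into iwch_cxgb3_ofld_send()/iwch_l2t_send() wrappers whose return value is propagated to the caller, and iwch_ep_disconnect() now checks cxio_fatal_error() on the adapter and tears the endpoint down via release_ep_resources() when the device is dead or a send fails. The wrapper bodies are not part of these hunks; a minimal sketch of what such a wrapper could look like, with the fatal-error check and the -EIO/kfree_skb policy as assumptions for illustration only, is:

/*
 * Hedged sketch, not the driver's actual wrapper: one plausible shape for
 * iwch_cxgb3_ofld_send(), given how its return value is used above.
 */
static int iwch_cxgb3_ofld_send_sketch(struct t3cdev *tdev, struct sk_buff *skb)
{
	struct cxio_rdev *rdev = tdev->ulp;

	if (cxio_fatal_error(rdev)) {		/* adapter is gone, fail fast */
		kfree_skb(skb);
		return -EIO;
	}
	return cxgb3_ofld_send(tdev, skb);	/* propagate the real send status */
}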
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.h b/drivers/infiniband/hw/cxgb3/iwch_cm.h index d7c7e09f0996..43c0aea7eadc 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_cm.h +++ b/drivers/infiniband/hw/cxgb3/iwch_cm.h | |||
@@ -147,6 +147,7 @@ enum iwch_ep_state { | |||
147 | enum iwch_ep_flags { | 147 | enum iwch_ep_flags { |
148 | PEER_ABORT_IN_PROGRESS = (1 << 0), | 148 | PEER_ABORT_IN_PROGRESS = (1 << 0), |
149 | ABORT_REQ_IN_PROGRESS = (1 << 1), | 149 | ABORT_REQ_IN_PROGRESS = (1 << 1), |
150 | RELEASE_RESOURCES = (1 << 2), | ||
150 | }; | 151 | }; |
151 | 152 | ||
152 | struct iwch_ep_common { | 153 | struct iwch_ep_common { |
@@ -161,6 +162,7 @@ struct iwch_ep_common { | |||
161 | wait_queue_head_t waitq; | 162 | wait_queue_head_t waitq; |
162 | int rpl_done; | 163 | int rpl_done; |
163 | int rpl_err; | 164 | int rpl_err; |
165 | u32 flags; | ||
164 | }; | 166 | }; |
165 | 167 | ||
166 | struct iwch_listen_ep { | 168 | struct iwch_listen_ep { |
@@ -188,7 +190,6 @@ struct iwch_ep { | |||
188 | u16 plen; | 190 | u16 plen; |
189 | u32 ird; | 191 | u32 ird; |
190 | u32 ord; | 192 | u32 ord; |
191 | u32 flags; | ||
192 | }; | 193 | }; |
193 | 194 | ||
194 | static inline struct iwch_ep *to_ep(struct iw_cm_id *cm_id) | 195 | static inline struct iwch_ep *to_ep(struct iw_cm_id *cm_id) |
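In iwch_cm.h the per-connection flags word moves from struct iwch_ep into struct iwch_ep_common, and a new RELEASE_RESOURCES bit is added. Together with the __free_ep()/release_ep_resources() hunks above, this defers freeing the hardware TID, the cached route and the L2T entry until the last reference to the endpoint is dropped. A condensed sketch of that pattern, taken from the hunks rather than new driver code:

/*
 * Hedged sketch of the deferred-teardown pattern the header change enables:
 * callers only flag the endpoint; the TID, dst and L2T entry are released
 * from the kref callback once the final reference goes away.
 */
static void release_ep_resources_sketch(struct iwch_ep *ep)
{
	ep->com.flags |= RELEASE_RESOURCES;	/* real free happens in __free_ep() */
	put_ep(&ep->com);
}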
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c index c758fbd58478..2f546a625330 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_qp.c +++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c | |||
@@ -751,7 +751,7 @@ int iwch_post_zb_read(struct iwch_qp *qhp) | |||
751 | wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(qhp->ep->hwtid)| | 751 | wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(qhp->ep->hwtid)| |
752 | V_FW_RIWR_LEN(flit_cnt)); | 752 | V_FW_RIWR_LEN(flit_cnt)); |
753 | skb->priority = CPL_PRIORITY_DATA; | 753 | skb->priority = CPL_PRIORITY_DATA; |
754 | return cxgb3_ofld_send(qhp->rhp->rdev.t3cdev_p, skb); | 754 | return iwch_cxgb3_ofld_send(qhp->rhp->rdev.t3cdev_p, skb); |
755 | } | 755 | } |
756 | 756 | ||
757 | /* | 757 | /* |
@@ -783,7 +783,7 @@ int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg) | |||
783 | V_FW_RIWR_FLAGS(T3_COMPLETION_FLAG | T3_NOTIFY_FLAG)); | 783 | V_FW_RIWR_FLAGS(T3_COMPLETION_FLAG | T3_NOTIFY_FLAG)); |
784 | wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(qhp->ep->hwtid)); | 784 | wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(qhp->ep->hwtid)); |
785 | skb->priority = CPL_PRIORITY_DATA; | 785 | skb->priority = CPL_PRIORITY_DATA; |
786 | return cxgb3_ofld_send(qhp->rhp->rdev.t3cdev_p, skb); | 786 | return iwch_cxgb3_ofld_send(qhp->rhp->rdev.t3cdev_p, skb); |
787 | } | 787 | } |
788 | 788 | ||
789 | /* | 789 | /* |
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 2ccb9d31771f..ae3d7590346e 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c | |||
@@ -394,8 +394,7 @@ static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) | |||
394 | PAGE_SIZE, vma->vm_page_prot)) | 394 | PAGE_SIZE, vma->vm_page_prot)) |
395 | return -EAGAIN; | 395 | return -EAGAIN; |
396 | } else if (vma->vm_pgoff == 1 && dev->dev->caps.bf_reg_size != 0) { | 396 | } else if (vma->vm_pgoff == 1 && dev->dev->caps.bf_reg_size != 0) { |
397 | /* FIXME want pgprot_writecombine() for BlueFlame pages */ | 397 | vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); |
398 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); | ||
399 | 398 | ||
400 | if (io_remap_pfn_range(vma, vma->vm_start, | 399 | if (io_remap_pfn_range(vma, vma->vm_start, |
401 | to_mucontext(context)->uar.pfn + | 400 | to_mucontext(context)->uar.pfn + |
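The mlx4 hunk above resolves the FIXME by mapping BlueFlame pages write-combining instead of uncached before io_remap_pfn_range(). A small sketch of the resulting mapping step, where bf_pfn is a stand-in name for the page frame the driver actually maps:

/*
 * Hedged sketch of the mapping change above: set write-combining page
 * protection on the VMA, then remap the BlueFlame page frame into it.
 */
static int map_blueflame_sketch(struct vm_area_struct *vma, unsigned long bf_pfn)
{
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	if (io_remap_pfn_range(vma, vma->vm_start, bf_pfn,
			       PAGE_SIZE, vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}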
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h index 04b12ad23390..17621de54a9f 100644 --- a/drivers/infiniband/hw/nes/nes.h +++ b/drivers/infiniband/hw/nes/nes.h | |||
@@ -289,8 +289,8 @@ static inline __le32 get_crc_value(struct nes_v4_quad *nes_quad) | |||
289 | static inline void | 289 | static inline void |
290 | set_wqe_64bit_value(__le32 *wqe_words, u32 index, u64 value) | 290 | set_wqe_64bit_value(__le32 *wqe_words, u32 index, u64 value) |
291 | { | 291 | { |
292 | wqe_words[index] = cpu_to_le32((u32) ((unsigned long)value)); | 292 | wqe_words[index] = cpu_to_le32((u32) value); |
293 | wqe_words[index + 1] = cpu_to_le32((u32)(upper_32_bits((unsigned long)value))); | 293 | wqe_words[index + 1] = cpu_to_le32(upper_32_bits(value)); |
294 | } | 294 | } |
295 | 295 | ||
296 | static inline void | 296 | static inline void |
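The nes.h hunk fixes a truncation in set_wqe_64bit_value(): on a 32-bit kernel, unsigned long is 32 bits wide, so casting the 64-bit value through it zeroed the upper half before upper_32_bits() was applied, and the WQE's high address word was written as zero. An illustrative sketch of the corrected split (lower_32_bits(value) is equivalent to the (u32) cast the patch uses):

/* Hedged illustration of the corrected 64-bit split. */
static inline void split_u64_sketch(__le32 *words, u32 index, u64 value)
{
	words[index]     = cpu_to_le32(lower_32_bits(value));	/* low 32 bits  */
	words[index + 1] = cpu_to_le32(upper_32_bits(value));	/* high 32 bits */
}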
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c index 52425154acd4..dbd9a75474e3 100644 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c | |||
@@ -426,6 +426,7 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
426 | if (type == NES_TIMER_TYPE_CLOSE) { | 426 | if (type == NES_TIMER_TYPE_CLOSE) { |
427 | new_send->timetosend += (HZ/10); | 427 | new_send->timetosend += (HZ/10); |
428 | if (cm_node->recv_entry) { | 428 | if (cm_node->recv_entry) { |
429 | kfree(new_send); | ||
429 | WARN_ON(1); | 430 | WARN_ON(1); |
430 | return -EINVAL; | 431 | return -EINVAL; |
431 | } | 432 | } |
@@ -445,8 +446,8 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
445 | if (ret != NETDEV_TX_OK) { | 446 | if (ret != NETDEV_TX_OK) { |
446 | nes_debug(NES_DBG_CM, "Error sending packet %p " | 447 | nes_debug(NES_DBG_CM, "Error sending packet %p " |
447 | "(jiffies = %lu)\n", new_send, jiffies); | 448 | "(jiffies = %lu)\n", new_send, jiffies); |
448 | atomic_dec(&new_send->skb->users); | ||
449 | new_send->timetosend = jiffies; | 449 | new_send->timetosend = jiffies; |
450 | ret = NETDEV_TX_OK; | ||
450 | } else { | 451 | } else { |
451 | cm_packets_sent++; | 452 | cm_packets_sent++; |
452 | if (!send_retrans) { | 453 | if (!send_retrans) { |
@@ -630,7 +631,6 @@ static void nes_cm_timer_tick(unsigned long pass) | |||
630 | nes_debug(NES_DBG_CM, "rexmit failed for " | 631 | nes_debug(NES_DBG_CM, "rexmit failed for " |
631 | "node=%p\n", cm_node); | 632 | "node=%p\n", cm_node); |
632 | cm_packets_bounced++; | 633 | cm_packets_bounced++; |
633 | atomic_dec(&send_entry->skb->users); | ||
634 | send_entry->retrycount--; | 634 | send_entry->retrycount--; |
635 | nexttimeout = jiffies + NES_SHORT_TIME; | 635 | nexttimeout = jiffies + NES_SHORT_TIME; |
636 | settimer = 1; | 636 | settimer = 1; |
@@ -666,11 +666,6 @@ static void nes_cm_timer_tick(unsigned long pass) | |||
666 | 666 | ||
667 | spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags); | 667 | spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags); |
668 | rem_ref_cm_node(cm_node->cm_core, cm_node); | 668 | rem_ref_cm_node(cm_node->cm_core, cm_node); |
669 | if (ret != NETDEV_TX_OK) { | ||
670 | nes_debug(NES_DBG_CM, "rexmit failed for cm_node=%p\n", | ||
671 | cm_node); | ||
672 | break; | ||
673 | } | ||
674 | } | 669 | } |
675 | 670 | ||
676 | if (settimer) { | 671 | if (settimer) { |
@@ -1262,7 +1257,6 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core, | |||
1262 | cm_node->nesqp = NULL; | 1257 | cm_node->nesqp = NULL; |
1263 | } | 1258 | } |
1264 | 1259 | ||
1265 | cm_node->freed = 1; | ||
1266 | kfree(cm_node); | 1260 | kfree(cm_node); |
1267 | return 0; | 1261 | return 0; |
1268 | } | 1262 | } |
@@ -1999,13 +1993,17 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core, | |||
1999 | if (loopbackremotelistener == NULL) { | 1993 | if (loopbackremotelistener == NULL) { |
2000 | create_event(cm_node, NES_CM_EVENT_ABORTED); | 1994 | create_event(cm_node, NES_CM_EVENT_ABORTED); |
2001 | } else { | 1995 | } else { |
2002 | atomic_inc(&cm_loopbacks); | ||
2003 | loopback_cm_info = *cm_info; | 1996 | loopback_cm_info = *cm_info; |
2004 | loopback_cm_info.loc_port = cm_info->rem_port; | 1997 | loopback_cm_info.loc_port = cm_info->rem_port; |
2005 | loopback_cm_info.rem_port = cm_info->loc_port; | 1998 | loopback_cm_info.rem_port = cm_info->loc_port; |
2006 | loopback_cm_info.cm_id = loopbackremotelistener->cm_id; | 1999 | loopback_cm_info.cm_id = loopbackremotelistener->cm_id; |
2007 | loopbackremotenode = make_cm_node(cm_core, nesvnic, | 2000 | loopbackremotenode = make_cm_node(cm_core, nesvnic, |
2008 | &loopback_cm_info, loopbackremotelistener); | 2001 | &loopback_cm_info, loopbackremotelistener); |
2002 | if (!loopbackremotenode) { | ||
2003 | rem_ref_cm_node(cm_node->cm_core, cm_node); | ||
2004 | return NULL; | ||
2005 | } | ||
2006 | atomic_inc(&cm_loopbacks); | ||
2009 | loopbackremotenode->loopbackpartner = cm_node; | 2007 | loopbackremotenode->loopbackpartner = cm_node; |
2010 | loopbackremotenode->tcp_cntxt.rcv_wscale = | 2008 | loopbackremotenode->tcp_cntxt.rcv_wscale = |
2011 | NES_CM_DEFAULT_RCV_WND_SCALE; | 2009 | NES_CM_DEFAULT_RCV_WND_SCALE; |
@@ -2690,6 +2688,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
2690 | struct ib_mr *ibmr = NULL; | 2688 | struct ib_mr *ibmr = NULL; |
2691 | struct ib_phys_buf ibphysbuf; | 2689 | struct ib_phys_buf ibphysbuf; |
2692 | struct nes_pd *nespd; | 2690 | struct nes_pd *nespd; |
2691 | u64 tagged_offset; | ||
2693 | 2692 | ||
2694 | 2693 | ||
2695 | 2694 | ||
@@ -2755,10 +2754,11 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
2755 | ibphysbuf.addr = nesqp->ietf_frame_pbase; | 2754 | ibphysbuf.addr = nesqp->ietf_frame_pbase; |
2756 | ibphysbuf.size = conn_param->private_data_len + | 2755 | ibphysbuf.size = conn_param->private_data_len + |
2757 | sizeof(struct ietf_mpa_frame); | 2756 | sizeof(struct ietf_mpa_frame); |
2757 | tagged_offset = (u64)(unsigned long)nesqp->ietf_frame; | ||
2758 | ibmr = nesibdev->ibdev.reg_phys_mr((struct ib_pd *)nespd, | 2758 | ibmr = nesibdev->ibdev.reg_phys_mr((struct ib_pd *)nespd, |
2759 | &ibphysbuf, 1, | 2759 | &ibphysbuf, 1, |
2760 | IB_ACCESS_LOCAL_WRITE, | 2760 | IB_ACCESS_LOCAL_WRITE, |
2761 | (u64 *)&nesqp->ietf_frame); | 2761 | &tagged_offset); |
2762 | if (!ibmr) { | 2762 | if (!ibmr) { |
2763 | nes_debug(NES_DBG_CM, "Unable to register memory region" | 2763 | nes_debug(NES_DBG_CM, "Unable to register memory region" |
2764 | "for lSMM for cm_node = %p \n", | 2764 | "for lSMM for cm_node = %p \n", |
@@ -2782,7 +2782,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
2782 | sizeof(struct ietf_mpa_frame)); | 2782 | sizeof(struct ietf_mpa_frame)); |
2783 | set_wqe_64bit_value(wqe->wqe_words, | 2783 | set_wqe_64bit_value(wqe->wqe_words, |
2784 | NES_IWARP_SQ_WQE_FRAG0_LOW_IDX, | 2784 | NES_IWARP_SQ_WQE_FRAG0_LOW_IDX, |
2785 | (u64)nesqp->ietf_frame); | 2785 | (u64)(unsigned long)nesqp->ietf_frame); |
2786 | wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX] = | 2786 | wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX] = |
2787 | cpu_to_le32(conn_param->private_data_len + | 2787 | cpu_to_le32(conn_param->private_data_len + |
2788 | sizeof(struct ietf_mpa_frame)); | 2788 | sizeof(struct ietf_mpa_frame)); |
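The nes_cm.c hunks above collect several error-path fixes: the timer entry is freed when a close is scheduled while a receive entry is already pending, the extra skb->users decrements on transmit failure are dropped, mini_cm_connect() now bails out and releases the local node when the loopback peer node cannot be allocated, and the lSMM memory registration passes its I/O virtual address in a dedicated u64 instead of casting the address of the frame pointer. A sketch of that last usage, with frame_kva, phys_buf, pd and ibdev standing in for the driver's real fields:

/*
 * Hedged sketch of the reg_phys_mr() call the last hunk adopts: the IOVA
 * travels in its own u64, so the verbs layer can update it without
 * overwriting the kernel pointer to the MPA frame.
 */
u64 tagged_offset = (u64)(unsigned long)frame_kva;
struct ib_mr *mr = ibdev->reg_phys_mr(pd, &phys_buf, 1,
				      IB_ACCESS_LOCAL_WRITE, &tagged_offset);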
diff --git a/drivers/infiniband/hw/nes/nes_cm.h b/drivers/infiniband/hw/nes/nes_cm.h index d5f778202eb7..80bba1892571 100644 --- a/drivers/infiniband/hw/nes/nes_cm.h +++ b/drivers/infiniband/hw/nes/nes_cm.h | |||
@@ -298,7 +298,6 @@ struct nes_cm_node { | |||
298 | struct nes_vnic *nesvnic; | 298 | struct nes_vnic *nesvnic; |
299 | int apbvt_set; | 299 | int apbvt_set; |
300 | int accept_pend; | 300 | int accept_pend; |
301 | int freed; | ||
302 | struct list_head timer_entry; | 301 | struct list_head timer_entry; |
303 | struct list_head reset_entry; | 302 | struct list_head reset_entry; |
304 | struct nes_qp *nesqp; | 303 | struct nes_qp *nesqp; |
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c index 52e734042b8e..d6fc9ae44062 100644 --- a/drivers/infiniband/hw/nes/nes_hw.c +++ b/drivers/infiniband/hw/nes/nes_hw.c | |||
@@ -46,6 +46,10 @@ static unsigned int nes_lro_max_aggr = NES_LRO_MAX_AGGR; | |||
46 | module_param(nes_lro_max_aggr, uint, 0444); | 46 | module_param(nes_lro_max_aggr, uint, 0444); |
47 | MODULE_PARM_DESC(nes_lro_max_aggr, "NIC LRO max packet aggregation"); | 47 | MODULE_PARM_DESC(nes_lro_max_aggr, "NIC LRO max packet aggregation"); |
48 | 48 | ||
49 | static int wide_ppm_offset; | ||
50 | module_param(wide_ppm_offset, int, 0644); | ||
51 | MODULE_PARM_DESC(wide_ppm_offset, "Increase CX4 interface clock ppm offset, 0=100ppm (default), 1=300ppm"); | ||
52 | |||
49 | static u32 crit_err_count; | 53 | static u32 crit_err_count; |
50 | u32 int_mod_timer_init; | 54 | u32 int_mod_timer_init; |
51 | u32 int_mod_cq_depth_256; | 55 | u32 int_mod_cq_depth_256; |
@@ -546,8 +550,11 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) { | |||
546 | msleep(1); | 550 | msleep(1); |
547 | } | 551 | } |
548 | if (int_cnt > 1) { | 552 | if (int_cnt > 1) { |
553 | u32 sds; | ||
549 | spin_lock_irqsave(&nesadapter->phy_lock, flags); | 554 | spin_lock_irqsave(&nesadapter->phy_lock, flags); |
550 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, 0x0000F088); | 555 | sds = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1); |
556 | sds |= 0x00000040; | ||
557 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, sds); | ||
551 | mh_detected++; | 558 | mh_detected++; |
552 | reset_value = nes_read32(nesdev->regs+NES_SOFTWARE_RESET); | 559 | reset_value = nes_read32(nesdev->regs+NES_SOFTWARE_RESET); |
553 | reset_value |= 0x0000003d; | 560 | reset_value |= 0x0000003d; |
@@ -736,39 +743,49 @@ static int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count, | |||
736 | { | 743 | { |
737 | int i; | 744 | int i; |
738 | u32 u32temp; | 745 | u32 u32temp; |
739 | u32 serdes_common_control; | 746 | u32 sds; |
740 | 747 | ||
741 | if (hw_rev != NE020_REV) { | 748 | if (hw_rev != NE020_REV) { |
742 | /* init serdes 0 */ | 749 | /* init serdes 0 */ |
750 | if (wide_ppm_offset && (nesadapter->phy_type[0] == NES_PHY_TYPE_CX4)) | ||
751 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000FFFAA); | ||
752 | else | ||
753 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000FF); | ||
743 | 754 | ||
744 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000FF); | ||
745 | if (nesadapter->phy_type[0] == NES_PHY_TYPE_PUMA_1G) { | 755 | if (nesadapter->phy_type[0] == NES_PHY_TYPE_PUMA_1G) { |
746 | serdes_common_control = nes_read_indexed(nesdev, | 756 | sds = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0); |
747 | NES_IDX_ETH_SERDES_COMMON_CONTROL0); | 757 | sds |= 0x00000100; |
748 | serdes_common_control |= 0x000000100; | 758 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0, sds); |
749 | nes_write_indexed(nesdev, | ||
750 | NES_IDX_ETH_SERDES_COMMON_CONTROL0, | ||
751 | serdes_common_control); | ||
752 | } else if (!OneG_Mode) { | ||
753 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE0, 0x11110000); | ||
754 | } | 759 | } |
755 | if (((port_count > 1) && | 760 | if (!OneG_Mode) |
756 | (nesadapter->phy_type[0] != NES_PHY_TYPE_PUMA_1G)) || | 761 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE0, 0x11110000); |
757 | ((port_count > 2) && | 762 | |
758 | (nesadapter->phy_type[0] == NES_PHY_TYPE_PUMA_1G))) { | 763 | if (port_count < 2) |
759 | /* init serdes 1 */ | 764 | return 0; |
760 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL1, 0x000000FF); | 765 | |
761 | if (nesadapter->phy_type[0] == NES_PHY_TYPE_PUMA_1G) { | 766 | /* init serdes 1 */ |
762 | serdes_common_control = nes_read_indexed(nesdev, | 767 | switch (nesadapter->phy_type[1]) { |
763 | NES_IDX_ETH_SERDES_COMMON_CONTROL1); | 768 | case NES_PHY_TYPE_ARGUS: |
764 | serdes_common_control |= 0x000000100; | 769 | case NES_PHY_TYPE_SFP_D: |
765 | nes_write_indexed(nesdev, | 770 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP0, 0x00000000); |
766 | NES_IDX_ETH_SERDES_COMMON_CONTROL1, | 771 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP1, 0x00000000); |
767 | serdes_common_control); | 772 | break; |
768 | } else if (!OneG_Mode) { | 773 | case NES_PHY_TYPE_CX4: |
769 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE1, 0x11110000); | 774 | sds = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1); |
770 | } | 775 | sds &= 0xFFFFFFBF; |
776 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, sds); | ||
777 | if (wide_ppm_offset) | ||
778 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL1, 0x000FFFAA); | ||
779 | else | ||
780 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL1, 0x000000FF); | ||
781 | break; | ||
782 | case NES_PHY_TYPE_PUMA_1G: | ||
783 | sds = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1); | ||
784 | sds |= 0x000000100; | ||
785 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, sds); | ||
771 | } | 786 | } |
787 | if (!OneG_Mode) | ||
788 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE1, 0x11110000); | ||
772 | } else { | 789 | } else { |
773 | /* init serdes 0 */ | 790 | /* init serdes 0 */ |
774 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0, 0x00000008); | 791 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0, 0x00000008); |
@@ -1259,203 +1276,162 @@ int nes_init_phy(struct nes_device *nesdev) | |||
1259 | { | 1276 | { |
1260 | struct nes_adapter *nesadapter = nesdev->nesadapter; | 1277 | struct nes_adapter *nesadapter = nesdev->nesadapter; |
1261 | u32 counter = 0; | 1278 | u32 counter = 0; |
1262 | u32 sds_common_control0; | 1279 | u32 sds; |
1263 | u32 mac_index = nesdev->mac_index; | 1280 | u32 mac_index = nesdev->mac_index; |
1264 | u32 tx_config = 0; | 1281 | u32 tx_config = 0; |
1265 | u16 phy_data; | 1282 | u16 phy_data; |
1266 | u32 temp_phy_data = 0; | 1283 | u32 temp_phy_data = 0; |
1267 | u32 temp_phy_data2 = 0; | 1284 | u32 temp_phy_data2 = 0; |
1268 | u32 i = 0; | 1285 | u8 phy_type = nesadapter->phy_type[mac_index]; |
1286 | u8 phy_index = nesadapter->phy_index[mac_index]; | ||
1269 | 1287 | ||
1270 | if ((nesadapter->OneG_Mode) && | 1288 | if ((nesadapter->OneG_Mode) && |
1271 | (nesadapter->phy_type[mac_index] != NES_PHY_TYPE_PUMA_1G)) { | 1289 | (phy_type != NES_PHY_TYPE_PUMA_1G)) { |
1272 | nes_debug(NES_DBG_PHY, "1G PHY, mac_index = %d.\n", mac_index); | 1290 | nes_debug(NES_DBG_PHY, "1G PHY, mac_index = %d.\n", mac_index); |
1273 | if (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_1G) { | 1291 | if (phy_type == NES_PHY_TYPE_1G) { |
1274 | printk(PFX "%s: Programming mdc config for 1G\n", __func__); | ||
1275 | tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG); | 1292 | tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG); |
1276 | tx_config &= 0xFFFFFFE3; | 1293 | tx_config &= 0xFFFFFFE3; |
1277 | tx_config |= 0x04; | 1294 | tx_config |= 0x04; |
1278 | nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config); | 1295 | nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config); |
1279 | } | 1296 | } |
1280 | 1297 | ||
1281 | nes_read_1G_phy_reg(nesdev, 1, nesadapter->phy_index[mac_index], &phy_data); | 1298 | nes_read_1G_phy_reg(nesdev, 1, phy_index, &phy_data); |
1282 | nes_debug(NES_DBG_PHY, "Phy data from register 1 phy address %u = 0x%X.\n", | 1299 | nes_write_1G_phy_reg(nesdev, 23, phy_index, 0xb000); |
1283 | nesadapter->phy_index[mac_index], phy_data); | ||
1284 | nes_write_1G_phy_reg(nesdev, 23, nesadapter->phy_index[mac_index], 0xb000); | ||
1285 | 1300 | ||
1286 | /* Reset the PHY */ | 1301 | /* Reset the PHY */ |
1287 | nes_write_1G_phy_reg(nesdev, 0, nesadapter->phy_index[mac_index], 0x8000); | 1302 | nes_write_1G_phy_reg(nesdev, 0, phy_index, 0x8000); |
1288 | udelay(100); | 1303 | udelay(100); |
1289 | counter = 0; | 1304 | counter = 0; |
1290 | do { | 1305 | do { |
1291 | nes_read_1G_phy_reg(nesdev, 0, nesadapter->phy_index[mac_index], &phy_data); | 1306 | nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data); |
1292 | nes_debug(NES_DBG_PHY, "Phy data from register 0 = 0x%X.\n", phy_data); | 1307 | if (counter++ > 100) |
1293 | if (counter++ > 100) break; | 1308 | break; |
1294 | } while (phy_data & 0x8000); | 1309 | } while (phy_data & 0x8000); |
1295 | 1310 | ||
1296 | /* Setting no phy loopback */ | 1311 | /* Setting no phy loopback */ |
1297 | phy_data &= 0xbfff; | 1312 | phy_data &= 0xbfff; |
1298 | phy_data |= 0x1140; | 1313 | phy_data |= 0x1140; |
1299 | nes_write_1G_phy_reg(nesdev, 0, nesadapter->phy_index[mac_index], phy_data); | 1314 | nes_write_1G_phy_reg(nesdev, 0, phy_index, phy_data); |
1300 | nes_read_1G_phy_reg(nesdev, 0, nesadapter->phy_index[mac_index], &phy_data); | 1315 | nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data); |
1301 | nes_debug(NES_DBG_PHY, "Phy data from register 0 = 0x%X.\n", phy_data); | 1316 | nes_read_1G_phy_reg(nesdev, 0x17, phy_index, &phy_data); |
1302 | 1317 | nes_read_1G_phy_reg(nesdev, 0x1e, phy_index, &phy_data); | |
1303 | nes_read_1G_phy_reg(nesdev, 0x17, nesadapter->phy_index[mac_index], &phy_data); | ||
1304 | nes_debug(NES_DBG_PHY, "Phy data from register 0x17 = 0x%X.\n", phy_data); | ||
1305 | |||
1306 | nes_read_1G_phy_reg(nesdev, 0x1e, nesadapter->phy_index[mac_index], &phy_data); | ||
1307 | nes_debug(NES_DBG_PHY, "Phy data from register 0x1e = 0x%X.\n", phy_data); | ||
1308 | 1318 | ||
1309 | /* Setting the interrupt mask */ | 1319 | /* Setting the interrupt mask */ |
1310 | nes_read_1G_phy_reg(nesdev, 0x19, nesadapter->phy_index[mac_index], &phy_data); | 1320 | nes_read_1G_phy_reg(nesdev, 0x19, phy_index, &phy_data); |
1311 | nes_debug(NES_DBG_PHY, "Phy data from register 0x19 = 0x%X.\n", phy_data); | 1321 | nes_write_1G_phy_reg(nesdev, 0x19, phy_index, 0xffee); |
1312 | nes_write_1G_phy_reg(nesdev, 0x19, nesadapter->phy_index[mac_index], 0xffee); | 1322 | nes_read_1G_phy_reg(nesdev, 0x19, phy_index, &phy_data); |
1313 | |||
1314 | nes_read_1G_phy_reg(nesdev, 0x19, nesadapter->phy_index[mac_index], &phy_data); | ||
1315 | nes_debug(NES_DBG_PHY, "Phy data from register 0x19 = 0x%X.\n", phy_data); | ||
1316 | 1323 | ||
1317 | /* turning on flow control */ | 1324 | /* turning on flow control */ |
1318 | nes_read_1G_phy_reg(nesdev, 4, nesadapter->phy_index[mac_index], &phy_data); | 1325 | nes_read_1G_phy_reg(nesdev, 4, phy_index, &phy_data); |
1319 | nes_debug(NES_DBG_PHY, "Phy data from register 0x4 = 0x%X.\n", phy_data); | 1326 | nes_write_1G_phy_reg(nesdev, 4, phy_index, (phy_data & ~(0x03E0)) | 0xc00); |
1320 | nes_write_1G_phy_reg(nesdev, 4, nesadapter->phy_index[mac_index], | 1327 | nes_read_1G_phy_reg(nesdev, 4, phy_index, &phy_data); |
1321 | (phy_data & ~(0x03E0)) | 0xc00); | ||
1322 | /* nes_write_1G_phy_reg(nesdev, 4, nesadapter->phy_index[mac_index], | ||
1323 | phy_data | 0xc00); */ | ||
1324 | nes_read_1G_phy_reg(nesdev, 4, nesadapter->phy_index[mac_index], &phy_data); | ||
1325 | nes_debug(NES_DBG_PHY, "Phy data from register 0x4 = 0x%X.\n", phy_data); | ||
1326 | |||
1327 | nes_read_1G_phy_reg(nesdev, 9, nesadapter->phy_index[mac_index], &phy_data); | ||
1328 | nes_debug(NES_DBG_PHY, "Phy data from register 0x9 = 0x%X.\n", phy_data); | ||
1329 | /* Clear Half duplex */ | ||
1330 | nes_write_1G_phy_reg(nesdev, 9, nesadapter->phy_index[mac_index], | ||
1331 | phy_data & ~(0x0100)); | ||
1332 | nes_read_1G_phy_reg(nesdev, 9, nesadapter->phy_index[mac_index], &phy_data); | ||
1333 | nes_debug(NES_DBG_PHY, "Phy data from register 0x9 = 0x%X.\n", phy_data); | ||
1334 | |||
1335 | nes_read_1G_phy_reg(nesdev, 0, nesadapter->phy_index[mac_index], &phy_data); | ||
1336 | nes_write_1G_phy_reg(nesdev, 0, nesadapter->phy_index[mac_index], phy_data | 0x0300); | ||
1337 | } else { | ||
1338 | if ((nesadapter->phy_type[mac_index] == NES_PHY_TYPE_IRIS) || | ||
1339 | (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_ARGUS)) { | ||
1340 | /* setup 10G MDIO operation */ | ||
1341 | tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG); | ||
1342 | tx_config &= 0xFFFFFFE3; | ||
1343 | tx_config |= 0x15; | ||
1344 | nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config); | ||
1345 | } | ||
1346 | if ((nesadapter->phy_type[mac_index] == NES_PHY_TYPE_ARGUS)) { | ||
1347 | nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0xd7ee); | ||
1348 | 1328 | ||
1349 | temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); | 1329 | /* Clear Half duplex */ |
1350 | mdelay(10); | 1330 | nes_read_1G_phy_reg(nesdev, 9, phy_index, &phy_data); |
1351 | nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0xd7ee); | 1331 | nes_write_1G_phy_reg(nesdev, 9, phy_index, phy_data & ~(0x0100)); |
1352 | temp_phy_data2 = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); | 1332 | nes_read_1G_phy_reg(nesdev, 9, phy_index, &phy_data); |
1353 | 1333 | ||
1354 | /* | 1334 | nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data); |
1355 | * if firmware is already running (like from a | 1335 | nes_write_1G_phy_reg(nesdev, 0, phy_index, phy_data | 0x0300); |
1356 | * driver un-load/load, don't do anything. | ||
1357 | */ | ||
1358 | if (temp_phy_data == temp_phy_data2) { | ||
1359 | /* configure QT2505 AMCC PHY */ | ||
1360 | nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0x0000, 0x8000); | ||
1361 | nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xc300, 0x0000); | ||
1362 | nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xc302, 0x0044); | ||
1363 | nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xc318, 0x0052); | ||
1364 | nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xc319, 0x0008); | ||
1365 | nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xc31a, 0x0098); | ||
1366 | nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0x0026, 0x0E00); | ||
1367 | nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0x0027, 0x0001); | ||
1368 | nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0x0028, 0xA528); | ||
1369 | 1336 | ||
1370 | /* | 1337 | return 0; |
1371 | * remove micro from reset; chip boots from ROM, | 1338 | } |
1372 | * uploads EEPROM f/w image, uC executes f/w | ||
1373 | */ | ||
1374 | nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xc300, 0x0002); | ||
1375 | 1339 | ||
1376 | /* | 1340 | if ((phy_type == NES_PHY_TYPE_IRIS) || |
1377 | * wait for heart beat to start to | 1341 | (phy_type == NES_PHY_TYPE_ARGUS) || |
1378 | * know loading is done | 1342 | (phy_type == NES_PHY_TYPE_SFP_D)) { |
1379 | */ | 1343 | /* setup 10G MDIO operation */ |
1380 | counter = 0; | 1344 | tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG); |
1381 | do { | 1345 | tx_config &= 0xFFFFFFE3; |
1382 | nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0xd7ee); | 1346 | tx_config |= 0x15; |
1383 | temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); | 1347 | nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config); |
1384 | if (counter++ > 1000) { | 1348 | } |
1385 | nes_debug(NES_DBG_PHY, "AMCC PHY- breaking from heartbeat check <this is bad!!!> \n"); | 1349 | if ((phy_type == NES_PHY_TYPE_ARGUS) || |
1386 | break; | 1350 | (phy_type == NES_PHY_TYPE_SFP_D)) { |
1387 | } | 1351 | /* Check firmware heartbeat */ |
1388 | mdelay(100); | 1352 | nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee); |
1389 | nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0xd7ee); | 1353 | temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); |
1390 | temp_phy_data2 = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); | 1354 | udelay(1500); |
1391 | } while ((temp_phy_data2 == temp_phy_data)); | 1355 | nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee); |
1356 | temp_phy_data2 = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); | ||
1357 | |||
1358 | if (temp_phy_data != temp_phy_data2) | ||
1359 | return 0; | ||
1392 | 1360 | ||
1393 | /* | 1361 | /* no heartbeat, configure the PHY */ |
1394 | * wait for tracking to start to know | 1362 | nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0x0000, 0x8000); |
1395 | * f/w is good to go | 1363 | nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc300, 0x0000); |
1396 | */ | 1364 | nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc316, 0x000A); |
1397 | counter = 0; | 1365 | nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc318, 0x0052); |
1398 | do { | 1366 | if (phy_type == NES_PHY_TYPE_ARGUS) { |
1399 | nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0xd7fd); | 1367 | nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc302, 0x000C); |
1400 | temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); | 1368 | nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc319, 0x0008); |
1401 | if (counter++ > 1000) { | 1369 | } else { |
1402 | nes_debug(NES_DBG_PHY, "AMCC PHY- breaking from status check <this is bad!!!> \n"); | 1370 | nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc302, 0x0004); |
1403 | break; | 1371 | nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc319, 0x0038); |
1404 | } | 1372 | } |
1405 | mdelay(1000); | 1373 | nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc31a, 0x0098); |
1406 | /* | 1374 | nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0026, 0x0E00); |
1407 | * nes_debug(NES_DBG_PHY, "AMCC PHY- phy_status not ready yet = 0x%02X\n", | 1375 | nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0027, 0x0001); |
1408 | * temp_phy_data); | ||
1409 | */ | ||
1410 | } while (((temp_phy_data & 0xff) != 0x50) && ((temp_phy_data & 0xff) != 0x70)); | ||
1411 | |||
1412 | /* set LOS Control invert RXLOSB_I_PADINV */ | ||
1413 | nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xd003, 0x0000); | ||
1414 | /* set LOS Control to mask of RXLOSB_I */ | ||
1415 | nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xc314, 0x0042); | ||
1416 | /* set LED1 to input mode (LED1 and LED2 share same LED) */ | ||
1417 | nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xd006, 0x0007); | ||
1418 | /* set LED2 to RX link_status and activity */ | ||
1419 | nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xd007, 0x000A); | ||
1420 | /* set LED3 to RX link_status */ | ||
1421 | nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xd008, 0x0009); | ||
1422 | 1376 | ||
1423 | /* | 1377 | /* setup LEDs */ |
1424 | * reset the res-calibration on t2 | 1378 | nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd006, 0x0007); |
1425 | * serdes; ensures it is stable after | 1379 | nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd007, 0x000A); |
1426 | * the amcc phy is stable | 1380 | nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd008, 0x0009); |
1427 | */ | ||
1428 | 1381 | ||
1429 | sds_common_control0 = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0); | 1382 | nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0028, 0xA528); |
1430 | sds_common_control0 |= 0x1; | ||
1431 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0, sds_common_control0); | ||
1432 | 1383 | ||
1433 | /* release the res-calibration reset */ | 1384 | /* Bring PHY out of reset */ |
1434 | sds_common_control0 &= 0xfffffffe; | 1385 | nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc300, 0x0002); |
1435 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0, sds_common_control0); | ||
1436 | 1386 | ||
1437 | i = 0; | 1387 | /* Check for heartbeat */ |
1438 | while (((nes_read32(nesdev->regs + NES_SOFTWARE_RESET) & 0x00000040) != 0x00000040) | 1388 | counter = 0; |
1439 | && (i++ < 5000)) { | 1389 | mdelay(690); |
1440 | /* mdelay(1); */ | 1390 | nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee); |
1441 | } | 1391 | temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); |
1392 | do { | ||
1393 | if (counter++ > 150) { | ||
1394 | nes_debug(NES_DBG_PHY, "No PHY heartbeat\n"); | ||
1395 | break; | ||
1396 | } | ||
1397 | mdelay(1); | ||
1398 | nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee); | ||
1399 | temp_phy_data2 = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); | ||
1400 | } while ((temp_phy_data2 == temp_phy_data)); | ||
1442 | 1401 | ||
1443 | /* | 1402 | /* wait for tracking */ |
1444 | * wait for link train done before moving on, | 1403 | counter = 0; |
1445 | * or will get an interupt storm | 1404 | do { |
1446 | */ | 1405 | nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7fd); |
1447 | counter = 0; | 1406 | temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); |
1448 | do { | 1407 | if (counter++ > 300) { |
1449 | temp_phy_data = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 + | 1408 | nes_debug(NES_DBG_PHY, "PHY did not track\n"); |
1450 | (0x200 * (nesdev->mac_index & 1))); | 1409 | break; |
1451 | if (counter++ > 1000) { | ||
1452 | nes_debug(NES_DBG_PHY, "AMCC PHY- breaking from link train wait <this is bad, link didnt train!!!>\n"); | ||
1453 | break; | ||
1454 | } | ||
1455 | mdelay(1); | ||
1456 | } while (((temp_phy_data & 0x0f1f0000) != 0x0f0f0000)); | ||
1457 | } | 1410 | } |
1458 | } | 1411 | mdelay(10); |
1412 | } while (((temp_phy_data & 0xff) != 0x50) && ((temp_phy_data & 0xff) != 0x70)); | ||
1413 | |||
1414 | /* setup signal integrity */ | ||
1415 | nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd003, 0x0000); | ||
1416 | nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xF00D, 0x00FE); | ||
1417 | nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xF00E, 0x0032); | ||
1418 | nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xF00F, 0x0002); | ||
1419 | nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc314, 0x0063); | ||
1420 | |||
1421 | /* reset serdes */ | ||
1422 | sds = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0 + | ||
1423 | mac_index * 0x200); | ||
1424 | sds |= 0x1; | ||
1425 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0 + | ||
1426 | mac_index * 0x200, sds); | ||
1427 | sds &= 0xfffffffe; | ||
1428 | nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0 + | ||
1429 | mac_index * 0x200, sds); | ||
1430 | |||
1431 | counter = 0; | ||
1432 | while (((nes_read32(nesdev->regs + NES_SOFTWARE_RESET) & 0x00000040) != 0x00000040) | ||
1433 | && (counter++ < 5000)) | ||
1434 | ; | ||
1459 | } | 1435 | } |
1460 | return 0; | 1436 | return 0; |
1461 | } | 1437 | } |
@@ -2359,6 +2335,7 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number) | |||
2359 | u16 temp_phy_data; | 2335 | u16 temp_phy_data; |
2360 | u32 pcs_val = 0x0f0f0000; | 2336 | u32 pcs_val = 0x0f0f0000; |
2361 | u32 pcs_mask = 0x0f1f0000; | 2337 | u32 pcs_mask = 0x0f1f0000; |
2338 | u32 cdr_ctrl; | ||
2362 | 2339 | ||
2363 | spin_lock_irqsave(&nesadapter->phy_lock, flags); | 2340 | spin_lock_irqsave(&nesadapter->phy_lock, flags); |
2364 | if (nesadapter->mac_sw_state[mac_number] != NES_MAC_SW_IDLE) { | 2341 | if (nesadapter->mac_sw_state[mac_number] != NES_MAC_SW_IDLE) { |
@@ -2473,6 +2450,7 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number) | |||
2473 | break; | 2450 | break; |
2474 | 2451 | ||
2475 | case NES_PHY_TYPE_ARGUS: | 2452 | case NES_PHY_TYPE_ARGUS: |
2453 | case NES_PHY_TYPE_SFP_D: | ||
2476 | /* clear the alarms */ | 2454 | /* clear the alarms */ |
2477 | nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 4, 0x0008); | 2455 | nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 4, 0x0008); |
2478 | nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 4, 0xc001); | 2456 | nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 4, 0xc001); |
@@ -2483,19 +2461,18 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number) | |||
2483 | nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 0x9004); | 2461 | nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 0x9004); |
2484 | nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 0x9005); | 2462 | nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 0x9005); |
2485 | /* check link status */ | 2463 | /* check link status */ |
2486 | nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 1); | 2464 | nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 0x9003); |
2487 | temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); | 2465 | temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); |
2488 | u32temp = 100; | ||
2489 | do { | ||
2490 | nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 1); | ||
2491 | 2466 | ||
2492 | phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); | 2467 | nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 3, 0x0021); |
2493 | if ((phy_data == temp_phy_data) || (!(--u32temp))) | 2468 | nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); |
2494 | break; | 2469 | nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 3, 0x0021); |
2495 | temp_phy_data = phy_data; | 2470 | phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); |
2496 | } while (1); | 2471 | |
2472 | phy_data = (!temp_phy_data && (phy_data == 0x8000)) ? 0x4 : 0x0; | ||
2473 | |||
2497 | nes_debug(NES_DBG_PHY, "%s: Phy data = 0x%04X, link was %s.\n", | 2474 | nes_debug(NES_DBG_PHY, "%s: Phy data = 0x%04X, link was %s.\n", |
2498 | __func__, phy_data, nesadapter->mac_link_down ? "DOWN" : "UP"); | 2475 | __func__, phy_data, nesadapter->mac_link_down[mac_index] ? "DOWN" : "UP"); |
2499 | break; | 2476 | break; |
2500 | 2477 | ||
2501 | case NES_PHY_TYPE_PUMA_1G: | 2478 | case NES_PHY_TYPE_PUMA_1G: |
@@ -2511,6 +2488,17 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number) | |||
2511 | } | 2488 | } |
2512 | 2489 | ||
2513 | if (phy_data & 0x0004) { | 2490 | if (phy_data & 0x0004) { |
2491 | if (wide_ppm_offset && | ||
2492 | (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_CX4) && | ||
2493 | (nesadapter->hw_rev != NE020_REV)) { | ||
2494 | cdr_ctrl = nes_read_indexed(nesdev, | ||
2495 | NES_IDX_ETH_SERDES_CDR_CONTROL0 + | ||
2496 | mac_index * 0x200); | ||
2497 | nes_write_indexed(nesdev, | ||
2498 | NES_IDX_ETH_SERDES_CDR_CONTROL0 + | ||
2499 | mac_index * 0x200, | ||
2500 | cdr_ctrl | 0x000F0000); | ||
2501 | } | ||
2514 | nesadapter->mac_link_down[mac_index] = 0; | 2502 | nesadapter->mac_link_down[mac_index] = 0; |
2515 | list_for_each_entry(nesvnic, &nesadapter->nesvnic_list[mac_index], list) { | 2503 | list_for_each_entry(nesvnic, &nesadapter->nesvnic_list[mac_index], list) { |
2516 | nes_debug(NES_DBG_PHY, "The Link is UP!!. linkup was %d\n", | 2504 | nes_debug(NES_DBG_PHY, "The Link is UP!!. linkup was %d\n", |
@@ -2525,6 +2513,17 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number) | |||
2525 | } | 2513 | } |
2526 | } | 2514 | } |
2527 | } else { | 2515 | } else { |
2516 | if (wide_ppm_offset && | ||
2517 | (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_CX4) && | ||
2518 | (nesadapter->hw_rev != NE020_REV)) { | ||
2519 | cdr_ctrl = nes_read_indexed(nesdev, | ||
2520 | NES_IDX_ETH_SERDES_CDR_CONTROL0 + | ||
2521 | mac_index * 0x200); | ||
2522 | nes_write_indexed(nesdev, | ||
2523 | NES_IDX_ETH_SERDES_CDR_CONTROL0 + | ||
2524 | mac_index * 0x200, | ||
2525 | cdr_ctrl & 0xFFF0FFFF); | ||
2526 | } | ||
2528 | nesadapter->mac_link_down[mac_index] = 1; | 2527 | nesadapter->mac_link_down[mac_index] = 1; |
2529 | list_for_each_entry(nesvnic, &nesadapter->nesvnic_list[mac_index], list) { | 2528 | list_for_each_entry(nesvnic, &nesadapter->nesvnic_list[mac_index], list) { |
2530 | nes_debug(NES_DBG_PHY, "The Link is Down!!. linkup was %d\n", | 2529 | nes_debug(NES_DBG_PHY, "The Link is Down!!. linkup was %d\n", |
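The nes_hw.c hunks add a wide_ppm_offset module parameter that widens the CX4 SERDES clock-data-recovery offset, both at SERDES init and when the link toggles in nes_process_mac_intr(), alongside the reworked ARGUS/SFP_D PHY bring-up. A small illustrative helper for the init-time choice, using the register values from the hunks (the wide value applies only to CX4 PHYs; the helper name is not a function in the driver):

/* Hedged sketch: pick the CDR offset for one SERDES based on wide_ppm_offset. */
static void nes_set_cdr_sketch(struct nes_device *nesdev, int serdes1)
{
	u32 reg = serdes1 ? NES_IDX_ETH_SERDES_CDR_CONTROL1
			  : NES_IDX_ETH_SERDES_CDR_CONTROL0;
	u32 val = wide_ppm_offset ? 0x000FFFAA : 0x000000FF;

	nes_write_indexed(nesdev, reg, val);
}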
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h index f41a8710d2a8..c3654c6383fe 100644 --- a/drivers/infiniband/hw/nes/nes_hw.h +++ b/drivers/infiniband/hw/nes/nes_hw.h | |||
@@ -35,12 +35,14 @@ | |||
35 | 35 | ||
36 | #include <linux/inet_lro.h> | 36 | #include <linux/inet_lro.h> |
37 | 37 | ||
38 | #define NES_PHY_TYPE_CX4 1 | ||
38 | #define NES_PHY_TYPE_1G 2 | 39 | #define NES_PHY_TYPE_1G 2 |
39 | #define NES_PHY_TYPE_IRIS 3 | 40 | #define NES_PHY_TYPE_IRIS 3 |
40 | #define NES_PHY_TYPE_ARGUS 4 | 41 | #define NES_PHY_TYPE_ARGUS 4 |
41 | #define NES_PHY_TYPE_PUMA_1G 5 | 42 | #define NES_PHY_TYPE_PUMA_1G 5 |
42 | #define NES_PHY_TYPE_PUMA_10G 6 | 43 | #define NES_PHY_TYPE_PUMA_10G 6 |
43 | #define NES_PHY_TYPE_GLADIUS 7 | 44 | #define NES_PHY_TYPE_GLADIUS 7 |
45 | #define NES_PHY_TYPE_SFP_D 8 | ||
44 | 46 | ||
45 | #define NES_MULTICAST_PF_MAX 8 | 47 | #define NES_MULTICAST_PF_MAX 8 |
46 | 48 | ||
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c index ecb1f6fd6276..c6e6611d3016 100644 --- a/drivers/infiniband/hw/nes/nes_nic.c +++ b/drivers/infiniband/hw/nes/nes_nic.c | |||
@@ -1426,49 +1426,55 @@ static int nes_netdev_get_settings(struct net_device *netdev, struct ethtool_cmd | |||
1426 | struct nes_vnic *nesvnic = netdev_priv(netdev); | 1426 | struct nes_vnic *nesvnic = netdev_priv(netdev); |
1427 | struct nes_device *nesdev = nesvnic->nesdev; | 1427 | struct nes_device *nesdev = nesvnic->nesdev; |
1428 | struct nes_adapter *nesadapter = nesdev->nesadapter; | 1428 | struct nes_adapter *nesadapter = nesdev->nesadapter; |
1429 | u32 mac_index = nesdev->mac_index; | ||
1430 | u8 phy_type = nesadapter->phy_type[mac_index]; | ||
1431 | u8 phy_index = nesadapter->phy_index[mac_index]; | ||
1429 | u16 phy_data; | 1432 | u16 phy_data; |
1430 | 1433 | ||
1431 | et_cmd->duplex = DUPLEX_FULL; | 1434 | et_cmd->duplex = DUPLEX_FULL; |
1432 | et_cmd->port = PORT_MII; | 1435 | et_cmd->port = PORT_MII; |
1436 | et_cmd->maxtxpkt = 511; | ||
1437 | et_cmd->maxrxpkt = 511; | ||
1433 | 1438 | ||
1434 | if (nesadapter->OneG_Mode) { | 1439 | if (nesadapter->OneG_Mode) { |
1435 | et_cmd->speed = SPEED_1000; | 1440 | et_cmd->speed = SPEED_1000; |
1436 | if (nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_PUMA_1G) { | 1441 | if (phy_type == NES_PHY_TYPE_PUMA_1G) { |
1437 | et_cmd->supported = SUPPORTED_1000baseT_Full; | 1442 | et_cmd->supported = SUPPORTED_1000baseT_Full; |
1438 | et_cmd->advertising = ADVERTISED_1000baseT_Full; | 1443 | et_cmd->advertising = ADVERTISED_1000baseT_Full; |
1439 | et_cmd->autoneg = AUTONEG_DISABLE; | 1444 | et_cmd->autoneg = AUTONEG_DISABLE; |
1440 | et_cmd->transceiver = XCVR_INTERNAL; | 1445 | et_cmd->transceiver = XCVR_INTERNAL; |
1441 | et_cmd->phy_address = nesdev->mac_index; | 1446 | et_cmd->phy_address = mac_index; |
1442 | } else { | 1447 | } else { |
1443 | et_cmd->supported = SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg; | 1448 | et_cmd->supported = SUPPORTED_1000baseT_Full |
1444 | et_cmd->advertising = ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg; | 1449 | | SUPPORTED_Autoneg; |
1445 | nes_read_1G_phy_reg(nesdev, 0, nesadapter->phy_index[nesdev->mac_index], &phy_data); | 1450 | et_cmd->advertising = ADVERTISED_1000baseT_Full |
1451 | | ADVERTISED_Autoneg; | ||
1452 | nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data); | ||
1446 | if (phy_data & 0x1000) | 1453 | if (phy_data & 0x1000) |
1447 | et_cmd->autoneg = AUTONEG_ENABLE; | 1454 | et_cmd->autoneg = AUTONEG_ENABLE; |
1448 | else | 1455 | else |
1449 | et_cmd->autoneg = AUTONEG_DISABLE; | 1456 | et_cmd->autoneg = AUTONEG_DISABLE; |
1450 | et_cmd->transceiver = XCVR_EXTERNAL; | 1457 | et_cmd->transceiver = XCVR_EXTERNAL; |
1451 | et_cmd->phy_address = nesadapter->phy_index[nesdev->mac_index]; | 1458 | et_cmd->phy_address = phy_index; |
1452 | } | 1459 | } |
1460 | return 0; | ||
1461 | } | ||
1462 | if ((phy_type == NES_PHY_TYPE_IRIS) || | ||
1463 | (phy_type == NES_PHY_TYPE_ARGUS) || | ||
1464 | (phy_type == NES_PHY_TYPE_SFP_D)) { | ||
1465 | et_cmd->transceiver = XCVR_EXTERNAL; | ||
1466 | et_cmd->port = PORT_FIBRE; | ||
1467 | et_cmd->supported = SUPPORTED_FIBRE; | ||
1468 | et_cmd->advertising = ADVERTISED_FIBRE; | ||
1469 | et_cmd->phy_address = phy_index; | ||
1453 | } else { | 1470 | } else { |
1454 | if ((nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_IRIS) || | 1471 | et_cmd->transceiver = XCVR_INTERNAL; |
1455 | (nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_ARGUS)) { | 1472 | et_cmd->supported = SUPPORTED_10000baseT_Full; |
1456 | et_cmd->transceiver = XCVR_EXTERNAL; | 1473 | et_cmd->advertising = ADVERTISED_10000baseT_Full; |
1457 | et_cmd->port = PORT_FIBRE; | 1474 | et_cmd->phy_address = mac_index; |
1458 | et_cmd->supported = SUPPORTED_FIBRE; | ||
1459 | et_cmd->advertising = ADVERTISED_FIBRE; | ||
1460 | et_cmd->phy_address = nesadapter->phy_index[nesdev->mac_index]; | ||
1461 | } else { | ||
1462 | et_cmd->transceiver = XCVR_INTERNAL; | ||
1463 | et_cmd->supported = SUPPORTED_10000baseT_Full; | ||
1464 | et_cmd->advertising = ADVERTISED_10000baseT_Full; | ||
1465 | et_cmd->phy_address = nesdev->mac_index; | ||
1466 | } | ||
1467 | et_cmd->speed = SPEED_10000; | ||
1468 | et_cmd->autoneg = AUTONEG_DISABLE; | ||
1469 | } | 1475 | } |
1470 | et_cmd->maxtxpkt = 511; | 1476 | et_cmd->speed = SPEED_10000; |
1471 | et_cmd->maxrxpkt = 511; | 1477 | et_cmd->autoneg = AUTONEG_DISABLE; |
1472 | return 0; | 1478 | return 0; |
1473 | } | 1479 | } |
1474 | 1480 | ||
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c index 5a76a5510350..4c57f329dd50 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c | |||
@@ -70,12 +70,14 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey) | |||
70 | */ | 70 | */ |
71 | if (ppriv->pkey == pkey) { | 71 | if (ppriv->pkey == pkey) { |
72 | result = -ENOTUNIQ; | 72 | result = -ENOTUNIQ; |
73 | priv = NULL; | ||
73 | goto err; | 74 | goto err; |
74 | } | 75 | } |
75 | 76 | ||
76 | list_for_each_entry(priv, &ppriv->child_intfs, list) { | 77 | list_for_each_entry(priv, &ppriv->child_intfs, list) { |
77 | if (priv->pkey == pkey) { | 78 | if (priv->pkey == pkey) { |
78 | result = -ENOTUNIQ; | 79 | result = -ENOTUNIQ; |
80 | priv = NULL; | ||
79 | goto err; | 81 | goto err; |
80 | } | 82 | } |
81 | } | 83 | } |
@@ -96,7 +98,7 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey) | |||
96 | 98 | ||
97 | result = ipoib_set_dev_features(priv, ppriv->ca); | 99 | result = ipoib_set_dev_features(priv, ppriv->ca); |
98 | if (result) | 100 | if (result) |
99 | goto device_init_failed; | 101 | goto err; |
100 | 102 | ||
101 | priv->pkey = pkey; | 103 | priv->pkey = pkey; |
102 | 104 | ||
@@ -109,7 +111,7 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey) | |||
109 | ipoib_warn(ppriv, "failed to initialize subinterface: " | 111 | ipoib_warn(ppriv, "failed to initialize subinterface: " |
110 | "device %s, port %d", | 112 | "device %s, port %d", |
111 | ppriv->ca->name, ppriv->port); | 113 | ppriv->ca->name, ppriv->port); |
112 | goto device_init_failed; | 114 | goto err; |
113 | } | 115 | } |
114 | 116 | ||
115 | result = register_netdevice(priv->dev); | 117 | result = register_netdevice(priv->dev); |
@@ -146,19 +148,19 @@ sysfs_failed: | |||
146 | register_failed: | 148 | register_failed: |
147 | ipoib_dev_cleanup(priv->dev); | 149 | ipoib_dev_cleanup(priv->dev); |
148 | 150 | ||
149 | device_init_failed: | ||
150 | free_netdev(priv->dev); | ||
151 | |||
152 | err: | 151 | err: |
153 | mutex_unlock(&ppriv->vlan_mutex); | 152 | mutex_unlock(&ppriv->vlan_mutex); |
154 | rtnl_unlock(); | 153 | rtnl_unlock(); |
154 | if (priv) | ||
155 | free_netdev(priv->dev); | ||
156 | |||
155 | return result; | 157 | return result; |
156 | } | 158 | } |
157 | 159 | ||
158 | int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey) | 160 | int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey) |
159 | { | 161 | { |
160 | struct ipoib_dev_priv *ppriv, *priv, *tpriv; | 162 | struct ipoib_dev_priv *ppriv, *priv, *tpriv; |
161 | int ret = -ENOENT; | 163 | struct net_device *dev = NULL; |
162 | 164 | ||
163 | if (!capable(CAP_NET_ADMIN)) | 165 | if (!capable(CAP_NET_ADMIN)) |
164 | return -EPERM; | 166 | return -EPERM; |
@@ -172,14 +174,17 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey) | |||
172 | unregister_netdevice(priv->dev); | 174 | unregister_netdevice(priv->dev); |
173 | ipoib_dev_cleanup(priv->dev); | 175 | ipoib_dev_cleanup(priv->dev); |
174 | list_del(&priv->list); | 176 | list_del(&priv->list); |
175 | free_netdev(priv->dev); | 177 | dev = priv->dev; |
176 | |||
177 | ret = 0; | ||
178 | break; | 178 | break; |
179 | } | 179 | } |
180 | } | 180 | } |
181 | mutex_unlock(&ppriv->vlan_mutex); | 181 | mutex_unlock(&ppriv->vlan_mutex); |
182 | rtnl_unlock(); | 182 | rtnl_unlock(); |
183 | 183 | ||
184 | return ret; | 184 | if (dev) { |
185 | free_netdev(dev); | ||
186 | return 0; | ||
187 | } | ||
188 | |||
189 | return -ENODEV; | ||
185 | } | 190 | } |
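Note on the ipoib_vlan.c hunks above: free_netdev() is moved out from under rtnl_lock() and the parent's vlan_mutex; while the locks are held the matching child is only unlinked and remembered, and it is freed afterwards. The sketch below reassembles the delete path from the fragments shown here, so treat it as illustrative rather than the exact committed function; error handling is trimmed.

/*
 * Illustrative reconstruction: unlink while locked, free only after
 * both locks are dropped.
 */
static int example_vlan_delete(struct net_device *pdev, unsigned short pkey)
{
	struct ipoib_dev_priv *ppriv = netdev_priv(pdev);
	struct ipoib_dev_priv *priv, *tpriv;
	struct net_device *dev = NULL;

	rtnl_lock();
	mutex_lock(&ppriv->vlan_mutex);
	list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) {
		if (priv->pkey == pkey) {
			unregister_netdevice(priv->dev);
			ipoib_dev_cleanup(priv->dev);
			list_del(&priv->list);
			dev = priv->dev;	/* remember, free later */
			break;
		}
	}
	mutex_unlock(&ppriv->vlan_mutex);
	rtnl_unlock();

	if (dev) {
		free_netdev(dev);	/* no locks held here */
		return 0;
	}

	return -ENODEV;
}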
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c index ebf4be5b7c4e..2d175b5928ff 100644 --- a/drivers/input/gameport/gameport.c +++ b/drivers/input/gameport/gameport.c | |||
@@ -50,9 +50,8 @@ static LIST_HEAD(gameport_list); | |||
50 | 50 | ||
51 | static struct bus_type gameport_bus; | 51 | static struct bus_type gameport_bus; |
52 | 52 | ||
53 | static void gameport_add_driver(struct gameport_driver *drv); | ||
54 | static void gameport_add_port(struct gameport *gameport); | 53 | static void gameport_add_port(struct gameport *gameport); |
55 | static void gameport_destroy_port(struct gameport *gameport); | 54 | static void gameport_attach_driver(struct gameport_driver *drv); |
56 | static void gameport_reconnect_port(struct gameport *gameport); | 55 | static void gameport_reconnect_port(struct gameport *gameport); |
57 | static void gameport_disconnect_port(struct gameport *gameport); | 56 | static void gameport_disconnect_port(struct gameport *gameport); |
58 | 57 | ||
@@ -230,7 +229,6 @@ static void gameport_find_driver(struct gameport *gameport) | |||
230 | 229 | ||
231 | enum gameport_event_type { | 230 | enum gameport_event_type { |
232 | GAMEPORT_REGISTER_PORT, | 231 | GAMEPORT_REGISTER_PORT, |
233 | GAMEPORT_REGISTER_DRIVER, | ||
234 | GAMEPORT_ATTACH_DRIVER, | 232 | GAMEPORT_ATTACH_DRIVER, |
235 | }; | 233 | }; |
236 | 234 | ||
@@ -374,8 +372,8 @@ static void gameport_handle_event(void) | |||
374 | gameport_add_port(event->object); | 372 | gameport_add_port(event->object); |
375 | break; | 373 | break; |
376 | 374 | ||
377 | case GAMEPORT_REGISTER_DRIVER: | 375 | case GAMEPORT_ATTACH_DRIVER: |
378 | gameport_add_driver(event->object); | 376 | gameport_attach_driver(event->object); |
379 | break; | 377 | break; |
380 | 378 | ||
381 | default: | 379 | default: |
@@ -706,14 +704,14 @@ static int gameport_driver_remove(struct device *dev) | |||
706 | return 0; | 704 | return 0; |
707 | } | 705 | } |
708 | 706 | ||
709 | static void gameport_add_driver(struct gameport_driver *drv) | 707 | static void gameport_attach_driver(struct gameport_driver *drv) |
710 | { | 708 | { |
711 | int error; | 709 | int error; |
712 | 710 | ||
713 | error = driver_register(&drv->driver); | 711 | error = driver_attach(&drv->driver); |
714 | if (error) | 712 | if (error) |
715 | printk(KERN_ERR | 713 | printk(KERN_ERR |
716 | "gameport: driver_register() failed for %s, error: %d\n", | 714 | "gameport: driver_attach() failed for %s, error: %d\n", |
717 | drv->driver.name, error); | 715 | drv->driver.name, error); |
718 | } | 716 | } |
719 | 717 | ||
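The gameport.c hunk renames the deferred operation from "register driver" to "attach driver" and makes the event worker call driver_attach() instead of driver_register(). The companion change to gameport_register_driver() is not shown in this section; presumably it now registers the driver synchronously, so the caller sees errors, and only defers binding to already-present ports. A sketch under that assumption, reusing the driver's existing gameport_queue_event() helper; the exact signatures and error handling are illustrative, not taken from this patch.

int gameport_register_driver(struct gameport_driver *drv)
{
	int error;

	drv->driver.bus = &gameport_bus;

	/* register now so the caller gets the error code directly */
	error = driver_register(&drv->driver);
	if (error)
		return error;

	/* binding to existing ports may sleep, so defer it to the thread */
	error = gameport_queue_event(drv, THIS_MODULE, GAMEPORT_ATTACH_DRIVER);
	if (error) {
		driver_unregister(&drv->driver);
		return error;
	}

	return 0;
}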
diff --git a/drivers/input/input.c b/drivers/input/input.c index ec3db3ade118..935a1835de2d 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c | |||
@@ -132,6 +132,11 @@ static void input_start_autorepeat(struct input_dev *dev, int code) | |||
132 | } | 132 | } |
133 | } | 133 | } |
134 | 134 | ||
135 | static void input_stop_autorepeat(struct input_dev *dev) | ||
136 | { | ||
137 | del_timer(&dev->timer); | ||
138 | } | ||
139 | |||
135 | #define INPUT_IGNORE_EVENT 0 | 140 | #define INPUT_IGNORE_EVENT 0 |
136 | #define INPUT_PASS_TO_HANDLERS 1 | 141 | #define INPUT_PASS_TO_HANDLERS 1 |
137 | #define INPUT_PASS_TO_DEVICE 2 | 142 | #define INPUT_PASS_TO_DEVICE 2 |
@@ -167,6 +172,8 @@ static void input_handle_event(struct input_dev *dev, | |||
167 | __change_bit(code, dev->key); | 172 | __change_bit(code, dev->key); |
168 | if (value) | 173 | if (value) |
169 | input_start_autorepeat(dev, code); | 174 | input_start_autorepeat(dev, code); |
175 | else | ||
176 | input_stop_autorepeat(dev); | ||
170 | } | 177 | } |
171 | 178 | ||
172 | disposition = INPUT_PASS_TO_HANDLERS; | 179 | disposition = INPUT_PASS_TO_HANDLERS; |
@@ -737,11 +744,11 @@ static inline void input_wakeup_procfs_readers(void) | |||
737 | 744 | ||
738 | static unsigned int input_proc_devices_poll(struct file *file, poll_table *wait) | 745 | static unsigned int input_proc_devices_poll(struct file *file, poll_table *wait) |
739 | { | 746 | { |
740 | int state = input_devices_state; | ||
741 | |||
742 | poll_wait(file, &input_devices_poll_wait, wait); | 747 | poll_wait(file, &input_devices_poll_wait, wait); |
743 | if (state != input_devices_state) | 748 | if (file->f_version != input_devices_state) { |
749 | file->f_version = input_devices_state; | ||
744 | return POLLIN | POLLRDNORM; | 750 | return POLLIN | POLLRDNORM; |
751 | } | ||
745 | 752 | ||
746 | return 0; | 753 | return 0; |
747 | } | 754 | } |
@@ -1542,7 +1549,6 @@ int input_register_handle(struct input_handle *handle) | |||
1542 | return error; | 1549 | return error; |
1543 | list_add_tail_rcu(&handle->d_node, &dev->h_list); | 1550 | list_add_tail_rcu(&handle->d_node, &dev->h_list); |
1544 | mutex_unlock(&dev->mutex); | 1551 | mutex_unlock(&dev->mutex); |
1545 | synchronize_rcu(); | ||
1546 | 1552 | ||
1547 | /* | 1553 | /* |
1548 | * Since we are supposed to be called from ->connect() | 1554 | * Since we are supposed to be called from ->connect() |
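Two behavioural fixes sit in the input.c hunks: a key-up event now cancels the autorepeat timer, and the /proc poll handler tracks the change counter in file->f_version instead of a function-local variable, so each opener of the procfs file keeps its own notification state. The idiom reduced to its core, using the names from the driver above:

/*
 * Per-open-file change tracking for poll(): f_version is private to the
 * struct file, so two readers polling the same procfs node no longer
 * share a single snapshot of input_devices_state.
 */
static unsigned int example_proc_devices_poll(struct file *file,
					      poll_table *wait)
{
	poll_wait(file, &input_devices_poll_wait, wait);

	if (file->f_version != input_devices_state) {
		file->f_version = input_devices_state;	/* mark as seen */
		return POLLIN | POLLRDNORM;
	}

	return 0;
}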
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c index 45470f18d7e9..444dec07e5d8 100644 --- a/drivers/input/keyboard/atkbd.c +++ b/drivers/input/keyboard/atkbd.c | |||
@@ -229,7 +229,8 @@ struct atkbd { | |||
229 | /* | 229 | /* |
230 | * System-specific keymap fixup routine | 230 | * System-specific keymap fixup routine |
231 | */ | 231 | */ |
232 | static void (*atkbd_platform_fixup)(struct atkbd *); | 232 | static void (*atkbd_platform_fixup)(struct atkbd *, const void *data); |
233 | static void *atkbd_platform_fixup_data; | ||
233 | 234 | ||
234 | static ssize_t atkbd_attr_show_helper(struct device *dev, char *buf, | 235 | static ssize_t atkbd_attr_show_helper(struct device *dev, char *buf, |
235 | ssize_t (*handler)(struct atkbd *, char *)); | 236 | ssize_t (*handler)(struct atkbd *, char *)); |
@@ -834,87 +835,64 @@ static void atkbd_disconnect(struct serio *serio) | |||
834 | } | 835 | } |
835 | 836 | ||
836 | /* | 837 | /* |
837 | * Most special keys (Fn+F?) on Dell laptops do not generate release | 838 | * generate release events for the keycodes given in data |
838 | * events so we have to do it ourselves. | ||
839 | */ | 839 | */ |
840 | static void atkbd_dell_laptop_keymap_fixup(struct atkbd *atkbd) | 840 | static void atkbd_apply_forced_release_keylist(struct atkbd* atkbd, |
841 | const void *data) | ||
841 | { | 842 | { |
842 | static const unsigned int forced_release_keys[] = { | 843 | const unsigned int *keys = data; |
843 | 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8f, 0x93, | 844 | unsigned int i; |
844 | }; | ||
845 | int i; | ||
846 | 845 | ||
847 | if (atkbd->set == 2) | 846 | if (atkbd->set == 2) |
848 | for (i = 0; i < ARRAY_SIZE(forced_release_keys); i++) | 847 | for (i = 0; keys[i] != -1U; i++) |
849 | __set_bit(forced_release_keys[i], | 848 | __set_bit(keys[i], atkbd->force_release_mask); |
850 | atkbd->force_release_mask); | ||
851 | } | 849 | } |
852 | 850 | ||
853 | /* | 851 | /* |
852 | * Most special keys (Fn+F?) on Dell laptops do not generate release | ||
853 | * events so we have to do it ourselves. | ||
854 | */ | ||
855 | static unsigned int atkbd_dell_laptop_forced_release_keys[] = { | ||
856 | 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8f, 0x93, -1U | ||
857 | }; | ||
858 | |||
859 | /* | ||
854 | * Perform fixup for HP system that doesn't generate release | 860 | * Perform fixup for HP system that doesn't generate release |
855 | * for its video switch | 861 | * for its video switch |
856 | */ | 862 | */ |
857 | static void atkbd_hp_keymap_fixup(struct atkbd *atkbd) | 863 | static unsigned int atkbd_hp_forced_release_keys[] = { |
858 | { | 864 | 0x94, -1U |
859 | static const unsigned int forced_release_keys[] = { | 865 | }; |
860 | 0x94, | ||
861 | }; | ||
862 | int i; | ||
863 | |||
864 | if (atkbd->set == 2) | ||
865 | for (i = 0; i < ARRAY_SIZE(forced_release_keys); i++) | ||
866 | __set_bit(forced_release_keys[i], | ||
867 | atkbd->force_release_mask); | ||
868 | } | ||
869 | 866 | ||
870 | /* | 867 | /* |
871 | * Inventec system with broken key release on volume keys | 868 | * Inventec system with broken key release on volume keys |
872 | */ | 869 | */ |
873 | static void atkbd_inventec_keymap_fixup(struct atkbd *atkbd) | 870 | static unsigned int atkbd_inventec_forced_release_keys[] = { |
874 | { | 871 | 0xae, 0xb0, -1U |
875 | const unsigned int forced_release_keys[] = { | 872 | }; |
876 | 0xae, 0xb0, | ||
877 | }; | ||
878 | int i; | ||
879 | |||
880 | if (atkbd->set == 2) | ||
881 | for (i = 0; i < ARRAY_SIZE(forced_release_keys); i++) | ||
882 | __set_bit(forced_release_keys[i], | ||
883 | atkbd->force_release_mask); | ||
884 | } | ||
885 | 873 | ||
886 | /* | 874 | /* |
887 | * Perform fixup for HP Pavilion ZV6100 laptop that doesn't generate release | 875 | * Perform fixup for HP Pavilion ZV6100 laptop that doesn't generate release |
888 | * for its volume buttons | 876 | * for its volume buttons |
889 | */ | 877 | */ |
890 | static void atkbd_hp_zv6100_keymap_fixup(struct atkbd *atkbd) | 878 | static unsigned int atkbd_hp_zv6100_forced_release_keys[] = { |
891 | { | 879 | 0xae, 0xb0, -1U |
892 | const unsigned int forced_release_keys[] = { | 880 | }; |
893 | 0xae, 0xb0, | ||
894 | }; | ||
895 | int i; | ||
896 | |||
897 | if (atkbd->set == 2) | ||
898 | for (i = 0; i < ARRAY_SIZE(forced_release_keys); i++) | ||
899 | __set_bit(forced_release_keys[i], | ||
900 | atkbd->force_release_mask); | ||
901 | } | ||
902 | 881 | ||
903 | /* | 882 | /* |
904 | * Samsung NC10 with Fn+F? key release not working | 883 | * Samsung NC10, NC20 with Fn+F? key release not working |
905 | */ | 884 | */ |
906 | static void atkbd_samsung_keymap_fixup(struct atkbd *atkbd) | 885 | static unsigned int atkbd_samsung_forced_release_keys[] = { |
907 | { | 886 | 0x82, 0x83, 0x84, 0x86, 0x88, 0x89, 0xb3, 0xf7, 0xf9, -1U |
908 | const unsigned int forced_release_keys[] = { | 887 | }; |
909 | 0x82, 0x83, 0x84, 0x86, 0x88, 0x89, 0xb3, 0xf7, 0xf9, | ||
910 | }; | ||
911 | int i; | ||
912 | 888 | ||
913 | if (atkbd->set == 2) | 889 | /* |
914 | for (i = 0; i < ARRAY_SIZE(forced_release_keys); i++) | 890 | * The volume up and volume down special keys on a Fujitsu Amilo PA 1510 laptop |
915 | __set_bit(forced_release_keys[i], | 891 | * do not generate release events so we have to do it ourselves. |
916 | atkbd->force_release_mask); | 892 | */ |
917 | } | 893 | static unsigned int atkbd_amilo_pa1510_forced_release_keys[] = { |
894 | 0xb0, 0xae, -1U | ||
895 | }; | ||
918 | 896 | ||
919 | /* | 897 | /* |
920 | * atkbd_set_keycode_table() initializes keyboard's keycode table | 898 | * atkbd_set_keycode_table() initializes keyboard's keycode table |
@@ -967,7 +945,7 @@ static void atkbd_set_keycode_table(struct atkbd *atkbd) | |||
967 | * Perform additional fixups | 945 | * Perform additional fixups |
968 | */ | 946 | */ |
969 | if (atkbd_platform_fixup) | 947 | if (atkbd_platform_fixup) |
970 | atkbd_platform_fixup(atkbd); | 948 | atkbd_platform_fixup(atkbd, atkbd_platform_fixup_data); |
971 | } | 949 | } |
972 | 950 | ||
973 | /* | 951 | /* |
@@ -1492,9 +1470,11 @@ static ssize_t atkbd_show_err_count(struct atkbd *atkbd, char *buf) | |||
1492 | return sprintf(buf, "%lu\n", atkbd->err_count); | 1470 | return sprintf(buf, "%lu\n", atkbd->err_count); |
1493 | } | 1471 | } |
1494 | 1472 | ||
1495 | static int __init atkbd_setup_fixup(const struct dmi_system_id *id) | 1473 | static int __init atkbd_setup_forced_release(const struct dmi_system_id *id) |
1496 | { | 1474 | { |
1497 | atkbd_platform_fixup = id->driver_data; | 1475 | atkbd_platform_fixup = atkbd_apply_forced_release_keylist; |
1476 | atkbd_platform_fixup_data = id->driver_data; | ||
1477 | |||
1498 | return 0; | 1478 | return 0; |
1499 | } | 1479 | } |
1500 | 1480 | ||
@@ -1505,8 +1485,8 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = { | |||
1505 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | 1485 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), |
1506 | DMI_MATCH(DMI_CHASSIS_TYPE, "8"), /* Portable */ | 1486 | DMI_MATCH(DMI_CHASSIS_TYPE, "8"), /* Portable */ |
1507 | }, | 1487 | }, |
1508 | .callback = atkbd_setup_fixup, | 1488 | .callback = atkbd_setup_forced_release, |
1509 | .driver_data = atkbd_dell_laptop_keymap_fixup, | 1489 | .driver_data = atkbd_dell_laptop_forced_release_keys, |
1510 | }, | 1490 | }, |
1511 | { | 1491 | { |
1512 | .ident = "Dell Laptop", | 1492 | .ident = "Dell Laptop", |
@@ -1514,8 +1494,8 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = { | |||
1514 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"), | 1494 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"), |
1515 | DMI_MATCH(DMI_CHASSIS_TYPE, "8"), /* Portable */ | 1495 | DMI_MATCH(DMI_CHASSIS_TYPE, "8"), /* Portable */ |
1516 | }, | 1496 | }, |
1517 | .callback = atkbd_setup_fixup, | 1497 | .callback = atkbd_setup_forced_release, |
1518 | .driver_data = atkbd_dell_laptop_keymap_fixup, | 1498 | .driver_data = atkbd_dell_laptop_forced_release_keys, |
1519 | }, | 1499 | }, |
1520 | { | 1500 | { |
1521 | .ident = "HP 2133", | 1501 | .ident = "HP 2133", |
@@ -1523,8 +1503,8 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = { | |||
1523 | DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), | 1503 | DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), |
1524 | DMI_MATCH(DMI_PRODUCT_NAME, "HP 2133"), | 1504 | DMI_MATCH(DMI_PRODUCT_NAME, "HP 2133"), |
1525 | }, | 1505 | }, |
1526 | .callback = atkbd_setup_fixup, | 1506 | .callback = atkbd_setup_forced_release, |
1527 | .driver_data = atkbd_hp_keymap_fixup, | 1507 | .driver_data = atkbd_hp_forced_release_keys, |
1528 | }, | 1508 | }, |
1529 | { | 1509 | { |
1530 | .ident = "HP Pavilion ZV6100", | 1510 | .ident = "HP Pavilion ZV6100", |
@@ -1532,8 +1512,8 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = { | |||
1532 | DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), | 1512 | DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), |
1533 | DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion ZV6100"), | 1513 | DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion ZV6100"), |
1534 | }, | 1514 | }, |
1535 | .callback = atkbd_setup_fixup, | 1515 | .callback = atkbd_setup_forced_release, |
1536 | .driver_data = atkbd_hp_zv6100_keymap_fixup, | 1516 | .driver_data = atkbd_hp_zv6100_forced_release_keys, |
1537 | }, | 1517 | }, |
1538 | { | 1518 | { |
1539 | .ident = "Inventec Symphony", | 1519 | .ident = "Inventec Symphony", |
@@ -1541,8 +1521,8 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = { | |||
1541 | DMI_MATCH(DMI_SYS_VENDOR, "INVENTEC"), | 1521 | DMI_MATCH(DMI_SYS_VENDOR, "INVENTEC"), |
1542 | DMI_MATCH(DMI_PRODUCT_NAME, "SYMPHONY 6.0/7.0"), | 1522 | DMI_MATCH(DMI_PRODUCT_NAME, "SYMPHONY 6.0/7.0"), |
1543 | }, | 1523 | }, |
1544 | .callback = atkbd_setup_fixup, | 1524 | .callback = atkbd_setup_forced_release, |
1545 | .driver_data = atkbd_inventec_keymap_fixup, | 1525 | .driver_data = atkbd_inventec_forced_release_keys, |
1546 | }, | 1526 | }, |
1547 | { | 1527 | { |
1548 | .ident = "Samsung NC10", | 1528 | .ident = "Samsung NC10", |
@@ -1550,8 +1530,35 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = { | |||
1550 | DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), | 1530 | DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), |
1551 | DMI_MATCH(DMI_PRODUCT_NAME, "NC10"), | 1531 | DMI_MATCH(DMI_PRODUCT_NAME, "NC10"), |
1552 | }, | 1532 | }, |
1553 | .callback = atkbd_setup_fixup, | 1533 | .callback = atkbd_setup_forced_release, |
1554 | .driver_data = atkbd_samsung_keymap_fixup, | 1534 | .driver_data = atkbd_samsung_forced_release_keys, |
1535 | }, | ||
1536 | { | ||
1537 | .ident = "Samsung NC20", | ||
1538 | .matches = { | ||
1539 | DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), | ||
1540 | DMI_MATCH(DMI_PRODUCT_NAME, "NC20"), | ||
1541 | }, | ||
1542 | .callback = atkbd_setup_forced_release, | ||
1543 | .driver_data = atkbd_samsung_forced_release_keys, | ||
1544 | }, | ||
1545 | { | ||
1546 | .ident = "Samsung SQ45S70S", | ||
1547 | .matches = { | ||
1548 | DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), | ||
1549 | DMI_MATCH(DMI_PRODUCT_NAME, "SQ45S70S"), | ||
1550 | }, | ||
1551 | .callback = atkbd_setup_forced_release, | ||
1552 | .driver_data = atkbd_samsung_forced_release_keys, | ||
1553 | }, | ||
1554 | { | ||
1555 | .ident = "Fujitsu Amilo PA 1510", | ||
1556 | .matches = { | ||
1557 | DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), | ||
1558 | DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pa 1510"), | ||
1559 | }, | ||
1560 | .callback = atkbd_setup_forced_release, | ||
1561 | .driver_data = atkbd_amilo_pa1510_forced_release_keys, | ||
1555 | }, | 1562 | }, |
1556 | { } | 1563 | { } |
1557 | }; | 1564 | }; |
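The atkbd.c rework above replaces one fixup function per machine with a single atkbd_apply_forced_release_keylist() that walks a -1U-terminated scancode array passed through the DMI entry's driver_data. With that in place, a new quirk is just a key list plus a table entry; the vendor and product strings below are placeholders, not a real machine.

static unsigned int atkbd_example_forced_release_keys[] = {
	0x94, 0xae, 0xb0, -1U		/* scancodes lacking release events */
};

/* ... added to atkbd_dmi_quirk_table[]: */
	{
		.ident = "Example Laptop",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "EXAMPLE VENDOR"),
			DMI_MATCH(DMI_PRODUCT_NAME, "EXAMPLE MODEL"),
		},
		.callback = atkbd_setup_forced_release,
		.driver_data = atkbd_example_forced_release_keys,
	},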
diff --git a/drivers/input/keyboard/bf54x-keys.c b/drivers/input/keyboard/bf54x-keys.c index ee855c5202e8..d427f322e207 100644 --- a/drivers/input/keyboard/bf54x-keys.c +++ b/drivers/input/keyboard/bf54x-keys.c | |||
@@ -211,8 +211,8 @@ static int __devinit bfin_kpad_probe(struct platform_device *pdev) | |||
211 | 211 | ||
212 | if (!pdata->debounce_time || pdata->debounce_time > MAX_MULT || | 212 | if (!pdata->debounce_time || pdata->debounce_time > MAX_MULT || |
213 | !pdata->coldrive_time || pdata->coldrive_time > MAX_MULT) { | 213 | !pdata->coldrive_time || pdata->coldrive_time > MAX_MULT) { |
214 | printk(KERN_ERR DRV_NAME | 214 | printk(KERN_WARNING DRV_NAME |
215 | ": Invalid Debounce/Columdrive Time from pdata\n"); | 215 | ": Invalid Debounce/Columndrive Time in platform data\n"); |
216 | bfin_write_KPAD_MSEL(0xFF0); /* Default MSEL */ | 216 | bfin_write_KPAD_MSEL(0xFF0); /* Default MSEL */ |
217 | } else { | 217 | } else { |
218 | bfin_write_KPAD_MSEL( | 218 | bfin_write_KPAD_MSEL( |
@@ -252,7 +252,7 @@ static int __devinit bfin_kpad_probe(struct platform_device *pdev) | |||
252 | } | 252 | } |
253 | 253 | ||
254 | error = request_irq(bf54x_kpad->irq, bfin_kpad_isr, | 254 | error = request_irq(bf54x_kpad->irq, bfin_kpad_isr, |
255 | IRQF_SAMPLE_RANDOM, DRV_NAME, pdev); | 255 | 0, DRV_NAME, pdev); |
256 | if (error) { | 256 | if (error) { |
257 | printk(KERN_ERR DRV_NAME | 257 | printk(KERN_ERR DRV_NAME |
258 | ": unable to claim irq %d; error %d\n", | 258 | ": unable to claim irq %d; error %d\n", |
diff --git a/drivers/input/keyboard/hilkbd.c b/drivers/input/keyboard/hilkbd.c index aacf71f3cd44..e9d639ec283d 100644 --- a/drivers/input/keyboard/hilkbd.c +++ b/drivers/input/keyboard/hilkbd.c | |||
@@ -198,45 +198,28 @@ static void hil_do(unsigned char cmd, unsigned char *data, unsigned int len) | |||
198 | } | 198 | } |
199 | 199 | ||
200 | 200 | ||
201 | /* initialise HIL */ | 201 | /* initialize HIL */ |
202 | static int __init | 202 | static int __devinit hil_keyb_init(void) |
203 | hil_keyb_init(void) | ||
204 | { | 203 | { |
205 | unsigned char c; | 204 | unsigned char c; |
206 | unsigned int i, kbid; | 205 | unsigned int i, kbid; |
207 | wait_queue_head_t hil_wait; | 206 | wait_queue_head_t hil_wait; |
208 | int err; | 207 | int err; |
209 | 208 | ||
210 | if (hil_dev.dev) { | 209 | if (hil_dev.dev) |
211 | return -ENODEV; /* already initialized */ | 210 | return -ENODEV; /* already initialized */ |
212 | } | ||
213 | 211 | ||
212 | init_waitqueue_head(&hil_wait); | ||
214 | spin_lock_init(&hil_dev.lock); | 213 | spin_lock_init(&hil_dev.lock); |
214 | |||
215 | hil_dev.dev = input_allocate_device(); | 215 | hil_dev.dev = input_allocate_device(); |
216 | if (!hil_dev.dev) | 216 | if (!hil_dev.dev) |
217 | return -ENOMEM; | 217 | return -ENOMEM; |
218 | 218 | ||
219 | #if defined(CONFIG_HP300) | ||
220 | if (!MACH_IS_HP300) { | ||
221 | err = -ENODEV; | ||
222 | goto err1; | ||
223 | } | ||
224 | if (!hwreg_present((void *)(HILBASE + HIL_DATA))) { | ||
225 | printk(KERN_ERR "HIL: hardware register was not found\n"); | ||
226 | err = -ENODEV; | ||
227 | goto err1; | ||
228 | } | ||
229 | if (!request_region(HILBASE + HIL_DATA, 2, "hil")) { | ||
230 | printk(KERN_ERR "HIL: IOPORT region already used\n"); | ||
231 | err = -EIO; | ||
232 | goto err1; | ||
233 | } | ||
234 | #endif | ||
235 | |||
236 | err = request_irq(HIL_IRQ, hil_interrupt, 0, "hil", hil_dev.dev_id); | 219 | err = request_irq(HIL_IRQ, hil_interrupt, 0, "hil", hil_dev.dev_id); |
237 | if (err) { | 220 | if (err) { |
238 | printk(KERN_ERR "HIL: Can't get IRQ\n"); | 221 | printk(KERN_ERR "HIL: Can't get IRQ\n"); |
239 | goto err2; | 222 | goto err1; |
240 | } | 223 | } |
241 | 224 | ||
242 | /* Turn on interrupts */ | 225 | /* Turn on interrupts */ |
@@ -246,11 +229,9 @@ hil_keyb_init(void) | |||
246 | hil_dev.valid = 0; /* clear any pending data */ | 229 | hil_dev.valid = 0; /* clear any pending data */ |
247 | hil_do(HIL_READKBDSADR, NULL, 0); | 230 | hil_do(HIL_READKBDSADR, NULL, 0); |
248 | 231 | ||
249 | init_waitqueue_head(&hil_wait); | 232 | wait_event_interruptible_timeout(hil_wait, hil_dev.valid, 3 * HZ); |
250 | wait_event_interruptible_timeout(hil_wait, hil_dev.valid, 3*HZ); | 233 | if (!hil_dev.valid) |
251 | if (!hil_dev.valid) { | ||
252 | printk(KERN_WARNING "HIL: timed out, assuming no keyboard present\n"); | 234 | printk(KERN_WARNING "HIL: timed out, assuming no keyboard present\n"); |
253 | } | ||
254 | 235 | ||
255 | c = hil_dev.c; | 236 | c = hil_dev.c; |
256 | hil_dev.valid = 0; | 237 | hil_dev.valid = 0; |
@@ -268,7 +249,7 @@ hil_keyb_init(void) | |||
268 | 249 | ||
269 | for (i = 0; i < HIL_KEYCODES_SET1_TBLSIZE; i++) | 250 | for (i = 0; i < HIL_KEYCODES_SET1_TBLSIZE; i++) |
270 | if (hphilkeyb_keycode[i] != KEY_RESERVED) | 251 | if (hphilkeyb_keycode[i] != KEY_RESERVED) |
271 | set_bit(hphilkeyb_keycode[i], hil_dev.dev->keybit); | 252 | __set_bit(hphilkeyb_keycode[i], hil_dev.dev->keybit); |
272 | 253 | ||
273 | hil_dev.dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP); | 254 | hil_dev.dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP); |
274 | hil_dev.dev->ledbit[0] = BIT_MASK(LED_NUML) | BIT_MASK(LED_CAPSL) | | 255 | hil_dev.dev->ledbit[0] = BIT_MASK(LED_NUML) | BIT_MASK(LED_CAPSL) | |
@@ -287,34 +268,45 @@ hil_keyb_init(void) | |||
287 | err = input_register_device(hil_dev.dev); | 268 | err = input_register_device(hil_dev.dev); |
288 | if (err) { | 269 | if (err) { |
289 | printk(KERN_ERR "HIL: Can't register device\n"); | 270 | printk(KERN_ERR "HIL: Can't register device\n"); |
290 | goto err3; | 271 | goto err2; |
291 | } | 272 | } |
273 | |||
292 | printk(KERN_INFO "input: %s, ID %d at 0x%08lx (irq %d) found and attached\n", | 274 | printk(KERN_INFO "input: %s, ID %d at 0x%08lx (irq %d) found and attached\n", |
293 | hil_dev.dev->name, kbid, HILBASE, HIL_IRQ); | 275 | hil_dev.dev->name, kbid, HILBASE, HIL_IRQ); |
294 | 276 | ||
295 | return 0; | 277 | return 0; |
296 | 278 | ||
297 | err3: | 279 | err2: |
298 | hil_do(HIL_INTOFF, NULL, 0); | 280 | hil_do(HIL_INTOFF, NULL, 0); |
299 | disable_irq(HIL_IRQ); | ||
300 | free_irq(HIL_IRQ, hil_dev.dev_id); | 281 | free_irq(HIL_IRQ, hil_dev.dev_id); |
301 | err2: | ||
302 | #if defined(CONFIG_HP300) | ||
303 | release_region(HILBASE + HIL_DATA, 2); | ||
304 | err1: | 282 | err1: |
305 | #endif | ||
306 | input_free_device(hil_dev.dev); | 283 | input_free_device(hil_dev.dev); |
307 | hil_dev.dev = NULL; | 284 | hil_dev.dev = NULL; |
308 | return err; | 285 | return err; |
309 | } | 286 | } |
310 | 287 | ||
288 | static void __devexit hil_keyb_exit(void) | ||
289 | { | ||
290 | if (HIL_IRQ) | ||
291 | free_irq(HIL_IRQ, hil_dev.dev_id); | ||
292 | |||
293 | /* Turn off interrupts */ | ||
294 | hil_do(HIL_INTOFF, NULL, 0); | ||
295 | |||
296 | input_unregister_device(hil_dev.dev); | ||
297 | hil_dev.dev = NULL; | ||
298 | } | ||
311 | 299 | ||
312 | #if defined(CONFIG_PARISC) | 300 | #if defined(CONFIG_PARISC) |
313 | static int __init | 301 | static int __devinit hil_probe_chip(struct parisc_device *dev) |
314 | hil_init_chip(struct parisc_device *dev) | ||
315 | { | 302 | { |
303 | /* Only allow one HIL keyboard */ | ||
304 | if (hil_dev.dev) | ||
305 | return -ENODEV; | ||
306 | |||
316 | if (!dev->irq) { | 307 | if (!dev->irq) { |
317 | printk(KERN_WARNING "HIL: IRQ not found for HIL bus at 0x%08lx\n", dev->hpa.start); | 308 | printk(KERN_WARNING "HIL: IRQ not found for HIL bus at 0x%p\n", |
309 | (void *)dev->hpa.start); | ||
318 | return -ENODEV; | 310 | return -ENODEV; |
319 | } | 311 | } |
320 | 312 | ||
@@ -327,51 +319,79 @@ hil_init_chip(struct parisc_device *dev) | |||
327 | return hil_keyb_init(); | 319 | return hil_keyb_init(); |
328 | } | 320 | } |
329 | 321 | ||
322 | static int __devexit hil_remove_chip(struct parisc_device *dev) | ||
323 | { | ||
324 | hil_keyb_exit(); | ||
325 | |||
326 | return 0; | ||
327 | } | ||
328 | |||
330 | static struct parisc_device_id hil_tbl[] = { | 329 | static struct parisc_device_id hil_tbl[] = { |
331 | { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00073 }, | 330 | { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00073 }, |
332 | { 0, } | 331 | { 0, } |
333 | }; | 332 | }; |
334 | 333 | ||
334 | #if 0 | ||
335 | /* Disabled to avoid conflicts with the HP SDC HIL drivers */ | ||
335 | MODULE_DEVICE_TABLE(parisc, hil_tbl); | 336 | MODULE_DEVICE_TABLE(parisc, hil_tbl); |
337 | #endif | ||
336 | 338 | ||
337 | static struct parisc_driver hil_driver = { | 339 | static struct parisc_driver hil_driver = { |
338 | .name = "hil", | 340 | .name = "hil", |
339 | .id_table = hil_tbl, | 341 | .id_table = hil_tbl, |
340 | .probe = hil_init_chip, | 342 | .probe = hil_probe_chip, |
343 | .remove = __devexit_p(hil_remove_chip), | ||
341 | }; | 344 | }; |
342 | #endif /* CONFIG_PARISC */ | ||
343 | |||
344 | 345 | ||
345 | static int __init hil_init(void) | 346 | static int __init hil_init(void) |
346 | { | 347 | { |
347 | #if defined(CONFIG_PARISC) | ||
348 | return register_parisc_driver(&hil_driver); | 348 | return register_parisc_driver(&hil_driver); |
349 | #else | ||
350 | return hil_keyb_init(); | ||
351 | #endif | ||
352 | } | 349 | } |
353 | 350 | ||
354 | |||
355 | static void __exit hil_exit(void) | 351 | static void __exit hil_exit(void) |
356 | { | 352 | { |
357 | if (HIL_IRQ) { | 353 | unregister_parisc_driver(&hil_driver); |
358 | disable_irq(HIL_IRQ); | 354 | } |
359 | free_irq(HIL_IRQ, hil_dev.dev_id); | 355 | |
356 | #else /* !CONFIG_PARISC */ | ||
357 | |||
358 | static int __init hil_init(void) | ||
359 | { | ||
360 | int error; | ||
361 | |||
362 | /* Only allow one HIL keyboard */ | ||
363 | if (hil_dev.dev) | ||
364 | return -EBUSY; | ||
365 | |||
366 | if (!MACH_IS_HP300) | ||
367 | return -ENODEV; | ||
368 | |||
369 | if (!hwreg_present((void *)(HILBASE + HIL_DATA))) { | ||
370 | printk(KERN_ERR "HIL: hardware register was not found\n"); | ||
371 | return -ENODEV; | ||
360 | } | 372 | } |
361 | 373 | ||
362 | /* Turn off interrupts */ | 374 | if (!request_region(HILBASE + HIL_DATA, 2, "hil")) { |
363 | hil_do(HIL_INTOFF, NULL, 0); | 375 | printk(KERN_ERR "HIL: IOPORT region already used\n"); |
376 | return -EIO; | ||
377 | } | ||
364 | 378 | ||
365 | input_unregister_device(hil_dev.dev); | 379 | error = hil_keyb_init(); |
380 | if (error) { | ||
381 | release_region(HILBASE + HIL_DATA, 2); | ||
382 | return error; | ||
383 | } | ||
366 | 384 | ||
367 | hil_dev.dev = NULL; | 385 | return 0; |
386 | } | ||
368 | 387 | ||
369 | #if defined(CONFIG_PARISC) | 388 | static void __exit hil_exit(void) |
370 | unregister_parisc_driver(&hil_driver); | 389 | { |
371 | #else | 390 | hil_keyb_exit(); |
372 | release_region(HILBASE+HIL_DATA, 2); | 391 | release_region(HILBASE + HIL_DATA, 2); |
373 | #endif | ||
374 | } | 392 | } |
375 | 393 | ||
394 | #endif /* CONFIG_PARISC */ | ||
395 | |||
376 | module_init(hil_init); | 396 | module_init(hil_init); |
377 | module_exit(hil_exit); | 397 | module_exit(hil_exit); |
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig index 67e5553f699a..5c0a631d1455 100644 --- a/drivers/input/misc/Kconfig +++ b/drivers/input/misc/Kconfig | |||
@@ -214,7 +214,7 @@ config INPUT_SGI_BTNS | |||
214 | 214 | ||
215 | config HP_SDC_RTC | 215 | config HP_SDC_RTC |
216 | tristate "HP SDC Real Time Clock" | 216 | tristate "HP SDC Real Time Clock" |
217 | depends on GSC || HP300 | 217 | depends on (GSC || HP300) && SERIO |
218 | select HP_SDC | 218 | select HP_SDC |
219 | help | 219 | help |
220 | Say Y here if you want to support the built-in real time clock | 220 | Say Y here if you want to support the built-in real time clock |
@@ -227,4 +227,27 @@ config INPUT_PCF50633_PMU | |||
227 | Say Y to include support for delivering PMU events via input | 227 | Say Y to include support for delivering PMU events via input |
228 | layer on NXP PCF50633. | 228 | layer on NXP PCF50633. |
229 | 229 | ||
230 | config INPUT_GPIO_ROTARY_ENCODER | ||
231 | tristate "Rotary encoders connected to GPIO pins" | ||
232 | depends on GPIOLIB && GENERIC_GPIO | ||
233 | help | ||
234 | Say Y here to add support for rotary encoders connected to GPIO lines. | ||
235 | Check file:Documentation/input/rotary_encoder.txt for more | ||
236 | information. | ||
237 | |||
238 | To compile this driver as a module, choose M here: the | ||
239 | module will be called rotary_encoder. | ||
240 | |||
241 | config INPUT_RB532_BUTTON | ||
242 | tristate "Mikrotik Routerboard 532 button interface" | ||
243 | depends on MIKROTIK_RB532 | ||
244 | depends on GPIOLIB && GENERIC_GPIO | ||
245 | select INPUT_POLLDEV | ||
246 | help | ||
247 | Say Y here if you want support for the S1 button built into | ||
248 | Mikrotik's Routerboard 532. | ||
249 | |||
250 | To compile this driver as a module, choose M here: the | ||
251 | module will be called rb532_button. | ||
252 | |||
230 | endif | 253 | endif |
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile index bb62e6efacf3..eb3f407baedf 100644 --- a/drivers/input/misc/Makefile +++ b/drivers/input/misc/Makefile | |||
@@ -4,21 +4,23 @@ | |||
4 | 4 | ||
5 | # Each configuration option enables a list of files. | 5 | # Each configuration option enables a list of files. |
6 | 6 | ||
7 | obj-$(CONFIG_INPUT_SPARCSPKR) += sparcspkr.o | 7 | obj-$(CONFIG_INPUT_APANEL) += apanel.o |
8 | obj-$(CONFIG_INPUT_PCSPKR) += pcspkr.o | ||
9 | obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o | ||
10 | obj-$(CONFIG_INPUT_IXP4XX_BEEPER) += ixp4xx-beeper.o | ||
11 | obj-$(CONFIG_INPUT_COBALT_BTNS) += cobalt_btns.o | ||
12 | obj-$(CONFIG_INPUT_WISTRON_BTNS) += wistron_btns.o | ||
13 | obj-$(CONFIG_INPUT_ATLAS_BTNS) += atlas_btns.o | ||
14 | obj-$(CONFIG_INPUT_ATI_REMOTE) += ati_remote.o | 8 | obj-$(CONFIG_INPUT_ATI_REMOTE) += ati_remote.o |
15 | obj-$(CONFIG_INPUT_ATI_REMOTE2) += ati_remote2.o | 9 | obj-$(CONFIG_INPUT_ATI_REMOTE2) += ati_remote2.o |
16 | obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o | 10 | obj-$(CONFIG_INPUT_ATLAS_BTNS) += atlas_btns.o |
17 | obj-$(CONFIG_INPUT_POWERMATE) += powermate.o | ||
18 | obj-$(CONFIG_INPUT_YEALINK) += yealink.o | ||
19 | obj-$(CONFIG_INPUT_CM109) += cm109.o | 11 | obj-$(CONFIG_INPUT_CM109) += cm109.o |
12 | obj-$(CONFIG_INPUT_COBALT_BTNS) += cobalt_btns.o | ||
20 | obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o | 13 | obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o |
21 | obj-$(CONFIG_INPUT_UINPUT) += uinput.o | 14 | obj-$(CONFIG_INPUT_IXP4XX_BEEPER) += ixp4xx-beeper.o |
22 | obj-$(CONFIG_INPUT_APANEL) += apanel.o | 15 | obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o |
23 | obj-$(CONFIG_INPUT_SGI_BTNS) += sgi_btns.o | 16 | obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o |
24 | obj-$(CONFIG_INPUT_PCF50633_PMU) += pcf50633-input.o | 17 | obj-$(CONFIG_INPUT_PCF50633_PMU) += pcf50633-input.o |
18 | obj-$(CONFIG_INPUT_PCSPKR) += pcspkr.o | ||
19 | obj-$(CONFIG_INPUT_POWERMATE) += powermate.o | ||
20 | obj-$(CONFIG_INPUT_RB532_BUTTON) += rb532_button.o | ||
21 | obj-$(CONFIG_INPUT_GPIO_ROTARY_ENCODER) += rotary_encoder.o | ||
22 | obj-$(CONFIG_INPUT_SGI_BTNS) += sgi_btns.o | ||
23 | obj-$(CONFIG_INPUT_SPARCSPKR) += sparcspkr.o | ||
24 | obj-$(CONFIG_INPUT_UINPUT) += uinput.o | ||
25 | obj-$(CONFIG_INPUT_WISTRON_BTNS) += wistron_btns.o | ||
26 | obj-$(CONFIG_INPUT_YEALINK) += yealink.o | ||
diff --git a/drivers/input/misc/ati_remote2.c b/drivers/input/misc/ati_remote2.c index 3c9988dc0e9f..922c05141585 100644 --- a/drivers/input/misc/ati_remote2.c +++ b/drivers/input/misc/ati_remote2.c | |||
@@ -31,12 +31,73 @@ MODULE_LICENSE("GPL"); | |||
31 | * newly configured "channel". | 31 | * newly configured "channel". |
32 | */ | 32 | */ |
33 | 33 | ||
34 | static unsigned int channel_mask = 0xFFFF; | 34 | enum { |
35 | module_param(channel_mask, uint, 0644); | 35 | ATI_REMOTE2_MAX_CHANNEL_MASK = 0xFFFF, |
36 | ATI_REMOTE2_MAX_MODE_MASK = 0x1F, | ||
37 | }; | ||
38 | |||
39 | static int ati_remote2_set_mask(const char *val, | ||
40 | struct kernel_param *kp, unsigned int max) | ||
41 | { | ||
42 | unsigned long mask; | ||
43 | int ret; | ||
44 | |||
45 | if (!val) | ||
46 | return -EINVAL; | ||
47 | |||
48 | ret = strict_strtoul(val, 0, &mask); | ||
49 | if (ret) | ||
50 | return ret; | ||
51 | |||
52 | if (mask & ~max) | ||
53 | return -EINVAL; | ||
54 | |||
55 | *(unsigned int *)kp->arg = mask; | ||
56 | |||
57 | return 0; | ||
58 | } | ||
59 | |||
60 | static int ati_remote2_set_channel_mask(const char *val, | ||
61 | struct kernel_param *kp) | ||
62 | { | ||
63 | pr_debug("%s()\n", __func__); | ||
64 | |||
65 | return ati_remote2_set_mask(val, kp, ATI_REMOTE2_MAX_CHANNEL_MASK); | ||
66 | } | ||
67 | |||
68 | static int ati_remote2_get_channel_mask(char *buffer, struct kernel_param *kp) | ||
69 | { | ||
70 | pr_debug("%s()\n", __func__); | ||
71 | |||
72 | return sprintf(buffer, "0x%04x", *(unsigned int *)kp->arg); | ||
73 | } | ||
74 | |||
75 | static int ati_remote2_set_mode_mask(const char *val, struct kernel_param *kp) | ||
76 | { | ||
77 | pr_debug("%s()\n", __func__); | ||
78 | |||
79 | return ati_remote2_set_mask(val, kp, ATI_REMOTE2_MAX_MODE_MASK); | ||
80 | } | ||
81 | |||
82 | static int ati_remote2_get_mode_mask(char *buffer, struct kernel_param *kp) | ||
83 | { | ||
84 | pr_debug("%s()\n", __func__); | ||
85 | |||
86 | return sprintf(buffer, "0x%02x", *(unsigned int *)kp->arg); | ||
87 | } | ||
88 | |||
89 | static unsigned int channel_mask = ATI_REMOTE2_MAX_CHANNEL_MASK; | ||
90 | #define param_check_channel_mask(name, p) __param_check(name, p, unsigned int) | ||
91 | #define param_set_channel_mask ati_remote2_set_channel_mask | ||
92 | #define param_get_channel_mask ati_remote2_get_channel_mask | ||
93 | module_param(channel_mask, channel_mask, 0644); | ||
36 | MODULE_PARM_DESC(channel_mask, "Bitmask of channels to accept <15:Channel16>...<1:Channel2><0:Channel1>"); | 94 | MODULE_PARM_DESC(channel_mask, "Bitmask of channels to accept <15:Channel16>...<1:Channel2><0:Channel1>"); |
37 | 95 | ||
38 | static unsigned int mode_mask = 0x1F; | 96 | static unsigned int mode_mask = ATI_REMOTE2_MAX_MODE_MASK; |
39 | module_param(mode_mask, uint, 0644); | 97 | #define param_check_mode_mask(name, p) __param_check(name, p, unsigned int) |
98 | #define param_set_mode_mask ati_remote2_set_mode_mask | ||
99 | #define param_get_mode_mask ati_remote2_get_mode_mask | ||
100 | module_param(mode_mask, mode_mask, 0644); | ||
40 | MODULE_PARM_DESC(mode_mask, "Bitmask of modes to accept <4:PC><3:AUX4><2:AUX3><1:AUX2><0:AUX1>"); | 101 | MODULE_PARM_DESC(mode_mask, "Bitmask of modes to accept <4:PC><3:AUX4><2:AUX3><1:AUX2><0:AUX1>"); |
41 | 102 | ||
42 | static struct usb_device_id ati_remote2_id_table[] = { | 103 | static struct usb_device_id ati_remote2_id_table[] = { |
@@ -133,12 +194,18 @@ struct ati_remote2 { | |||
133 | u16 keycode[ATI_REMOTE2_MODES][ARRAY_SIZE(ati_remote2_key_table)]; | 194 | u16 keycode[ATI_REMOTE2_MODES][ARRAY_SIZE(ati_remote2_key_table)]; |
134 | 195 | ||
135 | unsigned int flags; | 196 | unsigned int flags; |
197 | |||
198 | unsigned int channel_mask; | ||
199 | unsigned int mode_mask; | ||
136 | }; | 200 | }; |
137 | 201 | ||
138 | static int ati_remote2_probe(struct usb_interface *interface, const struct usb_device_id *id); | 202 | static int ati_remote2_probe(struct usb_interface *interface, const struct usb_device_id *id); |
139 | static void ati_remote2_disconnect(struct usb_interface *interface); | 203 | static void ati_remote2_disconnect(struct usb_interface *interface); |
140 | static int ati_remote2_suspend(struct usb_interface *interface, pm_message_t message); | 204 | static int ati_remote2_suspend(struct usb_interface *interface, pm_message_t message); |
141 | static int ati_remote2_resume(struct usb_interface *interface); | 205 | static int ati_remote2_resume(struct usb_interface *interface); |
206 | static int ati_remote2_reset_resume(struct usb_interface *interface); | ||
207 | static int ati_remote2_pre_reset(struct usb_interface *interface); | ||
208 | static int ati_remote2_post_reset(struct usb_interface *interface); | ||
142 | 209 | ||
143 | static struct usb_driver ati_remote2_driver = { | 210 | static struct usb_driver ati_remote2_driver = { |
144 | .name = "ati_remote2", | 211 | .name = "ati_remote2", |
@@ -147,6 +214,9 @@ static struct usb_driver ati_remote2_driver = { | |||
147 | .id_table = ati_remote2_id_table, | 214 | .id_table = ati_remote2_id_table, |
148 | .suspend = ati_remote2_suspend, | 215 | .suspend = ati_remote2_suspend, |
149 | .resume = ati_remote2_resume, | 216 | .resume = ati_remote2_resume, |
217 | .reset_resume = ati_remote2_reset_resume, | ||
218 | .pre_reset = ati_remote2_pre_reset, | ||
219 | .post_reset = ati_remote2_post_reset, | ||
150 | .supports_autosuspend = 1, | 220 | .supports_autosuspend = 1, |
151 | }; | 221 | }; |
152 | 222 | ||
@@ -238,7 +308,7 @@ static void ati_remote2_input_mouse(struct ati_remote2 *ar2) | |||
238 | 308 | ||
239 | channel = data[0] >> 4; | 309 | channel = data[0] >> 4; |
240 | 310 | ||
241 | if (!((1 << channel) & channel_mask)) | 311 | if (!((1 << channel) & ar2->channel_mask)) |
242 | return; | 312 | return; |
243 | 313 | ||
244 | mode = data[0] & 0x0F; | 314 | mode = data[0] & 0x0F; |
@@ -250,7 +320,7 @@ static void ati_remote2_input_mouse(struct ati_remote2 *ar2) | |||
250 | return; | 320 | return; |
251 | } | 321 | } |
252 | 322 | ||
253 | if (!((1 << mode) & mode_mask)) | 323 | if (!((1 << mode) & ar2->mode_mask)) |
254 | return; | 324 | return; |
255 | 325 | ||
256 | input_event(idev, EV_REL, REL_X, (s8) data[1]); | 326 | input_event(idev, EV_REL, REL_X, (s8) data[1]); |
@@ -277,7 +347,7 @@ static void ati_remote2_input_key(struct ati_remote2 *ar2) | |||
277 | 347 | ||
278 | channel = data[0] >> 4; | 348 | channel = data[0] >> 4; |
279 | 349 | ||
280 | if (!((1 << channel) & channel_mask)) | 350 | if (!((1 << channel) & ar2->channel_mask)) |
281 | return; | 351 | return; |
282 | 352 | ||
283 | mode = data[0] & 0x0F; | 353 | mode = data[0] & 0x0F; |
@@ -305,7 +375,7 @@ static void ati_remote2_input_key(struct ati_remote2 *ar2) | |||
305 | ar2->mode = mode; | 375 | ar2->mode = mode; |
306 | } | 376 | } |
307 | 377 | ||
308 | if (!((1 << mode) & mode_mask)) | 378 | if (!((1 << mode) & ar2->mode_mask)) |
309 | return; | 379 | return; |
310 | 380 | ||
311 | index = ati_remote2_lookup(hw_code); | 381 | index = ati_remote2_lookup(hw_code); |
@@ -410,7 +480,7 @@ static int ati_remote2_getkeycode(struct input_dev *idev, | |||
410 | int index, mode; | 480 | int index, mode; |
411 | 481 | ||
412 | mode = scancode >> 8; | 482 | mode = scancode >> 8; |
413 | if (mode > ATI_REMOTE2_PC || !((1 << mode) & mode_mask)) | 483 | if (mode > ATI_REMOTE2_PC || !((1 << mode) & ar2->mode_mask)) |
414 | return -EINVAL; | 484 | return -EINVAL; |
415 | 485 | ||
416 | index = ati_remote2_lookup(scancode & 0xFF); | 486 | index = ati_remote2_lookup(scancode & 0xFF); |
@@ -427,7 +497,7 @@ static int ati_remote2_setkeycode(struct input_dev *idev, int scancode, int keyc | |||
427 | int index, mode, old_keycode; | 497 | int index, mode, old_keycode; |
428 | 498 | ||
429 | mode = scancode >> 8; | 499 | mode = scancode >> 8; |
430 | if (mode > ATI_REMOTE2_PC || !((1 << mode) & mode_mask)) | 500 | if (mode > ATI_REMOTE2_PC || !((1 << mode) & ar2->mode_mask)) |
431 | return -EINVAL; | 501 | return -EINVAL; |
432 | 502 | ||
433 | index = ati_remote2_lookup(scancode & 0xFF); | 503 | index = ati_remote2_lookup(scancode & 0xFF); |
@@ -550,7 +620,7 @@ static void ati_remote2_urb_cleanup(struct ati_remote2 *ar2) | |||
550 | } | 620 | } |
551 | } | 621 | } |
552 | 622 | ||
553 | static int ati_remote2_setup(struct ati_remote2 *ar2) | 623 | static int ati_remote2_setup(struct ati_remote2 *ar2, unsigned int ch_mask) |
554 | { | 624 | { |
555 | int r, i, channel; | 625 | int r, i, channel; |
556 | 626 | ||
@@ -565,8 +635,8 @@ static int ati_remote2_setup(struct ati_remote2 *ar2) | |||
565 | 635 | ||
566 | channel = 0; | 636 | channel = 0; |
567 | for (i = 0; i < 16; i++) { | 637 | for (i = 0; i < 16; i++) { |
568 | if ((1 << i) & channel_mask) { | 638 | if ((1 << i) & ch_mask) { |
569 | if (!(~(1 << i) & 0xFFFF & channel_mask)) | 639 | if (!(~(1 << i) & ch_mask)) |
570 | channel = i + 1; | 640 | channel = i + 1; |
571 | break; | 641 | break; |
572 | } | 642 | } |
@@ -585,6 +655,99 @@ static int ati_remote2_setup(struct ati_remote2 *ar2) | |||
585 | return 0; | 655 | return 0; |
586 | } | 656 | } |
587 | 657 | ||
658 | static ssize_t ati_remote2_show_channel_mask(struct device *dev, | ||
659 | struct device_attribute *attr, | ||
660 | char *buf) | ||
661 | { | ||
662 | struct usb_device *udev = to_usb_device(dev); | ||
663 | struct usb_interface *intf = usb_ifnum_to_if(udev, 0); | ||
664 | struct ati_remote2 *ar2 = usb_get_intfdata(intf); | ||
665 | |||
666 | return sprintf(buf, "0x%04x\n", ar2->channel_mask); | ||
667 | } | ||
668 | |||
669 | static ssize_t ati_remote2_store_channel_mask(struct device *dev, | ||
670 | struct device_attribute *attr, | ||
671 | const char *buf, size_t count) | ||
672 | { | ||
673 | struct usb_device *udev = to_usb_device(dev); | ||
674 | struct usb_interface *intf = usb_ifnum_to_if(udev, 0); | ||
675 | struct ati_remote2 *ar2 = usb_get_intfdata(intf); | ||
676 | unsigned long mask; | ||
677 | int r; | ||
678 | |||
679 | if (strict_strtoul(buf, 0, &mask)) | ||
680 | return -EINVAL; | ||
681 | |||
682 | if (mask & ~ATI_REMOTE2_MAX_CHANNEL_MASK) | ||
683 | return -EINVAL; | ||
684 | |||
685 | r = usb_autopm_get_interface(ar2->intf[0]); | ||
686 | if (r) { | ||
687 | dev_err(&ar2->intf[0]->dev, | ||
688 | "%s(): usb_autopm_get_interface() = %d\n", __func__, r); | ||
689 | return r; | ||
690 | } | ||
691 | |||
692 | mutex_lock(&ati_remote2_mutex); | ||
693 | |||
694 | if (mask != ar2->channel_mask && !ati_remote2_setup(ar2, mask)) | ||
695 | ar2->channel_mask = mask; | ||
696 | |||
697 | mutex_unlock(&ati_remote2_mutex); | ||
698 | |||
699 | usb_autopm_put_interface(ar2->intf[0]); | ||
700 | |||
701 | return count; | ||
702 | } | ||
703 | |||
704 | static ssize_t ati_remote2_show_mode_mask(struct device *dev, | ||
705 | struct device_attribute *attr, | ||
706 | char *buf) | ||
707 | { | ||
708 | struct usb_device *udev = to_usb_device(dev); | ||
709 | struct usb_interface *intf = usb_ifnum_to_if(udev, 0); | ||
710 | struct ati_remote2 *ar2 = usb_get_intfdata(intf); | ||
711 | |||
712 | return sprintf(buf, "0x%02x\n", ar2->mode_mask); | ||
713 | } | ||
714 | |||
715 | static ssize_t ati_remote2_store_mode_mask(struct device *dev, | ||
716 | struct device_attribute *attr, | ||
717 | const char *buf, size_t count) | ||
718 | { | ||
719 | struct usb_device *udev = to_usb_device(dev); | ||
720 | struct usb_interface *intf = usb_ifnum_to_if(udev, 0); | ||
721 | struct ati_remote2 *ar2 = usb_get_intfdata(intf); | ||
722 | unsigned long mask; | ||
723 | |||
724 | if (strict_strtoul(buf, 0, &mask)) | ||
725 | return -EINVAL; | ||
726 | |||
727 | if (mask & ~ATI_REMOTE2_MAX_MODE_MASK) | ||
728 | return -EINVAL; | ||
729 | |||
730 | ar2->mode_mask = mask; | ||
731 | |||
732 | return count; | ||
733 | } | ||
734 | |||
735 | static DEVICE_ATTR(channel_mask, 0644, ati_remote2_show_channel_mask, | ||
736 | ati_remote2_store_channel_mask); | ||
737 | |||
738 | static DEVICE_ATTR(mode_mask, 0644, ati_remote2_show_mode_mask, | ||
739 | ati_remote2_store_mode_mask); | ||
740 | |||
741 | static struct attribute *ati_remote2_attrs[] = { | ||
742 | &dev_attr_channel_mask.attr, | ||
743 | &dev_attr_mode_mask.attr, | ||
744 | NULL, | ||
745 | }; | ||
746 | |||
747 | static struct attribute_group ati_remote2_attr_group = { | ||
748 | .attrs = ati_remote2_attrs, | ||
749 | }; | ||
750 | |||
588 | static int ati_remote2_probe(struct usb_interface *interface, const struct usb_device_id *id) | 751 | static int ati_remote2_probe(struct usb_interface *interface, const struct usb_device_id *id) |
589 | { | 752 | { |
590 | struct usb_device *udev = interface_to_usbdev(interface); | 753 | struct usb_device *udev = interface_to_usbdev(interface); |
@@ -615,7 +778,10 @@ static int ati_remote2_probe(struct usb_interface *interface, const struct usb_d | |||
615 | if (r) | 778 | if (r) |
616 | goto fail2; | 779 | goto fail2; |
617 | 780 | ||
618 | r = ati_remote2_setup(ar2); | 781 | ar2->channel_mask = channel_mask; |
782 | ar2->mode_mask = mode_mask; | ||
783 | |||
784 | r = ati_remote2_setup(ar2, ar2->channel_mask); | ||
619 | if (r) | 785 | if (r) |
620 | goto fail2; | 786 | goto fail2; |
621 | 787 | ||
@@ -624,19 +790,24 @@ static int ati_remote2_probe(struct usb_interface *interface, const struct usb_d | |||
624 | 790 | ||
625 | strlcat(ar2->name, "ATI Remote Wonder II", sizeof(ar2->name)); | 791 | strlcat(ar2->name, "ATI Remote Wonder II", sizeof(ar2->name)); |
626 | 792 | ||
627 | r = ati_remote2_input_init(ar2); | 793 | r = sysfs_create_group(&udev->dev.kobj, &ati_remote2_attr_group); |
628 | if (r) | 794 | if (r) |
629 | goto fail2; | 795 | goto fail2; |
630 | 796 | ||
797 | r = ati_remote2_input_init(ar2); | ||
798 | if (r) | ||
799 | goto fail3; | ||
800 | |||
631 | usb_set_intfdata(interface, ar2); | 801 | usb_set_intfdata(interface, ar2); |
632 | 802 | ||
633 | interface->needs_remote_wakeup = 1; | 803 | interface->needs_remote_wakeup = 1; |
634 | 804 | ||
635 | return 0; | 805 | return 0; |
636 | 806 | ||
807 | fail3: | ||
808 | sysfs_remove_group(&udev->dev.kobj, &ati_remote2_attr_group); | ||
637 | fail2: | 809 | fail2: |
638 | ati_remote2_urb_cleanup(ar2); | 810 | ati_remote2_urb_cleanup(ar2); |
639 | |||
640 | usb_driver_release_interface(&ati_remote2_driver, ar2->intf[1]); | 811 | usb_driver_release_interface(&ati_remote2_driver, ar2->intf[1]); |
641 | fail1: | 812 | fail1: |
642 | kfree(ar2); | 813 | kfree(ar2); |
@@ -657,6 +828,8 @@ static void ati_remote2_disconnect(struct usb_interface *interface) | |||
657 | 828 | ||
658 | input_unregister_device(ar2->idev); | 829 | input_unregister_device(ar2->idev); |
659 | 830 | ||
831 | sysfs_remove_group(&ar2->udev->dev.kobj, &ati_remote2_attr_group); | ||
832 | |||
660 | ati_remote2_urb_cleanup(ar2); | 833 | ati_remote2_urb_cleanup(ar2); |
661 | 834 | ||
662 | usb_driver_release_interface(&ati_remote2_driver, ar2->intf[1]); | 835 | usb_driver_release_interface(&ati_remote2_driver, ar2->intf[1]); |
@@ -715,6 +888,78 @@ static int ati_remote2_resume(struct usb_interface *interface) | |||
715 | return r; | 888 | return r; |
716 | } | 889 | } |
717 | 890 | ||
891 | static int ati_remote2_reset_resume(struct usb_interface *interface) | ||
892 | { | ||
893 | struct ati_remote2 *ar2; | ||
894 | struct usb_host_interface *alt = interface->cur_altsetting; | ||
895 | int r = 0; | ||
896 | |||
897 | if (alt->desc.bInterfaceNumber) | ||
898 | return 0; | ||
899 | |||
900 | ar2 = usb_get_intfdata(interface); | ||
901 | |||
902 | dev_dbg(&ar2->intf[0]->dev, "%s()\n", __func__); | ||
903 | |||
904 | mutex_lock(&ati_remote2_mutex); | ||
905 | |||
906 | r = ati_remote2_setup(ar2, ar2->channel_mask); | ||
907 | if (r) | ||
908 | goto out; | ||
909 | |||
910 | if (ar2->flags & ATI_REMOTE2_OPENED) | ||
911 | r = ati_remote2_submit_urbs(ar2); | ||
912 | |||
913 | if (!r) | ||
914 | ar2->flags &= ~ATI_REMOTE2_SUSPENDED; | ||
915 | |||
916 | out: | ||
917 | mutex_unlock(&ati_remote2_mutex); | ||
918 | |||
919 | return r; | ||
920 | } | ||
921 | |||
922 | static int ati_remote2_pre_reset(struct usb_interface *interface) | ||
923 | { | ||
924 | struct ati_remote2 *ar2; | ||
925 | struct usb_host_interface *alt = interface->cur_altsetting; | ||
926 | |||
927 | if (alt->desc.bInterfaceNumber) | ||
928 | return 0; | ||
929 | |||
930 | ar2 = usb_get_intfdata(interface); | ||
931 | |||
932 | dev_dbg(&ar2->intf[0]->dev, "%s()\n", __func__); | ||
933 | |||
934 | mutex_lock(&ati_remote2_mutex); | ||
935 | |||
936 | if (ar2->flags == ATI_REMOTE2_OPENED) | ||
937 | ati_remote2_kill_urbs(ar2); | ||
938 | |||
939 | return 0; | ||
940 | } | ||
941 | |||
942 | static int ati_remote2_post_reset(struct usb_interface *interface) | ||
943 | { | ||
944 | struct ati_remote2 *ar2; | ||
945 | struct usb_host_interface *alt = interface->cur_altsetting; | ||
946 | int r = 0; | ||
947 | |||
948 | if (alt->desc.bInterfaceNumber) | ||
949 | return 0; | ||
950 | |||
951 | ar2 = usb_get_intfdata(interface); | ||
952 | |||
953 | dev_dbg(&ar2->intf[0]->dev, "%s()\n", __func__); | ||
954 | |||
955 | if (ar2->flags == ATI_REMOTE2_OPENED) | ||
956 | r = ati_remote2_submit_urbs(ar2); | ||
957 | |||
958 | mutex_unlock(&ati_remote2_mutex); | ||
959 | |||
960 | return r; | ||
961 | } | ||
962 | |||
718 | static int __init ati_remote2_init(void) | 963 | static int __init ati_remote2_init(void) |
719 | { | 964 | { |
720 | int r; | 965 | int r; |
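ati_remote2.c turns channel_mask and mode_mask into validated module parameters by defining param_check_/param_set_/param_get_ macros for a custom "type" name, so the stock module_param() expansion picks up the driver's own setter and getter. The same idiom in isolation, with a hypothetical parameter; the names and the 0xFFFF range are made up, while strict_strtoul() and the non-const struct kernel_param signature match the kernel version this diff targets.

static unsigned int example_mask = 0xFFFF;

static int example_set_mask(const char *val, struct kernel_param *kp)
{
	unsigned long mask;
	int ret;

	if (!val)
		return -EINVAL;

	ret = strict_strtoul(val, 0, &mask);
	if (ret)
		return ret;

	if (mask & ~0xFFFFUL)		/* reject out-of-range bits */
		return -EINVAL;

	*(unsigned int *)kp->arg = mask;
	return 0;
}

static int example_get_mask(char *buffer, struct kernel_param *kp)
{
	return sprintf(buffer, "0x%04x", *(unsigned int *)kp->arg);
}

#define param_check_example_mask(name, p) __param_check(name, p, unsigned int)
#define param_set_example_mask example_set_mask
#define param_get_example_mask example_get_mask
module_param(example_mask, example_mask, 0644);
MODULE_PARM_DESC(example_mask, "Example bitmask, rejects bits outside 0xFFFF");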
diff --git a/drivers/input/misc/rb532_button.c b/drivers/input/misc/rb532_button.c new file mode 100644 index 000000000000..e2c7f622a0b5 --- /dev/null +++ b/drivers/input/misc/rb532_button.c | |||
@@ -0,0 +1,120 @@ | |||
1 | /* | ||
2 | * Support for the S1 button on Routerboard 532 | ||
3 | * | ||
4 | * Copyright (C) 2009 Phil Sutter <n0-1@freewrt.org> | ||
5 | */ | ||
6 | |||
7 | #include <linux/input-polldev.h> | ||
8 | #include <linux/module.h> | ||
9 | #include <linux/platform_device.h> | ||
10 | |||
11 | #include <asm/mach-rc32434/gpio.h> | ||
12 | #include <asm/mach-rc32434/rb.h> | ||
13 | |||
14 | #define DRV_NAME "rb532-button" | ||
15 | |||
16 | #define RB532_BTN_RATE 100 /* msec */ | ||
17 | #define RB532_BTN_KSYM BTN_0 | ||
18 | |||
19 | /* The S1 button state is provided by GPIO pin 1. But as this | ||
20 | * pin is also used for UART input as an alternate function, the | ||
21 | * operational modes must be switched first: | ||
22 | * 1) disable uart using set_latch_u5() | ||
23 | * 2) turn off alternate function implicitly through | ||
24 | * gpio_direction_input() | ||
25 | * 3) read the GPIO's current value | ||
26 | * 4) undo step 2 by enabling alternate function (in this | ||
27 | * mode the GPIO direction is fixed, so no change needed) | ||
28 | * 5) turn on uart again | ||
29 | * The GPIO value happens to be inverted, so pin high means | ||
30 | * button is not pressed. | ||
31 | */ | ||
32 | static bool rb532_button_pressed(void) | ||
33 | { | ||
34 | int val; | ||
35 | |||
36 | set_latch_u5(0, LO_FOFF); | ||
37 | gpio_direction_input(GPIO_BTN_S1); | ||
38 | |||
39 | val = gpio_get_value(GPIO_BTN_S1); | ||
40 | |||
41 | rb532_gpio_set_func(GPIO_BTN_S1); | ||
42 | set_latch_u5(LO_FOFF, 0); | ||
43 | |||
44 | return !val; | ||
45 | } | ||
46 | |||
47 | static void rb532_button_poll(struct input_polled_dev *poll_dev) | ||
48 | { | ||
49 | input_report_key(poll_dev->input, RB532_BTN_KSYM, | ||
50 | rb532_button_pressed()); | ||
51 | input_sync(poll_dev->input); | ||
52 | } | ||
53 | |||
54 | static int __devinit rb532_button_probe(struct platform_device *pdev) | ||
55 | { | ||
56 | struct input_polled_dev *poll_dev; | ||
57 | int error; | ||
58 | |||
59 | poll_dev = input_allocate_polled_device(); | ||
60 | if (!poll_dev) | ||
61 | return -ENOMEM; | ||
62 | |||
63 | poll_dev->poll = rb532_button_poll; | ||
64 | poll_dev->poll_interval = RB532_BTN_RATE; | ||
65 | |||
66 | poll_dev->input->name = "rb532 button"; | ||
67 | poll_dev->input->phys = "rb532/button0"; | ||
68 | poll_dev->input->id.bustype = BUS_HOST; | ||
69 | poll_dev->input->dev.parent = &pdev->dev; | ||
70 | |||
71 | dev_set_drvdata(&pdev->dev, poll_dev); | ||
72 | |||
73 | input_set_capability(poll_dev->input, EV_KEY, RB532_BTN_KSYM); | ||
74 | |||
75 | error = input_register_polled_device(poll_dev); | ||
76 | if (error) { | ||
77 | input_free_polled_device(poll_dev); | ||
78 | return error; | ||
79 | } | ||
80 | |||
81 | return 0; | ||
82 | } | ||
83 | |||
84 | static int __devexit rb532_button_remove(struct platform_device *pdev) | ||
85 | { | ||
86 | struct input_polled_dev *poll_dev = dev_get_drvdata(&pdev->dev); | ||
87 | |||
88 | input_unregister_polled_device(poll_dev); | ||
89 | input_free_polled_device(poll_dev); | ||
90 | dev_set_drvdata(&pdev->dev, NULL); | ||
91 | |||
92 | return 0; | ||
93 | } | ||
94 | |||
95 | static struct platform_driver rb532_button_driver = { | ||
96 | .probe = rb532_button_probe, | ||
97 | .remove = __devexit_p(rb532_button_remove), | ||
98 | .driver = { | ||
99 | .name = DRV_NAME, | ||
100 | .owner = THIS_MODULE, | ||
101 | }, | ||
102 | }; | ||
103 | |||
104 | static int __init rb532_button_init(void) | ||
105 | { | ||
106 | return platform_driver_register(&rb532_button_driver); | ||
107 | } | ||
108 | |||
109 | static void __exit rb532_button_exit(void) | ||
110 | { | ||
111 | platform_driver_unregister(&rb532_button_driver); | ||
112 | } | ||
113 | |||
114 | module_init(rb532_button_init); | ||
115 | module_exit(rb532_button_exit); | ||
116 | |||
117 | MODULE_AUTHOR("Phil Sutter <n0-1@freewrt.org>"); | ||
118 | MODULE_LICENSE("GPL"); | ||
119 | MODULE_DESCRIPTION("Support for S1 button on Routerboard 532"); | ||
120 | MODULE_ALIAS("platform:" DRV_NAME); | ||
diff --git a/drivers/input/misc/rotary_encoder.c b/drivers/input/misc/rotary_encoder.c new file mode 100644 index 000000000000..5bb3ab51b8c6 --- /dev/null +++ b/drivers/input/misc/rotary_encoder.c | |||
@@ -0,0 +1,221 @@ | |||
1 | /* | ||
2 | * rotary_encoder.c | ||
3 | * | ||
4 | * (c) 2009 Daniel Mack <daniel@caiaq.de> | ||
5 | * | ||
6 | * state machine code inspired by code from Tim Ruetz | ||
7 | * | ||
8 | * A generic driver for rotary encoders connected to GPIO lines. | ||
9 | * See file:Documentation/input/rotary_encoder.txt for more information | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License version 2 as | ||
13 | * published by the Free Software Foundation. | ||
14 | */ | ||
15 | |||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/module.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <linux/interrupt.h> | ||
20 | #include <linux/input.h> | ||
21 | #include <linux/device.h> | ||
22 | #include <linux/platform_device.h> | ||
23 | #include <linux/gpio.h> | ||
24 | #include <linux/rotary_encoder.h> | ||
25 | |||
26 | #define DRV_NAME "rotary-encoder" | ||
27 | |||
28 | struct rotary_encoder { | ||
29 | unsigned int irq_a; | ||
30 | unsigned int irq_b; | ||
31 | unsigned int pos; | ||
32 | unsigned int armed; | ||
33 | unsigned int dir; | ||
34 | struct input_dev *input; | ||
35 | struct rotary_encoder_platform_data *pdata; | ||
36 | }; | ||
37 | |||
38 | static irqreturn_t rotary_encoder_irq(int irq, void *dev_id) | ||
39 | { | ||
40 | struct rotary_encoder *encoder = dev_id; | ||
41 | struct rotary_encoder_platform_data *pdata = encoder->pdata; | ||
42 | int a = !!gpio_get_value(pdata->gpio_a); | ||
43 | int b = !!gpio_get_value(pdata->gpio_b); | ||
44 | int state; | ||
45 | |||
46 | a ^= pdata->inverted_a; | ||
47 | b ^= pdata->inverted_b; | ||
48 | state = (a << 1) | b; | ||
49 | |||
50 | switch (state) { | ||
51 | |||
52 | case 0x0: | ||
53 | if (!encoder->armed) | ||
54 | break; | ||
55 | |||
56 | if (encoder->dir) { | ||
57 | /* turning counter-clockwise */ | ||
58 | encoder->pos += pdata->steps; | ||
59 | encoder->pos--; | ||
60 | encoder->pos %= pdata->steps; | ||
61 | } else { | ||
62 | /* turning clockwise */ | ||
63 | encoder->pos++; | ||
64 | encoder->pos %= pdata->steps; | ||
65 | } | ||
66 | |||
67 | input_report_abs(encoder->input, pdata->axis, encoder->pos); | ||
68 | input_sync(encoder->input); | ||
69 | |||
70 | encoder->armed = 0; | ||
71 | break; | ||
72 | |||
73 | case 0x1: | ||
74 | case 0x2: | ||
75 | if (encoder->armed) | ||
76 | encoder->dir = state - 1; | ||
77 | break; | ||
78 | |||
79 | case 0x3: | ||
80 | encoder->armed = 1; | ||
81 | break; | ||
82 | } | ||
83 | |||
84 | return IRQ_HANDLED; | ||
85 | } | ||
86 | |||
87 | static int __devinit rotary_encoder_probe(struct platform_device *pdev) | ||
88 | { | ||
89 | struct rotary_encoder_platform_data *pdata = pdev->dev.platform_data; | ||
90 | struct rotary_encoder *encoder; | ||
91 | struct input_dev *input; | ||
92 | int err; | ||
93 | |||
94 | if (!pdata || !pdata->steps) { | ||
95 | dev_err(&pdev->dev, "invalid platform data\n"); | ||
96 | return -ENOENT; | ||
97 | } | ||
98 | |||
99 | encoder = kzalloc(sizeof(struct rotary_encoder), GFP_KERNEL); | ||
100 | input = input_allocate_device(); | ||
101 | if (!encoder || !input) { | ||
102 | dev_err(&pdev->dev, "failed to allocate memory for device\n"); | ||
103 | err = -ENOMEM; | ||
104 | goto exit_free_mem; | ||
105 | } | ||
106 | |||
107 | encoder->input = input; | ||
108 | encoder->pdata = pdata; | ||
109 | encoder->irq_a = gpio_to_irq(pdata->gpio_a); | ||
110 | encoder->irq_b = gpio_to_irq(pdata->gpio_b); | ||
111 | |||
112 | /* create and register the input driver */ | ||
113 | input->name = pdev->name; | ||
114 | input->id.bustype = BUS_HOST; | ||
115 | input->dev.parent = &pdev->dev; | ||
116 | input->evbit[0] = BIT_MASK(EV_ABS); | ||
117 | input_set_abs_params(encoder->input, | ||
118 | pdata->axis, 0, pdata->steps, 0, 1); | ||
119 | |||
120 | err = input_register_device(input); | ||
121 | if (err) { | ||
122 | dev_err(&pdev->dev, "failed to register input device\n"); | ||
123 | goto exit_free_mem; | ||
124 | } | ||
125 | |||
126 | /* request the GPIOs */ | ||
127 | err = gpio_request(pdata->gpio_a, DRV_NAME); | ||
128 | if (err) { | ||
129 | dev_err(&pdev->dev, "unable to request GPIO %d\n", | ||
130 | pdata->gpio_a); | ||
131 | goto exit_unregister_input; | ||
132 | } | ||
133 | |||
134 | err = gpio_request(pdata->gpio_b, DRV_NAME); | ||
135 | if (err) { | ||
136 | dev_err(&pdev->dev, "unable to request GPIO %d\n", | ||
137 | pdata->gpio_b); | ||
138 | goto exit_free_gpio_a; | ||
139 | } | ||
140 | |||
141 | /* request the IRQs */ | ||
142 | err = request_irq(encoder->irq_a, &rotary_encoder_irq, | ||
143 | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, | ||
144 | DRV_NAME, encoder); | ||
145 | if (err) { | ||
146 | dev_err(&pdev->dev, "unable to request IRQ %d\n", | ||
147 | encoder->irq_a); | ||
148 | goto exit_free_gpio_b; | ||
149 | } | ||
150 | |||
151 | err = request_irq(encoder->irq_b, &rotary_encoder_irq, | ||
152 | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, | ||
153 | DRV_NAME, encoder); | ||
154 | if (err) { | ||
155 | dev_err(&pdev->dev, "unable to request IRQ %d\n", | ||
156 | encoder->irq_b); | ||
157 | goto exit_free_irq_a; | ||
158 | } | ||
159 | |||
160 | platform_set_drvdata(pdev, encoder); | ||
161 | |||
162 | return 0; | ||
163 | |||
164 | exit_free_irq_a: | ||
165 | free_irq(encoder->irq_a, encoder); | ||
166 | exit_free_gpio_b: | ||
167 | gpio_free(pdata->gpio_b); | ||
168 | exit_free_gpio_a: | ||
169 | gpio_free(pdata->gpio_a); | ||
170 | exit_unregister_input: | ||
171 | input_unregister_device(input); | ||
172 | input = NULL; /* so we don't try to free it */ | ||
173 | exit_free_mem: | ||
174 | input_free_device(input); | ||
175 | kfree(encoder); | ||
176 | return err; | ||
177 | } | ||
178 | |||
179 | static int __devexit rotary_encoder_remove(struct platform_device *pdev) | ||
180 | { | ||
181 | struct rotary_encoder *encoder = platform_get_drvdata(pdev); | ||
182 | struct rotary_encoder_platform_data *pdata = pdev->dev.platform_data; | ||
183 | |||
184 | free_irq(encoder->irq_a, encoder); | ||
185 | free_irq(encoder->irq_b, encoder); | ||
186 | gpio_free(pdata->gpio_a); | ||
187 | gpio_free(pdata->gpio_b); | ||
188 | input_unregister_device(encoder->input); | ||
189 | platform_set_drvdata(pdev, NULL); | ||
190 | kfree(encoder); | ||
191 | |||
192 | return 0; | ||
193 | } | ||
194 | |||
195 | static struct platform_driver rotary_encoder_driver = { | ||
196 | .probe = rotary_encoder_probe, | ||
197 | .remove = __devexit_p(rotary_encoder_remove), | ||
198 | .driver = { | ||
199 | .name = DRV_NAME, | ||
200 | .owner = THIS_MODULE, | ||
201 | } | ||
202 | }; | ||
203 | |||
204 | static int __init rotary_encoder_init(void) | ||
205 | { | ||
206 | return platform_driver_register(&rotary_encoder_driver); | ||
207 | } | ||
208 | |||
209 | static void __exit rotary_encoder_exit(void) | ||
210 | { | ||
211 | platform_driver_unregister(&rotary_encoder_driver); | ||
212 | } | ||
213 | |||
214 | module_init(rotary_encoder_init); | ||
215 | module_exit(rotary_encoder_exit); | ||
216 | |||
217 | MODULE_ALIAS("platform:" DRV_NAME); | ||
218 | MODULE_DESCRIPTION("GPIO rotary encoder driver"); | ||
219 | MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>"); | ||
220 | MODULE_LICENSE("GPL v2"); | ||
221 | |||
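The driver above is deliberately generic: it attaches only when board code registers a platform device named "rotary-encoder" whose platform data supplies the fields read in rotary_encoder_probe() and rotary_encoder_irq(). A minimal board-code sketch (not part of this patch; the GPIO numbers, step count and reported axis are purely illustrative and must match the actual wiring) could look like this:

	#include <linux/input.h>
	#include <linux/platform_device.h>
	#include <linux/rotary_encoder.h>

	/* Hypothetical wiring: a 24-step encoder on GPIOs 19 and 20, reported on ABS_X */
	static struct rotary_encoder_platform_data my_rotary_encoder_info = {
		.steps		= 24,
		.axis		= ABS_X,
		.gpio_a		= 19,
		.gpio_b		= 20,
		.inverted_a	= 0,	/* set to 1 if the A line is active-low */
		.inverted_b	= 0,
	};

	static struct platform_device rotary_encoder_device = {
		.name	= "rotary-encoder",	/* must match DRV_NAME in the driver */
		.id	= 0,
		.dev	= {
			.platform_data = &my_rotary_encoder_info,
		},
	};

The device would then be registered from the board's init code with platform_device_register(&rotary_encoder_device), after which the driver's probe routine requests the two GPIOs and their edge interrupts.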
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig index 4f38e6f7dfdd..c66cc3d08c2f 100644 --- a/drivers/input/mouse/Kconfig +++ b/drivers/input/mouse/Kconfig | |||
@@ -292,4 +292,15 @@ config MOUSE_PXA930_TRKBALL | |||
292 | help | 292 | help |
293 | Say Y here to support PXA930 Trackball mouse. | 293 | Say Y here to support PXA930 Trackball mouse. |
294 | 294 | ||
295 | config MOUSE_MAPLE | ||
296 | tristate "Maple mouse (for the Dreamcast)" | ||
297 | depends on MAPLE | ||
298 | help | ||
299 | This driver supports the Maple mouse on the SEGA Dreamcast. | ||
300 | |||
301 | Most Dreamcast users who have a mouse will say Y here. | ||
302 | |||
303 | To compile this driver as a module, choose M here: the module will be | ||
304 | called maplemouse. | ||
305 | |||
295 | endif | 306 | endif |
diff --git a/drivers/input/mouse/Makefile b/drivers/input/mouse/Makefile index 8c8a1f236e28..472189468d67 100644 --- a/drivers/input/mouse/Makefile +++ b/drivers/input/mouse/Makefile | |||
@@ -6,18 +6,19 @@ | |||
6 | 6 | ||
7 | obj-$(CONFIG_MOUSE_AMIGA) += amimouse.o | 7 | obj-$(CONFIG_MOUSE_AMIGA) += amimouse.o |
8 | obj-$(CONFIG_MOUSE_APPLETOUCH) += appletouch.o | 8 | obj-$(CONFIG_MOUSE_APPLETOUCH) += appletouch.o |
9 | obj-$(CONFIG_MOUSE_BCM5974) += bcm5974.o | ||
10 | obj-$(CONFIG_MOUSE_ATARI) += atarimouse.o | 9 | obj-$(CONFIG_MOUSE_ATARI) += atarimouse.o |
11 | obj-$(CONFIG_MOUSE_RISCPC) += rpcmouse.o | 10 | obj-$(CONFIG_MOUSE_BCM5974) += bcm5974.o |
11 | obj-$(CONFIG_MOUSE_GPIO) += gpio_mouse.o | ||
12 | obj-$(CONFIG_MOUSE_HIL) += hil_ptr.o | ||
12 | obj-$(CONFIG_MOUSE_INPORT) += inport.o | 13 | obj-$(CONFIG_MOUSE_INPORT) += inport.o |
13 | obj-$(CONFIG_MOUSE_LOGIBM) += logibm.o | 14 | obj-$(CONFIG_MOUSE_LOGIBM) += logibm.o |
15 | obj-$(CONFIG_MOUSE_MAPLE) += maplemouse.o | ||
14 | obj-$(CONFIG_MOUSE_PC110PAD) += pc110pad.o | 16 | obj-$(CONFIG_MOUSE_PC110PAD) += pc110pad.o |
15 | obj-$(CONFIG_MOUSE_PS2) += psmouse.o | 17 | obj-$(CONFIG_MOUSE_PS2) += psmouse.o |
16 | obj-$(CONFIG_MOUSE_PXA930_TRKBALL) += pxa930_trkball.o | 18 | obj-$(CONFIG_MOUSE_PXA930_TRKBALL) += pxa930_trkball.o |
19 | obj-$(CONFIG_MOUSE_RISCPC) += rpcmouse.o | ||
17 | obj-$(CONFIG_MOUSE_SERIAL) += sermouse.o | 20 | obj-$(CONFIG_MOUSE_SERIAL) += sermouse.o |
18 | obj-$(CONFIG_MOUSE_HIL) += hil_ptr.o | ||
19 | obj-$(CONFIG_MOUSE_VSXXXAA) += vsxxxaa.o | 21 | obj-$(CONFIG_MOUSE_VSXXXAA) += vsxxxaa.o |
20 | obj-$(CONFIG_MOUSE_GPIO) += gpio_mouse.o | ||
21 | 22 | ||
22 | psmouse-objs := psmouse-base.o synaptics.o | 23 | psmouse-objs := psmouse-base.o synaptics.o |
23 | 24 | ||
diff --git a/drivers/input/mouse/hgpk.c b/drivers/input/mouse/hgpk.c index 55cd0fa68339..a1ad2f1a7bb3 100644 --- a/drivers/input/mouse/hgpk.c +++ b/drivers/input/mouse/hgpk.c | |||
@@ -472,7 +472,7 @@ static enum hgpk_model_t hgpk_get_model(struct psmouse *psmouse) | |||
472 | return -EIO; | 472 | return -EIO; |
473 | } | 473 | } |
474 | 474 | ||
475 | hgpk_dbg(psmouse, "ID: %02x %02x %02x", param[0], param[1], param[2]); | 475 | hgpk_dbg(psmouse, "ID: %02x %02x %02x\n", param[0], param[1], param[2]); |
476 | 476 | ||
477 | /* HGPK signature: 0x67, 0x00, 0x<model> */ | 477 | /* HGPK signature: 0x67, 0x00, 0x<model> */ |
478 | if (param[0] != 0x67 || param[1] != 0x00) | 478 | if (param[0] != 0x67 || param[1] != 0x00) |
diff --git a/drivers/input/mouse/maplemouse.c b/drivers/input/mouse/maplemouse.c new file mode 100644 index 000000000000..d196abfb68bc --- /dev/null +++ b/drivers/input/mouse/maplemouse.c | |||
@@ -0,0 +1,147 @@ | |||
1 | /* | ||
2 | * SEGA Dreamcast mouse driver | ||
3 | * Based on drivers/usb/usbmouse.c | ||
4 | * | ||
5 | * Copyright Yaegashi Takeshi, 2001 | ||
6 | * Adrian McMenamin, 2008 | ||
7 | */ | ||
8 | |||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/slab.h> | ||
11 | #include <linux/input.h> | ||
12 | #include <linux/module.h> | ||
13 | #include <linux/init.h> | ||
14 | #include <linux/timer.h> | ||
15 | #include <linux/maple.h> | ||
16 | |||
17 | MODULE_AUTHOR("Adrian McMenamin <adrian@mcmen.demon.co.uk>"); | ||
18 | MODULE_DESCRIPTION("SEGA Dreamcast mouse driver"); | ||
19 | MODULE_LICENSE("GPL"); | ||
20 | |||
21 | struct dc_mouse { | ||
22 | struct input_dev *dev; | ||
23 | struct maple_device *mdev; | ||
24 | }; | ||
25 | |||
26 | static void dc_mouse_callback(struct mapleq *mq) | ||
27 | { | ||
28 | int buttons, relx, rely, relz; | ||
29 | struct maple_device *mapledev = mq->dev; | ||
30 | struct dc_mouse *mse = maple_get_drvdata(mapledev); | ||
31 | struct input_dev *dev = mse->dev; | ||
32 | unsigned char *res = mq->recvbuf; | ||
33 | |||
34 | buttons = ~res[8]; | ||
35 | relx = *(unsigned short *)(res + 12) - 512; | ||
36 | rely = *(unsigned short *)(res + 14) - 512; | ||
37 | relz = *(unsigned short *)(res + 16) - 512; | ||
38 | |||
39 | input_report_key(dev, BTN_LEFT, buttons & 4); | ||
40 | input_report_key(dev, BTN_MIDDLE, buttons & 9); | ||
41 | input_report_key(dev, BTN_RIGHT, buttons & 2); | ||
42 | input_report_rel(dev, REL_X, relx); | ||
43 | input_report_rel(dev, REL_Y, rely); | ||
44 | input_report_rel(dev, REL_WHEEL, relz); | ||
45 | input_sync(dev); | ||
46 | } | ||
47 | |||
48 | static int dc_mouse_open(struct input_dev *dev) | ||
49 | { | ||
50 | struct dc_mouse *mse = dev->dev.platform_data; | ||
51 | |||
52 | maple_getcond_callback(mse->mdev, dc_mouse_callback, HZ/50, | ||
53 | MAPLE_FUNC_MOUSE); | ||
54 | |||
55 | return 0; | ||
56 | } | ||
57 | |||
58 | static void dc_mouse_close(struct input_dev *dev) | ||
59 | { | ||
60 | struct dc_mouse *mse = dev->dev.platform_data; | ||
61 | |||
62 | maple_getcond_callback(mse->mdev, dc_mouse_callback, 0, | ||
63 | MAPLE_FUNC_MOUSE); | ||
64 | } | ||
65 | |||
66 | |||
67 | static int __devinit probe_maple_mouse(struct device *dev) | ||
68 | { | ||
69 | struct maple_device *mdev = to_maple_dev(dev); | ||
70 | struct maple_driver *mdrv = to_maple_driver(dev->driver); | ||
71 | struct input_dev *input_dev; | ||
72 | struct dc_mouse *mse; | ||
73 | int error; | ||
74 | |||
75 | mse = kzalloc(sizeof(struct dc_mouse), GFP_KERNEL); | ||
76 | input_dev = input_allocate_device(); | ||
77 | |||
78 | if (!mse || !input_dev) { | ||
79 | error = -ENOMEM; | ||
80 | goto fail; | ||
81 | } | ||
82 | |||
83 | mse->dev = input_dev; | ||
84 | mse->mdev = mdev; | ||
85 | |||
86 | input_set_drvdata(input_dev, mse); | ||
87 | input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL); | ||
88 | input_dev->keybit[BIT_WORD(BTN_MOUSE)] = BIT_MASK(BTN_LEFT) | | ||
89 | BIT_MASK(BTN_RIGHT) | BIT_MASK(BTN_MIDDLE); | ||
90 | input_dev->relbit[0] = BIT_MASK(REL_X) | BIT_MASK(REL_Y) | | ||
91 | BIT_MASK(REL_WHEEL); | ||
92 | input_dev->name = mdev->product_name; | ||
93 | input_dev->id.bustype = BUS_HOST; | ||
94 | input_dev->open = dc_mouse_open; | ||
95 | input_dev->close = dc_mouse_close; | ||
96 | |||
97 | mdev->driver = mdrv; | ||
98 | maple_set_drvdata(mdev, mse); | ||
99 | |||
100 | error = input_register_device(input_dev); | ||
101 | if (error) | ||
102 | goto fail; | ||
103 | |||
104 | return 0; | ||
105 | |||
106 | fail: | ||
107 | input_free_device(input_dev); | ||
108 | maple_set_drvdata(mdev, NULL); | ||
109 | kfree(mse); | ||
110 | mdev->driver = NULL; | ||
111 | return error; | ||
112 | } | ||
113 | |||
114 | static int __devexit remove_maple_mouse(struct device *dev) | ||
115 | { | ||
116 | struct maple_device *mdev = to_maple_dev(dev); | ||
117 | struct dc_mouse *mse = maple_get_drvdata(mdev); | ||
118 | |||
119 | mdev->callback = NULL; | ||
120 | input_unregister_device(mse->dev); | ||
121 | maple_set_drvdata(mdev, NULL); | ||
122 | kfree(mse); | ||
123 | |||
124 | return 0; | ||
125 | } | ||
126 | |||
127 | static struct maple_driver dc_mouse_driver = { | ||
128 | .function = MAPLE_FUNC_MOUSE, | ||
129 | .drv = { | ||
130 | .name = "Dreamcast_mouse", | ||
131 | .probe = probe_maple_mouse, | ||
132 | .remove = __devexit_p(remove_maple_mouse), | ||
133 | }, | ||
134 | }; | ||
135 | |||
136 | static int __init dc_mouse_init(void) | ||
137 | { | ||
138 | return maple_driver_register(&dc_mouse_driver); | ||
139 | } | ||
140 | |||
141 | static void __exit dc_mouse_exit(void) | ||
142 | { | ||
143 | maple_driver_unregister(&dc_mouse_driver); | ||
144 | } | ||
145 | |||
146 | module_init(dc_mouse_init); | ||
147 | module_exit(dc_mouse_exit); | ||
diff --git a/drivers/input/mouse/pc110pad.c b/drivers/input/mouse/pc110pad.c index fd09c8df81f2..3941f97cfa60 100644 --- a/drivers/input/mouse/pc110pad.c +++ b/drivers/input/mouse/pc110pad.c | |||
@@ -108,14 +108,10 @@ static int pc110pad_open(struct input_dev *dev) | |||
108 | */ | 108 | */ |
109 | static int __init pc110pad_init(void) | 109 | static int __init pc110pad_init(void) |
110 | { | 110 | { |
111 | struct pci_dev *dev; | ||
112 | int err; | 111 | int err; |
113 | 112 | ||
114 | dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, NULL); | 113 | if (!no_pci_devices()) |
115 | if (dev) { | ||
116 | pci_dev_put(dev); | ||
117 | return -ENODEV; | 114 | return -ENODEV; |
118 | } | ||
119 | 115 | ||
120 | if (!request_region(pc110pad_io, 4, "pc110pad")) { | 116 | if (!request_region(pc110pad_io, 4, "pc110pad")) { |
121 | printk(KERN_ERR "pc110pad: I/O area %#x-%#x in use.\n", | 117 | printk(KERN_ERR "pc110pad: I/O area %#x-%#x in use.\n", |
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index 6fa2deff7446..fb8a3cd3ffd0 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h | |||
@@ -151,6 +151,14 @@ static struct dmi_system_id __initdata i8042_dmi_noloop_table[] = { | |||
151 | DMI_MATCH(DMI_PRODUCT_VERSION, "01"), | 151 | DMI_MATCH(DMI_PRODUCT_VERSION, "01"), |
152 | }, | 152 | }, |
153 | }, | 153 | }, |
154 | { | ||
155 | .ident = "HP DV9700", | ||
156 | .matches = { | ||
157 | DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), | ||
158 | DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv9700"), | ||
159 | DMI_MATCH(DMI_PRODUCT_VERSION, "Rev 1"), | ||
160 | }, | ||
161 | }, | ||
154 | { } | 162 | { } |
155 | }; | 163 | }; |
156 | 164 | ||
@@ -369,6 +377,24 @@ static struct dmi_system_id __initdata i8042_dmi_nomux_table[] = { | |||
369 | { } | 377 | { } |
370 | }; | 378 | }; |
371 | 379 | ||
380 | static struct dmi_system_id __initdata i8042_dmi_reset_table[] = { | ||
381 | { | ||
382 | .ident = "MSI Wind U-100", | ||
383 | .matches = { | ||
384 | DMI_MATCH(DMI_BOARD_NAME, "U-100"), | ||
385 | DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"), | ||
386 | }, | ||
387 | }, | ||
388 | { | ||
389 | .ident = "LG Electronics X110", | ||
390 | .matches = { | ||
391 | DMI_MATCH(DMI_BOARD_NAME, "X110"), | ||
392 | DMI_MATCH(DMI_BOARD_VENDOR, "LG Electronics Inc."), | ||
393 | }, | ||
394 | }, | ||
395 | { } | ||
396 | }; | ||
397 | |||
372 | #ifdef CONFIG_PNP | 398 | #ifdef CONFIG_PNP |
373 | static struct dmi_system_id __initdata i8042_dmi_nopnp_table[] = { | 399 | static struct dmi_system_id __initdata i8042_dmi_nopnp_table[] = { |
374 | { | 400 | { |
@@ -378,6 +404,13 @@ static struct dmi_system_id __initdata i8042_dmi_nopnp_table[] = { | |||
378 | DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"), | 404 | DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"), |
379 | }, | 405 | }, |
380 | }, | 406 | }, |
407 | { | ||
408 | .ident = "MSI Wind U-100", | ||
409 | .matches = { | ||
410 | DMI_MATCH(DMI_BOARD_NAME, "U-100"), | ||
411 | DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"), | ||
412 | }, | ||
413 | }, | ||
381 | { } | 414 | { } |
382 | }; | 415 | }; |
383 | #endif | 416 | #endif |
@@ -690,6 +723,9 @@ static int __init i8042_platform_init(void) | |||
690 | #endif | 723 | #endif |
691 | 724 | ||
692 | #ifdef CONFIG_X86 | 725 | #ifdef CONFIG_X86 |
726 | if (dmi_check_system(i8042_dmi_reset_table)) | ||
727 | i8042_reset = 1; | ||
728 | |||
693 | if (dmi_check_system(i8042_dmi_noloop_table)) | 729 | if (dmi_check_system(i8042_dmi_noloop_table)) |
694 | i8042_noloop = 1; | 730 | i8042_noloop = 1; |
695 | 731 | ||
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c index 170f71ee5772..3cffb704e374 100644 --- a/drivers/input/serio/i8042.c +++ b/drivers/input/serio/i8042.c | |||
@@ -712,22 +712,43 @@ static int i8042_controller_check(void) | |||
712 | static int i8042_controller_selftest(void) | 712 | static int i8042_controller_selftest(void) |
713 | { | 713 | { |
714 | unsigned char param; | 714 | unsigned char param; |
715 | int i = 0; | ||
715 | 716 | ||
716 | if (!i8042_reset) | 717 | if (!i8042_reset) |
717 | return 0; | 718 | return 0; |
718 | 719 | ||
719 | if (i8042_command(¶m, I8042_CMD_CTL_TEST)) { | 720 | /* |
720 | printk(KERN_ERR "i8042.c: i8042 controller self test timeout.\n"); | 721 | * We try this 5 times; on some really fragile systems this does not |
721 | return -ENODEV; | 722 | * take the first time... |
722 | } | 723 | */ |
724 | do { | ||
725 | |||
726 | if (i8042_command(¶m, I8042_CMD_CTL_TEST)) { | ||
727 | printk(KERN_ERR "i8042.c: i8042 controller self test timeout.\n"); | ||
728 | return -ENODEV; | ||
729 | } | ||
730 | |||
731 | if (param == I8042_RET_CTL_TEST) | ||
732 | return 0; | ||
723 | 733 | ||
724 | if (param != I8042_RET_CTL_TEST) { | ||
725 | printk(KERN_ERR "i8042.c: i8042 controller selftest failed. (%#x != %#x)\n", | 734 | printk(KERN_ERR "i8042.c: i8042 controller selftest failed. (%#x != %#x)\n", |
726 | param, I8042_RET_CTL_TEST); | 735 | param, I8042_RET_CTL_TEST); |
727 | return -EIO; | 736 | msleep(50); |
728 | } | 737 | } while (i++ < 5); |
729 | 738 | ||
739 | #ifdef CONFIG_X86 | ||
740 | /* | ||
741 | * On x86, we don't fail entire i8042 initialization if controller | ||
742 | * reset fails in hopes that keyboard port will still be functional | ||
743 | * and user will still get a working keyboard. This is especially | ||
744 | * important on netbooks. On other arches we trust hardware more. | ||
745 | */ | ||
746 | printk(KERN_INFO | ||
747 | "i8042: giving up on controller selftest, continuing anyway...\n"); | ||
730 | return 0; | 748 | return 0; |
749 | #else | ||
750 | return -EIO; | ||
751 | #endif | ||
731 | } | 752 | } |
732 | 753 | ||
733 | /* | 754 | /* |
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig index bb6486a8c070..b01fd61dadcc 100644 --- a/drivers/input/touchscreen/Kconfig +++ b/drivers/input/touchscreen/Kconfig | |||
@@ -29,6 +29,51 @@ config TOUCHSCREEN_ADS7846 | |||
29 | To compile this driver as a module, choose M here: the | 29 | To compile this driver as a module, choose M here: the |
30 | module will be called ads7846. | 30 | module will be called ads7846. |
31 | 31 | ||
32 | config TOUCHSCREEN_AD7877 | ||
33 | tristate "AD7877 based touchscreens" | ||
34 | depends on SPI_MASTER | ||
35 | help | ||
36 | Say Y here if you have a touchscreen interface using the | ||
37 | AD7877 controller, and your board-specific initialization | ||
38 | code includes that in its table of SPI devices. | ||
39 | |||
40 | If unsure, say N (but it's safe to say "Y"). | ||
41 | |||
42 | To compile this driver as a module, choose M here: the | ||
43 | module will be called ad7877. | ||
44 | |||
45 | config TOUCHSCREEN_AD7879_I2C | ||
46 | tristate "AD7879 based touchscreens: AD7879-1 I2C Interface" | ||
47 | depends on I2C | ||
48 | select TOUCHSCREEN_AD7879 | ||
49 | help | ||
50 | Say Y here if you have a touchscreen interface using the | ||
51 | AD7879-1 controller, and your board-specific initialization | ||
52 | code includes that in its table of I2C devices. | ||
53 | |||
54 | If unsure, say N (but it's safe to say "Y"). | ||
55 | |||
56 | To compile this driver as a module, choose M here: the | ||
57 | module will be called ad7879. | ||
58 | |||
59 | config TOUCHSCREEN_AD7879_SPI | ||
60 | tristate "AD7879 based touchscreens: AD7879 SPI Interface" | ||
61 | depends on SPI_MASTER && TOUCHSCREEN_AD7879_I2C = n | ||
62 | select TOUCHSCREEN_AD7879 | ||
63 | help | ||
64 | Say Y here if you have a touchscreen interface using the | ||
65 | AD7879 controller, and your board-specific initialization | ||
66 | code includes that in its table of SPI devices. | ||
67 | |||
68 | If unsure, say N (but it's safe to say "Y"). | ||
69 | |||
70 | To compile this driver as a module, choose M here: the | ||
71 | module will be called ad7879. | ||
72 | |||
73 | config TOUCHSCREEN_AD7879 | ||
74 | tristate | ||
75 | default n | ||
76 | |||
32 | config TOUCHSCREEN_BITSY | 77 | config TOUCHSCREEN_BITSY |
33 | tristate "Compaq iPAQ H3600 (Bitsy) touchscreen" | 78 | tristate "Compaq iPAQ H3600 (Bitsy) touchscreen" |
34 | depends on SA1100_BITSY | 79 | depends on SA1100_BITSY |
@@ -308,6 +353,19 @@ config TOUCHSCREEN_WM97XX_MAINSTONE | |||
308 | To compile this driver as a module, choose M here: the | 353 | To compile this driver as a module, choose M here: the |
309 | module will be called mainstone-wm97xx. | 354 | module will be called mainstone-wm97xx. |
310 | 355 | ||
356 | config TOUCHSCREEN_WM97XX_ZYLONITE | ||
357 | tristate "Zylonite accelerated touch" | ||
358 | depends on TOUCHSCREEN_WM97XX && MACH_ZYLONITE | ||
359 | select TOUCHSCREEN_WM9713 | ||
360 | help | ||
361 | Say Y here for support for streaming mode with the touchscreen | ||
362 | on Zylonite systems. | ||
363 | |||
364 | If unsure, say N. | ||
365 | |||
366 | To compile this driver as a module, choose M here: the | ||
367 | module will be called zylonite-wm97xx. | ||
368 | |||
311 | config TOUCHSCREEN_USB_COMPOSITE | 369 | config TOUCHSCREEN_USB_COMPOSITE |
312 | tristate "USB Touchscreen Driver" | 370 | tristate "USB Touchscreen Driver" |
313 | depends on USB_ARCH_HAS_HCD | 371 | depends on USB_ARCH_HAS_HCD |
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile index d3375aff46fe..6700f7b9d165 100644 --- a/drivers/input/touchscreen/Makefile +++ b/drivers/input/touchscreen/Makefile | |||
@@ -6,6 +6,8 @@ | |||
6 | 6 | ||
7 | wm97xx-ts-y := wm97xx-core.o | 7 | wm97xx-ts-y := wm97xx-core.o |
8 | 8 | ||
9 | obj-$(CONFIG_TOUCHSCREEN_AD7877) += ad7877.o | ||
10 | obj-$(CONFIG_TOUCHSCREEN_AD7879) += ad7879.o | ||
9 | obj-$(CONFIG_TOUCHSCREEN_ADS7846) += ads7846.o | 11 | obj-$(CONFIG_TOUCHSCREEN_ADS7846) += ads7846.o |
10 | obj-$(CONFIG_TOUCHSCREEN_ATMEL_TSADCC) += atmel_tsadcc.o | 12 | obj-$(CONFIG_TOUCHSCREEN_ATMEL_TSADCC) += atmel_tsadcc.o |
11 | obj-$(CONFIG_TOUCHSCREEN_BITSY) += h3600_ts_input.o | 13 | obj-$(CONFIG_TOUCHSCREEN_BITSY) += h3600_ts_input.o |
@@ -34,3 +36,4 @@ wm97xx-ts-$(CONFIG_TOUCHSCREEN_WM9705) += wm9705.o | |||
34 | wm97xx-ts-$(CONFIG_TOUCHSCREEN_WM9712) += wm9712.o | 36 | wm97xx-ts-$(CONFIG_TOUCHSCREEN_WM9712) += wm9712.o |
35 | wm97xx-ts-$(CONFIG_TOUCHSCREEN_WM9713) += wm9713.o | 37 | wm97xx-ts-$(CONFIG_TOUCHSCREEN_WM9713) += wm9713.o |
36 | obj-$(CONFIG_TOUCHSCREEN_WM97XX_MAINSTONE) += mainstone-wm97xx.o | 38 | obj-$(CONFIG_TOUCHSCREEN_WM97XX_MAINSTONE) += mainstone-wm97xx.o |
39 | obj-$(CONFIG_TOUCHSCREEN_WM97XX_ZYLONITE) += zylonite-wm97xx.o | ||
diff --git a/drivers/input/touchscreen/ad7877.c b/drivers/input/touchscreen/ad7877.c new file mode 100644 index 000000000000..ecaeb7e8e75e --- /dev/null +++ b/drivers/input/touchscreen/ad7877.c | |||
@@ -0,0 +1,844 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2006-2008 Michael Hennerich, Analog Devices Inc. | ||
3 | * | ||
4 | * Description: AD7877 based touchscreen, sensor (ADCs), DAC and GPIO driver | ||
5 | * Based on: ads7846.c | ||
6 | * | ||
7 | * Bugs: Enter bugs at http://blackfin.uclinux.org/ | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License as published by | ||
11 | * the Free Software Foundation; either version 2 of the License, or | ||
12 | * (at your option) any later version. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, | ||
15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
17 | * GNU General Public License for more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License | ||
20 | * along with this program; if not, see the file COPYING, or write | ||
21 | * to the Free Software Foundation, Inc., | ||
22 | * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
23 | * | ||
24 | * History: | ||
25 | * Copyright (c) 2005 David Brownell | ||
26 | * Copyright (c) 2006 Nokia Corporation | ||
27 | * Various changes: Imre Deak <imre.deak@nokia.com> | ||
28 | * | ||
29 | * Using code from: | ||
30 | * - corgi_ts.c | ||
31 | * Copyright (C) 2004-2005 Richard Purdie | ||
32 | * - omap_ts.[hc], ads7846.h, ts_osk.c | ||
33 | * Copyright (C) 2002 MontaVista Software | ||
34 | * Copyright (C) 2004 Texas Instruments | ||
35 | * Copyright (C) 2005 Dirk Behme | ||
36 | */ | ||
37 | |||
38 | |||
39 | #include <linux/device.h> | ||
40 | #include <linux/init.h> | ||
41 | #include <linux/delay.h> | ||
42 | #include <linux/input.h> | ||
43 | #include <linux/interrupt.h> | ||
44 | #include <linux/slab.h> | ||
45 | #include <linux/spi/spi.h> | ||
46 | #include <linux/spi/ad7877.h> | ||
47 | #include <asm/irq.h> | ||
48 | |||
49 | #define TS_PEN_UP_TIMEOUT msecs_to_jiffies(50) | ||
50 | |||
51 | #define MAX_SPI_FREQ_HZ 20000000 | ||
52 | #define MAX_12BIT ((1<<12)-1) | ||
53 | |||
54 | #define AD7877_REG_ZEROS 0 | ||
55 | #define AD7877_REG_CTRL1 1 | ||
56 | #define AD7877_REG_CTRL2 2 | ||
57 | #define AD7877_REG_ALERT 3 | ||
58 | #define AD7877_REG_AUX1HIGH 4 | ||
59 | #define AD7877_REG_AUX1LOW 5 | ||
60 | #define AD7877_REG_BAT1HIGH 6 | ||
61 | #define AD7877_REG_BAT1LOW 7 | ||
62 | #define AD7877_REG_BAT2HIGH 8 | ||
63 | #define AD7877_REG_BAT2LOW 9 | ||
64 | #define AD7877_REG_TEMP1HIGH 10 | ||
65 | #define AD7877_REG_TEMP1LOW 11 | ||
66 | #define AD7877_REG_SEQ0 12 | ||
67 | #define AD7877_REG_SEQ1 13 | ||
68 | #define AD7877_REG_DAC 14 | ||
69 | #define AD7877_REG_NONE1 15 | ||
70 | #define AD7877_REG_EXTWRITE 15 | ||
71 | #define AD7877_REG_XPLUS 16 | ||
72 | #define AD7877_REG_YPLUS 17 | ||
73 | #define AD7877_REG_Z2 18 | ||
74 | #define AD7877_REG_aux1 19 | ||
75 | #define AD7877_REG_aux2 20 | ||
76 | #define AD7877_REG_aux3 21 | ||
77 | #define AD7877_REG_bat1 22 | ||
78 | #define AD7877_REG_bat2 23 | ||
79 | #define AD7877_REG_temp1 24 | ||
80 | #define AD7877_REG_temp2 25 | ||
81 | #define AD7877_REG_Z1 26 | ||
82 | #define AD7877_REG_GPIOCTRL1 27 | ||
83 | #define AD7877_REG_GPIOCTRL2 28 | ||
84 | #define AD7877_REG_GPIODATA 29 | ||
85 | #define AD7877_REG_NONE2 30 | ||
86 | #define AD7877_REG_NONE3 31 | ||
87 | |||
88 | #define AD7877_SEQ_YPLUS_BIT (1<<11) | ||
89 | #define AD7877_SEQ_XPLUS_BIT (1<<10) | ||
90 | #define AD7877_SEQ_Z2_BIT (1<<9) | ||
91 | #define AD7877_SEQ_AUX1_BIT (1<<8) | ||
92 | #define AD7877_SEQ_AUX2_BIT (1<<7) | ||
93 | #define AD7877_SEQ_AUX3_BIT (1<<6) | ||
94 | #define AD7877_SEQ_BAT1_BIT (1<<5) | ||
95 | #define AD7877_SEQ_BAT2_BIT (1<<4) | ||
96 | #define AD7877_SEQ_TEMP1_BIT (1<<3) | ||
97 | #define AD7877_SEQ_TEMP2_BIT (1<<2) | ||
98 | #define AD7877_SEQ_Z1_BIT (1<<1) | ||
99 | |||
100 | enum { | ||
101 | AD7877_SEQ_YPOS = 0, | ||
102 | AD7877_SEQ_XPOS = 1, | ||
103 | AD7877_SEQ_Z2 = 2, | ||
104 | AD7877_SEQ_AUX1 = 3, | ||
105 | AD7877_SEQ_AUX2 = 4, | ||
106 | AD7877_SEQ_AUX3 = 5, | ||
107 | AD7877_SEQ_BAT1 = 6, | ||
108 | AD7877_SEQ_BAT2 = 7, | ||
109 | AD7877_SEQ_TEMP1 = 8, | ||
110 | AD7877_SEQ_TEMP2 = 9, | ||
111 | AD7877_SEQ_Z1 = 10, | ||
112 | AD7877_NR_SENSE = 11, | ||
113 | }; | ||
114 | |||
115 | /* DAC Register Default RANGE 0 to Vcc, Voltage Mode, DAC On */ | ||
116 | #define AD7877_DAC_CONF 0x1 | ||
117 | |||
118 | /* If gpio3 is set AUX3/GPIO3 acts as GPIO Output */ | ||
119 | #define AD7877_EXTW_GPIO_3_CONF 0x1C4 | ||
120 | #define AD7877_EXTW_GPIO_DATA 0x200 | ||
121 | |||
122 | /* Control REG 2 */ | ||
123 | #define AD7877_TMR(x) ((x & 0x3) << 0) | ||
124 | #define AD7877_REF(x) ((x & 0x1) << 2) | ||
125 | #define AD7877_POL(x) ((x & 0x1) << 3) | ||
126 | #define AD7877_FCD(x) ((x & 0x3) << 4) | ||
127 | #define AD7877_PM(x) ((x & 0x3) << 6) | ||
128 | #define AD7877_ACQ(x) ((x & 0x3) << 8) | ||
129 | #define AD7877_AVG(x) ((x & 0x3) << 10) | ||
130 | |||
131 | /* Control REG 1 */ | ||
132 | #define AD7877_SER (1 << 11) /* non-differential */ | ||
133 | #define AD7877_DFR (0 << 11) /* differential */ | ||
134 | |||
135 | #define AD7877_MODE_NOC (0) /* Do not convert */ | ||
136 | #define AD7877_MODE_SCC (1) /* Single channel conversion */ | ||
137 | #define AD7877_MODE_SEQ0 (2) /* Sequence 0 in Slave Mode */ | ||
138 | #define AD7877_MODE_SEQ1 (3) /* Sequence 1 in Master Mode */ | ||
139 | |||
140 | #define AD7877_CHANADD(x) ((x&0xF)<<7) | ||
141 | #define AD7877_READADD(x) ((x)<<2) | ||
142 | #define AD7877_WRITEADD(x) ((x)<<12) | ||
143 | |||
144 | #define AD7877_READ_CHAN(x) (AD7877_WRITEADD(AD7877_REG_CTRL1) | AD7877_SER | \ | ||
145 | AD7877_MODE_SCC | AD7877_CHANADD(AD7877_REG_ ## x) | \ | ||
146 | AD7877_READADD(AD7877_REG_ ## x)) | ||
147 | |||
148 | #define AD7877_MM_SEQUENCE (AD7877_SEQ_YPLUS_BIT | AD7877_SEQ_XPLUS_BIT | \ | ||
149 | AD7877_SEQ_Z2_BIT | AD7877_SEQ_Z1_BIT) | ||
150 | |||
151 | /* | ||
152 | * Non-touchscreen sensors only use single-ended conversions. | ||
153 | */ | ||
154 | |||
155 | struct ser_req { | ||
156 | u16 reset; | ||
157 | u16 ref_on; | ||
158 | u16 command; | ||
159 | u16 sample; | ||
160 | struct spi_message msg; | ||
161 | struct spi_transfer xfer[6]; | ||
162 | }; | ||
163 | |||
164 | struct ad7877 { | ||
165 | struct input_dev *input; | ||
166 | char phys[32]; | ||
167 | |||
168 | struct spi_device *spi; | ||
169 | u16 model; | ||
170 | u16 vref_delay_usecs; | ||
171 | u16 x_plate_ohms; | ||
172 | u16 pressure_max; | ||
173 | |||
174 | u16 cmd_crtl1; | ||
175 | u16 cmd_crtl2; | ||
176 | u16 cmd_dummy; | ||
177 | u16 dac; | ||
178 | |||
179 | u8 stopacq_polarity; | ||
180 | u8 first_conversion_delay; | ||
181 | u8 acquisition_time; | ||
182 | u8 averaging; | ||
183 | u8 pen_down_acc_interval; | ||
184 | |||
185 | u16 conversion_data[AD7877_NR_SENSE]; | ||
186 | |||
187 | struct spi_transfer xfer[AD7877_NR_SENSE + 2]; | ||
188 | struct spi_message msg; | ||
189 | |||
190 | struct mutex mutex; | ||
191 | unsigned disabled:1; /* P: mutex */ | ||
192 | unsigned gpio3:1; /* P: mutex */ | ||
193 | unsigned gpio4:1; /* P: mutex */ | ||
194 | |||
195 | spinlock_t lock; | ||
196 | struct timer_list timer; /* P: lock */ | ||
197 | unsigned pending:1; /* P: lock */ | ||
198 | }; | ||
199 | |||
200 | static int gpio3; | ||
201 | module_param(gpio3, int, 0); | ||
202 | MODULE_PARM_DESC(gpio3, "If gpio3 is set to 1 AUX3 acts as GPIO3"); | ||
203 | |||
204 | /* | ||
205 | * ad7877_read/write are only used for initial setup and for sysfs controls. | ||
206 | * The main traffic is done using spi_async() in the interrupt handler. | ||
207 | */ | ||
208 | |||
209 | static int ad7877_read(struct spi_device *spi, u16 reg) | ||
210 | { | ||
211 | struct ser_req *req; | ||
212 | int status, ret; | ||
213 | |||
214 | req = kzalloc(sizeof *req, GFP_KERNEL); | ||
215 | if (!req) | ||
216 | return -ENOMEM; | ||
217 | |||
218 | spi_message_init(&req->msg); | ||
219 | |||
220 | req->command = (u16) (AD7877_WRITEADD(AD7877_REG_CTRL1) | | ||
221 | AD7877_READADD(reg)); | ||
222 | req->xfer[0].tx_buf = &req->command; | ||
223 | req->xfer[0].len = 2; | ||
224 | |||
225 | req->xfer[1].rx_buf = &req->sample; | ||
226 | req->xfer[1].len = 2; | ||
227 | |||
228 | spi_message_add_tail(&req->xfer[0], &req->msg); | ||
229 | spi_message_add_tail(&req->xfer[1], &req->msg); | ||
230 | |||
231 | status = spi_sync(spi, &req->msg); | ||
232 | ret = status ? : req->sample; | ||
233 | |||
234 | kfree(req); | ||
235 | |||
236 | return ret; | ||
237 | } | ||
238 | |||
239 | static int ad7877_write(struct spi_device *spi, u16 reg, u16 val) | ||
240 | { | ||
241 | struct ser_req *req; | ||
242 | int status; | ||
243 | |||
244 | req = kzalloc(sizeof *req, GFP_KERNEL); | ||
245 | if (!req) | ||
246 | return -ENOMEM; | ||
247 | |||
248 | spi_message_init(&req->msg); | ||
249 | |||
250 | req->command = (u16) (AD7877_WRITEADD(reg) | (val & MAX_12BIT)); | ||
251 | req->xfer[0].tx_buf = &req->command; | ||
252 | req->xfer[0].len = 2; | ||
253 | |||
254 | spi_message_add_tail(&req->xfer[0], &req->msg); | ||
255 | |||
256 | status = spi_sync(spi, &req->msg); | ||
257 | |||
258 | kfree(req); | ||
259 | |||
260 | return status; | ||
261 | } | ||
262 | |||
263 | static int ad7877_read_adc(struct spi_device *spi, unsigned command) | ||
264 | { | ||
265 | struct ad7877 *ts = dev_get_drvdata(&spi->dev); | ||
266 | struct ser_req *req; | ||
267 | int status; | ||
268 | int sample; | ||
269 | int i; | ||
270 | |||
271 | req = kzalloc(sizeof *req, GFP_KERNEL); | ||
272 | if (!req) | ||
273 | return -ENOMEM; | ||
274 | |||
275 | spi_message_init(&req->msg); | ||
276 | |||
277 | /* activate reference, so it has time to settle; */ | ||
278 | req->ref_on = AD7877_WRITEADD(AD7877_REG_CTRL2) | | ||
279 | AD7877_POL(ts->stopacq_polarity) | | ||
280 | AD7877_AVG(0) | AD7877_PM(2) | AD7877_TMR(0) | | ||
281 | AD7877_ACQ(ts->acquisition_time) | AD7877_FCD(0); | ||
282 | |||
283 | req->reset = AD7877_WRITEADD(AD7877_REG_CTRL1) | AD7877_MODE_NOC; | ||
284 | |||
285 | req->command = (u16) command; | ||
286 | |||
287 | req->xfer[0].tx_buf = &req->reset; | ||
288 | req->xfer[0].len = 2; | ||
289 | |||
290 | req->xfer[1].tx_buf = &req->ref_on; | ||
291 | req->xfer[1].len = 2; | ||
292 | req->xfer[1].delay_usecs = ts->vref_delay_usecs; | ||
293 | |||
294 | req->xfer[2].tx_buf = &req->command; | ||
295 | req->xfer[2].len = 2; | ||
296 | req->xfer[2].delay_usecs = ts->vref_delay_usecs; | ||
297 | |||
298 | req->xfer[3].rx_buf = &req->sample; | ||
299 | req->xfer[3].len = 2; | ||
300 | |||
301 | req->xfer[4].tx_buf = &ts->cmd_crtl2; /*REF OFF*/ | ||
302 | req->xfer[4].len = 2; | ||
303 | |||
304 | req->xfer[5].tx_buf = &ts->cmd_crtl1; /*DEFAULT*/ | ||
305 | req->xfer[5].len = 2; | ||
306 | |||
307 | /* group all the transfers together, so we can't interfere with | ||
308 | * reading touchscreen state; disable penirq while sampling | ||
309 | */ | ||
310 | for (i = 0; i < 6; i++) | ||
311 | spi_message_add_tail(&req->xfer[i], &req->msg); | ||
312 | |||
313 | status = spi_sync(spi, &req->msg); | ||
314 | sample = req->sample; | ||
315 | |||
316 | kfree(req); | ||
317 | |||
318 | return status ? : sample; | ||
319 | } | ||
320 | |||
321 | static void ad7877_rx(struct ad7877 *ts) | ||
322 | { | ||
323 | struct input_dev *input_dev = ts->input; | ||
324 | unsigned Rt; | ||
325 | u16 x, y, z1, z2; | ||
326 | |||
327 | x = ts->conversion_data[AD7877_SEQ_XPOS] & MAX_12BIT; | ||
328 | y = ts->conversion_data[AD7877_SEQ_YPOS] & MAX_12BIT; | ||
329 | z1 = ts->conversion_data[AD7877_SEQ_Z1] & MAX_12BIT; | ||
330 | z2 = ts->conversion_data[AD7877_SEQ_Z2] & MAX_12BIT; | ||
331 | |||
332 | /* | ||
333 | * The samples processed here are already preprocessed by the AD7877. | ||
334 | * The preprocessing function consists of an averaging filter. | ||
335 | * The combination of 'first conversion delay' and averaging provides a robust solution, | ||
336 | * discarding the spurious noise in the signal and keeping only the data of interest. | ||
337 | * The size of the averaging filter is programmable. (dev.platform_data, see linux/spi/ad7877.h) | ||
338 | * Other user-programmable conversion controls include variable acquisition time, | ||
339 | * and first conversion delay. Up to 16 averages can be taken per conversion. | ||
340 | */ | ||
341 | |||
342 | if (likely(x && z1)) { | ||
343 | /* compute touch pressure resistance using equation #1 */ | ||
344 | Rt = (z2 - z1) * x * ts->x_plate_ohms; | ||
345 | Rt /= z1; | ||
346 | Rt = (Rt + 2047) >> 12; | ||
347 | |||
348 | input_report_abs(input_dev, ABS_X, x); | ||
349 | input_report_abs(input_dev, ABS_Y, y); | ||
350 | input_report_abs(input_dev, ABS_PRESSURE, Rt); | ||
351 | input_sync(input_dev); | ||
352 | } | ||
353 | } | ||
354 | |||
355 | static inline void ad7877_ts_event_release(struct ad7877 *ts) | ||
356 | { | ||
357 | struct input_dev *input_dev = ts->input; | ||
358 | |||
359 | input_report_abs(input_dev, ABS_PRESSURE, 0); | ||
360 | input_sync(input_dev); | ||
361 | } | ||
362 | |||
363 | static void ad7877_timer(unsigned long handle) | ||
364 | { | ||
365 | struct ad7877 *ts = (void *)handle; | ||
366 | |||
367 | ad7877_ts_event_release(ts); | ||
368 | } | ||
369 | |||
370 | static irqreturn_t ad7877_irq(int irq, void *handle) | ||
371 | { | ||
372 | struct ad7877 *ts = handle; | ||
373 | unsigned long flags; | ||
374 | int status; | ||
375 | |||
376 | /* | ||
377 | * The repeated conversion sequencer controlled by TMR kicked off | ||
378 | * too fast. We ignore the last and process the sample sequence | ||
379 | * currently in the queue. It can't be older than 9.4ms, and we | ||
380 | * need to make sure that ts->msg is not issued twice while in work. | ||
381 | */ | ||
382 | |||
383 | spin_lock_irqsave(&ts->lock, flags); | ||
384 | if (!ts->pending) { | ||
385 | ts->pending = 1; | ||
386 | |||
387 | status = spi_async(ts->spi, &ts->msg); | ||
388 | if (status) | ||
389 | dev_err(&ts->spi->dev, "spi_async --> %d\n", status); | ||
390 | } | ||
391 | spin_unlock_irqrestore(&ts->lock, flags); | ||
392 | |||
393 | return IRQ_HANDLED; | ||
394 | } | ||
395 | |||
396 | static void ad7877_callback(void *_ts) | ||
397 | { | ||
398 | struct ad7877 *ts = _ts; | ||
399 | |||
400 | spin_lock_irq(&ts->lock); | ||
401 | |||
402 | ad7877_rx(ts); | ||
403 | ts->pending = 0; | ||
404 | mod_timer(&ts->timer, jiffies + TS_PEN_UP_TIMEOUT); | ||
405 | |||
406 | spin_unlock_irq(&ts->lock); | ||
407 | } | ||
408 | |||
409 | static void ad7877_disable(struct ad7877 *ts) | ||
410 | { | ||
411 | mutex_lock(&ts->mutex); | ||
412 | |||
413 | if (!ts->disabled) { | ||
414 | ts->disabled = 1; | ||
415 | disable_irq(ts->spi->irq); | ||
416 | |||
417 | /* Wait for spi_async callback */ | ||
418 | while (ts->pending) | ||
419 | msleep(1); | ||
420 | |||
421 | if (del_timer_sync(&ts->timer)) | ||
422 | ad7877_ts_event_release(ts); | ||
423 | } | ||
424 | |||
425 | /* we know the chip's in lowpower mode since we always | ||
426 | * leave it that way after every request | ||
427 | */ | ||
428 | |||
429 | mutex_unlock(&ts->mutex); | ||
430 | } | ||
431 | |||
432 | static void ad7877_enable(struct ad7877 *ts) | ||
433 | { | ||
434 | mutex_lock(&ts->mutex); | ||
435 | |||
436 | if (ts->disabled) { | ||
437 | ts->disabled = 0; | ||
438 | enable_irq(ts->spi->irq); | ||
439 | } | ||
440 | |||
441 | mutex_unlock(&ts->mutex); | ||
442 | } | ||
443 | |||
444 | #define SHOW(name) static ssize_t \ | ||
445 | name ## _show(struct device *dev, struct device_attribute *attr, char *buf) \ | ||
446 | { \ | ||
447 | struct ad7877 *ts = dev_get_drvdata(dev); \ | ||
448 | ssize_t v = ad7877_read_adc(ts->spi, \ | ||
449 | AD7877_READ_CHAN(name)); \ | ||
450 | if (v < 0) \ | ||
451 | return v; \ | ||
452 | return sprintf(buf, "%u\n", (unsigned) v); \ | ||
453 | } \ | ||
454 | static DEVICE_ATTR(name, S_IRUGO, name ## _show, NULL); | ||
455 | |||
456 | SHOW(aux1) | ||
457 | SHOW(aux2) | ||
458 | SHOW(aux3) | ||
459 | SHOW(bat1) | ||
460 | SHOW(bat2) | ||
461 | SHOW(temp1) | ||
462 | SHOW(temp2) | ||
463 | |||
464 | static ssize_t ad7877_disable_show(struct device *dev, | ||
465 | struct device_attribute *attr, char *buf) | ||
466 | { | ||
467 | struct ad7877 *ts = dev_get_drvdata(dev); | ||
468 | |||
469 | return sprintf(buf, "%u\n", ts->disabled); | ||
470 | } | ||
471 | |||
472 | static ssize_t ad7877_disable_store(struct device *dev, | ||
473 | struct device_attribute *attr, | ||
474 | const char *buf, size_t count) | ||
475 | { | ||
476 | struct ad7877 *ts = dev_get_drvdata(dev); | ||
477 | unsigned long val; | ||
478 | int error; | ||
479 | |||
480 | error = strict_strtoul(buf, 10, &val); | ||
481 | if (error) | ||
482 | return error; | ||
483 | |||
484 | if (val) | ||
485 | ad7877_disable(ts); | ||
486 | else | ||
487 | ad7877_enable(ts); | ||
488 | |||
489 | return count; | ||
490 | } | ||
491 | |||
492 | static DEVICE_ATTR(disable, 0664, ad7877_disable_show, ad7877_disable_store); | ||
493 | |||
494 | static ssize_t ad7877_dac_show(struct device *dev, | ||
495 | struct device_attribute *attr, char *buf) | ||
496 | { | ||
497 | struct ad7877 *ts = dev_get_drvdata(dev); | ||
498 | |||
499 | return sprintf(buf, "%u\n", ts->dac); | ||
500 | } | ||
501 | |||
502 | static ssize_t ad7877_dac_store(struct device *dev, | ||
503 | struct device_attribute *attr, | ||
504 | const char *buf, size_t count) | ||
505 | { | ||
506 | struct ad7877 *ts = dev_get_drvdata(dev); | ||
507 | unsigned long val; | ||
508 | int error; | ||
509 | |||
510 | error = strict_strtoul(buf, 10, &val); | ||
511 | if (error) | ||
512 | return error; | ||
513 | |||
514 | mutex_lock(&ts->mutex); | ||
515 | ts->dac = val & 0xFF; | ||
516 | ad7877_write(ts->spi, AD7877_REG_DAC, (ts->dac << 4) | AD7877_DAC_CONF); | ||
517 | mutex_unlock(&ts->mutex); | ||
518 | |||
519 | return count; | ||
520 | } | ||
521 | |||
522 | static DEVICE_ATTR(dac, 0664, ad7877_dac_show, ad7877_dac_store); | ||
523 | |||
524 | static ssize_t ad7877_gpio3_show(struct device *dev, | ||
525 | struct device_attribute *attr, char *buf) | ||
526 | { | ||
527 | struct ad7877 *ts = dev_get_drvdata(dev); | ||
528 | |||
529 | return sprintf(buf, "%u\n", ts->gpio3); | ||
530 | } | ||
531 | |||
532 | static ssize_t ad7877_gpio3_store(struct device *dev, | ||
533 | struct device_attribute *attr, | ||
534 | const char *buf, size_t count) | ||
535 | { | ||
536 | struct ad7877 *ts = dev_get_drvdata(dev); | ||
537 | unsigned long val; | ||
538 | int error; | ||
539 | |||
540 | error = strict_strtoul(buf, 10, &val); | ||
541 | if (error) | ||
542 | return error; | ||
543 | |||
544 | mutex_lock(&ts->mutex); | ||
545 | ts->gpio3 = !!val; | ||
546 | ad7877_write(ts->spi, AD7877_REG_EXTWRITE, AD7877_EXTW_GPIO_DATA | | ||
547 | (ts->gpio4 << 4) | (ts->gpio3 << 5)); | ||
548 | mutex_unlock(&ts->mutex); | ||
549 | |||
550 | return count; | ||
551 | } | ||
552 | |||
553 | static DEVICE_ATTR(gpio3, 0664, ad7877_gpio3_show, ad7877_gpio3_store); | ||
554 | |||
555 | static ssize_t ad7877_gpio4_show(struct device *dev, | ||
556 | struct device_attribute *attr, char *buf) | ||
557 | { | ||
558 | struct ad7877 *ts = dev_get_drvdata(dev); | ||
559 | |||
560 | return sprintf(buf, "%u\n", ts->gpio4); | ||
561 | } | ||
562 | |||
563 | static ssize_t ad7877_gpio4_store(struct device *dev, | ||
564 | struct device_attribute *attr, | ||
565 | const char *buf, size_t count) | ||
566 | { | ||
567 | struct ad7877 *ts = dev_get_drvdata(dev); | ||
568 | unsigned long val; | ||
569 | int error; | ||
570 | |||
571 | error = strict_strtoul(buf, 10, &val); | ||
572 | if (error) | ||
573 | return error; | ||
574 | |||
575 | mutex_lock(&ts->mutex); | ||
576 | ts->gpio4 = !!val; | ||
577 | ad7877_write(ts->spi, AD7877_REG_EXTWRITE, AD7877_EXTW_GPIO_DATA | | ||
578 | (ts->gpio4 << 4) | (ts->gpio3 << 5)); | ||
579 | mutex_unlock(&ts->mutex); | ||
580 | |||
581 | return count; | ||
582 | } | ||
583 | |||
584 | static DEVICE_ATTR(gpio4, 0664, ad7877_gpio4_show, ad7877_gpio4_store); | ||
585 | |||
586 | static struct attribute *ad7877_attributes[] = { | ||
587 | &dev_attr_temp1.attr, | ||
588 | &dev_attr_temp2.attr, | ||
589 | &dev_attr_aux1.attr, | ||
590 | &dev_attr_aux2.attr, | ||
591 | &dev_attr_bat1.attr, | ||
592 | &dev_attr_bat2.attr, | ||
593 | &dev_attr_disable.attr, | ||
594 | &dev_attr_dac.attr, | ||
595 | &dev_attr_gpio4.attr, | ||
596 | NULL | ||
597 | }; | ||
598 | |||
599 | static const struct attribute_group ad7877_attr_group = { | ||
600 | .attrs = ad7877_attributes, | ||
601 | }; | ||
602 | |||
603 | static void ad7877_setup_ts_def_msg(struct spi_device *spi, struct ad7877 *ts) | ||
604 | { | ||
605 | struct spi_message *m; | ||
606 | int i; | ||
607 | |||
608 | ts->cmd_crtl2 = AD7877_WRITEADD(AD7877_REG_CTRL2) | | ||
609 | AD7877_POL(ts->stopacq_polarity) | | ||
610 | AD7877_AVG(ts->averaging) | AD7877_PM(1) | | ||
611 | AD7877_TMR(ts->pen_down_acc_interval) | | ||
612 | AD7877_ACQ(ts->acquisition_time) | | ||
613 | AD7877_FCD(ts->first_conversion_delay); | ||
614 | |||
615 | ad7877_write(spi, AD7877_REG_CTRL2, ts->cmd_crtl2); | ||
616 | |||
617 | ts->cmd_crtl1 = AD7877_WRITEADD(AD7877_REG_CTRL1) | | ||
618 | AD7877_READADD(AD7877_REG_XPLUS-1) | | ||
619 | AD7877_MODE_SEQ1 | AD7877_DFR; | ||
620 | |||
621 | ad7877_write(spi, AD7877_REG_CTRL1, ts->cmd_crtl1); | ||
622 | |||
623 | ts->cmd_dummy = 0; | ||
624 | |||
625 | m = &ts->msg; | ||
626 | |||
627 | spi_message_init(m); | ||
628 | |||
629 | m->complete = ad7877_callback; | ||
630 | m->context = ts; | ||
631 | |||
632 | ts->xfer[0].tx_buf = &ts->cmd_crtl1; | ||
633 | ts->xfer[0].len = 2; | ||
634 | |||
635 | spi_message_add_tail(&ts->xfer[0], m); | ||
636 | |||
637 | ts->xfer[1].tx_buf = &ts->cmd_dummy; /* Send ZERO */ | ||
638 | ts->xfer[1].len = 2; | ||
639 | |||
640 | spi_message_add_tail(&ts->xfer[1], m); | ||
641 | |||
642 | for (i = 0; i < 11; i++) { | ||
643 | ts->xfer[i + 2].rx_buf = &ts->conversion_data[AD7877_SEQ_YPOS + i]; | ||
644 | ts->xfer[i + 2].len = 2; | ||
645 | spi_message_add_tail(&ts->xfer[i + 2], m); | ||
646 | } | ||
647 | } | ||
648 | |||
649 | static int __devinit ad7877_probe(struct spi_device *spi) | ||
650 | { | ||
651 | struct ad7877 *ts; | ||
652 | struct input_dev *input_dev; | ||
653 | struct ad7877_platform_data *pdata = spi->dev.platform_data; | ||
654 | int err; | ||
655 | u16 verify; | ||
656 | |||
657 | if (!spi->irq) { | ||
658 | dev_dbg(&spi->dev, "no IRQ?\n"); | ||
659 | return -ENODEV; | ||
660 | } | ||
661 | |||
662 | if (!pdata) { | ||
663 | dev_dbg(&spi->dev, "no platform data?\n"); | ||
664 | return -ENODEV; | ||
665 | } | ||
666 | |||
667 | /* don't exceed max specified SPI CLK frequency */ | ||
668 | if (spi->max_speed_hz > MAX_SPI_FREQ_HZ) { | ||
669 | dev_dbg(&spi->dev, "SPI CLK %d Hz?\n", spi->max_speed_hz); | ||
670 | return -EINVAL; | ||
671 | } | ||
672 | |||
673 | ts = kzalloc(sizeof(struct ad7877), GFP_KERNEL); | ||
674 | input_dev = input_allocate_device(); | ||
675 | if (!ts || !input_dev) { | ||
676 | err = -ENOMEM; | ||
677 | goto err_free_mem; | ||
678 | } | ||
679 | |||
680 | dev_set_drvdata(&spi->dev, ts); | ||
681 | ts->spi = spi; | ||
682 | ts->input = input_dev; | ||
683 | |||
684 | setup_timer(&ts->timer, ad7877_timer, (unsigned long) ts); | ||
685 | mutex_init(&ts->mutex); | ||
686 | spin_lock_init(&ts->lock); | ||
687 | |||
688 | ts->model = pdata->model ? : 7877; | ||
689 | ts->vref_delay_usecs = pdata->vref_delay_usecs ? : 100; | ||
690 | ts->x_plate_ohms = pdata->x_plate_ohms ? : 400; | ||
691 | ts->pressure_max = pdata->pressure_max ? : ~0; | ||
692 | |||
693 | ts->stopacq_polarity = pdata->stopacq_polarity; | ||
694 | ts->first_conversion_delay = pdata->first_conversion_delay; | ||
695 | ts->acquisition_time = pdata->acquisition_time; | ||
696 | ts->averaging = pdata->averaging; | ||
697 | ts->pen_down_acc_interval = pdata->pen_down_acc_interval; | ||
698 | |||
699 | snprintf(ts->phys, sizeof(ts->phys), "%s/input0", dev_name(&spi->dev)); | ||
700 | |||
701 | input_dev->name = "AD7877 Touchscreen"; | ||
702 | input_dev->phys = ts->phys; | ||
703 | input_dev->dev.parent = &spi->dev; | ||
704 | |||
705 | __set_bit(EV_ABS, input_dev->evbit); | ||
706 | __set_bit(ABS_X, input_dev->absbit); | ||
707 | __set_bit(ABS_Y, input_dev->absbit); | ||
708 | __set_bit(ABS_PRESSURE, input_dev->absbit); | ||
709 | |||
710 | input_set_abs_params(input_dev, ABS_X, | ||
711 | pdata->x_min ? : 0, | ||
712 | pdata->x_max ? : MAX_12BIT, | ||
713 | 0, 0); | ||
714 | input_set_abs_params(input_dev, ABS_Y, | ||
715 | pdata->y_min ? : 0, | ||
716 | pdata->y_max ? : MAX_12BIT, | ||
717 | 0, 0); | ||
718 | input_set_abs_params(input_dev, ABS_PRESSURE, | ||
719 | pdata->pressure_min, pdata->pressure_max, 0, 0); | ||
720 | |||
721 | ad7877_write(spi, AD7877_REG_SEQ1, AD7877_MM_SEQUENCE); | ||
722 | |||
723 | verify = ad7877_read(spi, AD7877_REG_SEQ1); | ||
724 | |||
725 | if (verify != AD7877_MM_SEQUENCE) { | ||
726 | dev_err(&spi->dev, "%s: Failed to probe %s\n", | ||
727 | dev_name(&spi->dev), input_dev->name); | ||
728 | err = -ENODEV; | ||
729 | goto err_free_mem; | ||
730 | } | ||
731 | |||
732 | if (gpio3) | ||
733 | ad7877_write(spi, AD7877_REG_EXTWRITE, AD7877_EXTW_GPIO_3_CONF); | ||
734 | |||
735 | ad7877_setup_ts_def_msg(spi, ts); | ||
736 | |||
737 | /* Request AD7877 /DAV GPIO interrupt */ | ||
738 | |||
739 | err = request_irq(spi->irq, ad7877_irq, IRQF_TRIGGER_FALLING, | ||
740 | spi->dev.driver->name, ts); | ||
741 | if (err) { | ||
742 | dev_dbg(&spi->dev, "irq %d busy?\n", spi->irq); | ||
743 | goto err_free_mem; | ||
744 | } | ||
745 | |||
746 | err = sysfs_create_group(&spi->dev.kobj, &ad7877_attr_group); | ||
747 | if (err) | ||
748 | goto err_free_irq; | ||
749 | |||
750 | err = device_create_file(&spi->dev, | ||
751 | gpio3 ? &dev_attr_gpio3 : &dev_attr_aux3); | ||
752 | if (err) | ||
753 | goto err_remove_attr_group; | ||
754 | |||
755 | err = input_register_device(input_dev); | ||
756 | if (err) | ||
757 | goto err_remove_attr; | ||
758 | |||
759 | return 0; | ||
760 | |||
761 | err_remove_attr: | ||
762 | device_remove_file(&spi->dev, | ||
763 | gpio3 ? &dev_attr_gpio3 : &dev_attr_aux3); | ||
764 | err_remove_attr_group: | ||
765 | sysfs_remove_group(&spi->dev.kobj, &ad7877_attr_group); | ||
766 | err_free_irq: | ||
767 | free_irq(spi->irq, ts); | ||
768 | err_free_mem: | ||
769 | input_free_device(input_dev); | ||
770 | kfree(ts); | ||
771 | dev_set_drvdata(&spi->dev, NULL); | ||
772 | return err; | ||
773 | } | ||
774 | |||
775 | static int __devexit ad7877_remove(struct spi_device *spi) | ||
776 | { | ||
777 | struct ad7877 *ts = dev_get_drvdata(&spi->dev); | ||
778 | |||
779 | sysfs_remove_group(&spi->dev.kobj, &ad7877_attr_group); | ||
780 | device_remove_file(&spi->dev, | ||
781 | gpio3 ? &dev_attr_gpio3 : &dev_attr_aux3); | ||
782 | |||
783 | ad7877_disable(ts); | ||
784 | free_irq(ts->spi->irq, ts); | ||
785 | |||
786 | input_unregister_device(ts->input); | ||
787 | kfree(ts); | ||
788 | |||
789 | dev_dbg(&spi->dev, "unregistered touchscreen\n"); | ||
790 | dev_set_drvdata(&spi->dev, NULL); | ||
791 | |||
792 | return 0; | ||
793 | } | ||
794 | |||
795 | #ifdef CONFIG_PM | ||
796 | static int ad7877_suspend(struct spi_device *spi, pm_message_t message) | ||
797 | { | ||
798 | struct ad7877 *ts = dev_get_drvdata(&spi->dev); | ||
799 | |||
800 | ad7877_disable(ts); | ||
801 | |||
802 | return 0; | ||
803 | } | ||
804 | |||
805 | static int ad7877_resume(struct spi_device *spi) | ||
806 | { | ||
807 | struct ad7877 *ts = dev_get_drvdata(&spi->dev); | ||
808 | |||
809 | ad7877_enable(ts); | ||
810 | |||
811 | return 0; | ||
812 | } | ||
813 | #else | ||
814 | #define ad7877_suspend NULL | ||
815 | #define ad7877_resume NULL | ||
816 | #endif | ||
817 | |||
818 | static struct spi_driver ad7877_driver = { | ||
819 | .driver = { | ||
820 | .name = "ad7877", | ||
821 | .bus = &spi_bus_type, | ||
822 | .owner = THIS_MODULE, | ||
823 | }, | ||
824 | .probe = ad7877_probe, | ||
825 | .remove = __devexit_p(ad7877_remove), | ||
826 | .suspend = ad7877_suspend, | ||
827 | .resume = ad7877_resume, | ||
828 | }; | ||
829 | |||
830 | static int __init ad7877_init(void) | ||
831 | { | ||
832 | return spi_register_driver(&ad7877_driver); | ||
833 | } | ||
834 | module_init(ad7877_init); | ||
835 | |||
836 | static void __exit ad7877_exit(void) | ||
837 | { | ||
838 | spi_unregister_driver(&ad7877_driver); | ||
839 | } | ||
840 | module_exit(ad7877_exit); | ||
841 | |||
842 | MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>"); | ||
843 | MODULE_DESCRIPTION("AD7877 touchscreen Driver"); | ||
844 | MODULE_LICENSE("GPL"); | ||
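As the Kconfig help text above notes, this driver only probes once board-specific initialization code has declared the chip in its SPI device table together with a struct ad7877_platform_data (the fields consumed in ad7877_probe()). A minimal sketch of such board code follows; it is not part of this patch, and the IRQ number, SPI bus, chip select and tuning values are hypothetical and entirely board specific:

	#include <linux/spi/spi.h>
	#include <linux/spi/ad7877.h>

	/* Hypothetical board wiring; every value here is board specific */
	static struct ad7877_platform_data board_ad7877_ts_info = {
		.model			= 7877,
		.vref_delay_usecs	= 50,	/* reference settling time */
		.x_plate_ohms		= 419,	/* touch panel sheet resistance */
		.pressure_min		= 0,
		.pressure_max		= 1000,
		.stopacq_polarity	= 1,
		.first_conversion_delay	= 3,
		.acquisition_time	= 1,
		.averaging		= 1,
		.pen_down_acc_interval	= 1,
	};

	static struct spi_board_info board_spi_devices[] __initdata = {
		{
			.modalias	= "ad7877",	/* matches ad7877_driver.driver.name */
			.platform_data	= &board_ad7877_ts_info,
			.irq		= 42,		/* hypothetical /DAV pen-down interrupt */
			.max_speed_hz	= 12500000,	/* must not exceed MAX_SPI_FREQ_HZ */
			.bus_num	= 0,
			.chip_select	= 1,
		},
	};

The table would be handed to spi_register_board_info(board_spi_devices, ARRAY_SIZE(board_spi_devices)) early in machine init, before the SPI master and this driver are registered.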
diff --git a/drivers/input/touchscreen/ad7879.c b/drivers/input/touchscreen/ad7879.c new file mode 100644 index 000000000000..5d8a70398807 --- /dev/null +++ b/drivers/input/touchscreen/ad7879.c | |||
@@ -0,0 +1,781 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2008 Michael Hennerich, Analog Devices Inc. | ||
3 | * | ||
4 | * Description: AD7879 based touchscreen, and GPIO driver (I2C/SPI Interface) | ||
5 | * | ||
6 | * Bugs: Enter bugs at http://blackfin.uclinux.org/ | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, see the file COPYING, or write | ||
20 | * to the Free Software Foundation, Inc., | ||
21 | * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
22 | * | ||
23 | * History: | ||
24 | * Copyright (c) 2005 David Brownell | ||
25 | * Copyright (c) 2006 Nokia Corporation | ||
26 | * Various changes: Imre Deak <imre.deak@nokia.com> | ||
27 | * | ||
28 | * Using code from: | ||
29 | * - corgi_ts.c | ||
30 | * Copyright (C) 2004-2005 Richard Purdie | ||
31 | * - omap_ts.[hc], ads7846.h, ts_osk.c | ||
32 | * Copyright (C) 2002 MontaVista Software | ||
33 | * Copyright (C) 2004 Texas Instruments | ||
34 | * Copyright (C) 2005 Dirk Behme | ||
35 | * - ad7877.c | ||
36 | * Copyright (C) 2006-2008 Analog Devices Inc. | ||
37 | */ | ||
38 | |||
39 | #include <linux/device.h> | ||
40 | #include <linux/init.h> | ||
41 | #include <linux/delay.h> | ||
42 | #include <linux/input.h> | ||
43 | #include <linux/interrupt.h> | ||
44 | #include <linux/irq.h> | ||
45 | #include <linux/slab.h> | ||
46 | #include <linux/workqueue.h> | ||
47 | #include <linux/spi/spi.h> | ||
48 | #include <linux/i2c.h> | ||
49 | |||
50 | #include <linux/spi/ad7879.h> | ||
51 | |||
52 | #define AD7879_REG_ZEROS 0 | ||
53 | #define AD7879_REG_CTRL1 1 | ||
54 | #define AD7879_REG_CTRL2 2 | ||
55 | #define AD7879_REG_CTRL3 3 | ||
56 | #define AD7879_REG_AUX1HIGH 4 | ||
57 | #define AD7879_REG_AUX1LOW 5 | ||
58 | #define AD7879_REG_TEMP1HIGH 6 | ||
59 | #define AD7879_REG_TEMP1LOW 7 | ||
60 | #define AD7879_REG_XPLUS 8 | ||
61 | #define AD7879_REG_YPLUS 9 | ||
62 | #define AD7879_REG_Z1 10 | ||
63 | #define AD7879_REG_Z2 11 | ||
64 | #define AD7879_REG_AUXVBAT 12 | ||
65 | #define AD7879_REG_TEMP 13 | ||
66 | #define AD7879_REG_REVID 14 | ||
67 | |||
68 | /* Control REG 1 */ | ||
69 | #define AD7879_TMR(x) ((x & 0xFF) << 0) | ||
70 | #define AD7879_ACQ(x) ((x & 0x3) << 8) | ||
71 | #define AD7879_MODE_NOC (0 << 10) /* Do not convert */ | ||
72 | #define AD7879_MODE_SCC (1 << 10) /* Single channel conversion */ | ||
73 | #define AD7879_MODE_SEQ0 (2 << 10) /* Sequence 0 in Slave Mode */ | ||
74 | #define AD7879_MODE_SEQ1 (3 << 10) /* Sequence 1 in Master Mode */ | ||
75 | #define AD7879_MODE_INT (1 << 15) /* PENIRQ disabled INT enabled */ | ||
76 | |||
77 | /* Control REG 2 */ | ||
78 | #define AD7879_FCD(x) ((x & 0x3) << 0) | ||
79 | #define AD7879_RESET (1 << 4) | ||
80 | #define AD7879_MFS(x) ((x & 0x3) << 5) | ||
81 | #define AD7879_AVG(x) ((x & 0x3) << 7) | ||
82 | #define AD7879_SER (1 << 9) /* non-differential */ | ||
83 | #define AD7879_DFR (0 << 9) /* differential */ | ||
84 | #define AD7879_GPIOPOL (1 << 10) | ||
85 | #define AD7879_GPIODIR (1 << 11) | ||
86 | #define AD7879_GPIO_DATA (1 << 12) | ||
87 | #define AD7879_GPIO_EN (1 << 13) | ||
88 | #define AD7879_PM(x) ((x & 0x3) << 14) | ||
89 | #define AD7879_PM_SHUTDOWN (0) | ||
90 | #define AD7879_PM_DYN (1) | ||
91 | #define AD7879_PM_FULLON (2) | ||
92 | |||
93 | /* Control REG 3 */ | ||
94 | #define AD7879_TEMPMASK_BIT (1<<15) | ||
95 | #define AD7879_AUXVBATMASK_BIT (1<<14) | ||
96 | #define AD7879_INTMODE_BIT (1<<13) | ||
97 | #define AD7879_GPIOALERTMASK_BIT (1<<12) | ||
98 | #define AD7879_AUXLOW_BIT (1<<11) | ||
99 | #define AD7879_AUXHIGH_BIT (1<<10) | ||
100 | #define AD7879_TEMPLOW_BIT (1<<9) | ||
101 | #define AD7879_TEMPHIGH_BIT (1<<8) | ||
102 | #define AD7879_YPLUS_BIT (1<<7) | ||
103 | #define AD7879_XPLUS_BIT (1<<6) | ||
104 | #define AD7879_Z1_BIT (1<<5) | ||
105 | #define AD7879_Z2_BIT (1<<4) | ||
106 | #define AD7879_AUX_BIT (1<<3) | ||
107 | #define AD7879_VBAT_BIT (1<<2) | ||
108 | #define AD7879_TEMP_BIT (1<<1) | ||
109 | |||
110 | enum { | ||
111 | AD7879_SEQ_XPOS = 0, | ||
112 | AD7879_SEQ_YPOS = 1, | ||
113 | AD7879_SEQ_Z1 = 2, | ||
114 | AD7879_SEQ_Z2 = 3, | ||
115 | AD7879_NR_SENSE = 4, | ||
116 | }; | ||
117 | |||
118 | #define MAX_12BIT ((1<<12)-1) | ||
119 | #define TS_PEN_UP_TIMEOUT msecs_to_jiffies(50) | ||
120 | |||
121 | #if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE) | ||
122 | #define AD7879_DEVID 0x7A | ||
123 | typedef struct spi_device bus_device; | ||
124 | #elif defined(CONFIG_TOUCHSCREEN_AD7879_I2C) || defined(CONFIG_TOUCHSCREEN_AD7879_I2C_MODULE) | ||
125 | #define AD7879_DEVID 0x79 | ||
126 | typedef struct i2c_client bus_device; | ||
127 | #endif | ||
128 | |||
129 | struct ad7879 { | ||
130 | bus_device *bus; | ||
131 | struct input_dev *input; | ||
132 | struct work_struct work; | ||
133 | struct timer_list timer; | ||
134 | |||
135 | struct mutex mutex; | ||
136 | unsigned disabled:1; /* P: mutex */ | ||
137 | |||
138 | #if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE) | ||
139 | struct spi_message msg; | ||
140 | struct spi_transfer xfer[AD7879_NR_SENSE + 1]; | ||
141 | u16 cmd; | ||
142 | #endif | ||
143 | u16 conversion_data[AD7879_NR_SENSE]; | ||
144 | char phys[32]; | ||
145 | u8 first_conversion_delay; | ||
146 | u8 acquisition_time; | ||
147 | u8 averaging; | ||
148 | u8 pen_down_acc_interval; | ||
149 | u8 median; | ||
150 | u16 x_plate_ohms; | ||
151 | u16 pressure_max; | ||
152 | u16 gpio_init; | ||
153 | u16 cmd_crtl1; | ||
154 | u16 cmd_crtl2; | ||
155 | u16 cmd_crtl3; | ||
156 | unsigned gpio:1; | ||
157 | }; | ||
158 | |||
159 | static int ad7879_read(bus_device *, u8); | ||
160 | static int ad7879_write(bus_device *, u8, u16); | ||
161 | static void ad7879_collect(struct ad7879 *); | ||
162 | |||
163 | static void ad7879_report(struct ad7879 *ts) | ||
164 | { | ||
165 | struct input_dev *input_dev = ts->input; | ||
166 | unsigned Rt; | ||
167 | u16 x, y, z1, z2; | ||
168 | |||
169 | x = ts->conversion_data[AD7879_SEQ_XPOS] & MAX_12BIT; | ||
170 | y = ts->conversion_data[AD7879_SEQ_YPOS] & MAX_12BIT; | ||
171 | z1 = ts->conversion_data[AD7879_SEQ_Z1] & MAX_12BIT; | ||
172 | z2 = ts->conversion_data[AD7879_SEQ_Z2] & MAX_12BIT; | ||
173 | |||
174 | /* | ||
175 | * The samples processed here are already preprocessed by the AD7879. | ||
176 | * The preprocessing function consists of a median and an averaging filter. | ||
177 | * The combination of these two techniques provides a robust solution, | ||
178 | * discarding the spurious noise in the signal and keeping only the data of interest. | ||
179 | * The size of both filters is programmable. (dev.platform_data, see linux/spi/ad7879.h) | ||
180 | * Other user-programmable conversion controls include variable acquisition time, | ||
181 | * and first conversion delay. Up to 16 averages can be taken per conversion. | ||
182 | */ | ||
183 | |||
184 | if (likely(x && z1)) { | ||
185 | /* compute touch pressure resistance using equation #1 */ | ||
186 | Rt = (z2 - z1) * x * ts->x_plate_ohms; | ||
187 | Rt /= z1; | ||
188 | Rt = (Rt + 2047) >> 12; | ||
189 | |||
190 | input_report_abs(input_dev, ABS_X, x); | ||
191 | input_report_abs(input_dev, ABS_Y, y); | ||
192 | input_report_abs(input_dev, ABS_PRESSURE, Rt); | ||
193 | input_sync(input_dev); | ||
194 | } | ||
195 | } | ||
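For illustration only, the pressure math above can be pulled out into a small helper; the sample values in the comment are assumed, not taken from the AD7879 datasheet, and the driver only runs this path when x and z1 are non-zero:

        /* Hypothetical helper mirroring the calculation in ad7879_report();
         * not part of the driver. */
        static unsigned ad7879_example_pressure(u16 x, u16 z1, u16 z2, u16 x_plate_ohms)
        {
                unsigned Rt = (z2 - z1) * x * x_plate_ohms;     /* equation #1 numerator */

                Rt /= z1;
                return (Rt + 2047) >> 12;       /* round and scale back into 12-bit range */
        }

        /* e.g. x = 2000, z1 = 400, z2 = 600, 400 ohm plate:
         * (600 - 400) * 2000 * 400 / 400 = 400000; (400000 + 2047) >> 12 = 98,
         * which is the value reported as ABS_PRESSURE.
         */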
196 | |||
197 | static void ad7879_work(struct work_struct *work) | ||
198 | { | ||
199 | struct ad7879 *ts = container_of(work, struct ad7879, work); | ||
200 | |||
201 | /* use keventd context to read the result registers */ | ||
202 | ad7879_collect(ts); | ||
203 | ad7879_report(ts); | ||
204 | mod_timer(&ts->timer, jiffies + TS_PEN_UP_TIMEOUT); | ||
205 | } | ||
206 | |||
207 | static void ad7879_ts_event_release(struct ad7879 *ts) | ||
208 | { | ||
209 | struct input_dev *input_dev = ts->input; | ||
210 | |||
211 | input_report_abs(input_dev, ABS_PRESSURE, 0); | ||
212 | input_sync(input_dev); | ||
213 | } | ||
214 | |||
215 | static void ad7879_timer(unsigned long handle) | ||
216 | { | ||
217 | struct ad7879 *ts = (void *)handle; | ||
218 | |||
219 | ad7879_ts_event_release(ts); | ||
220 | } | ||
221 | |||
222 | static irqreturn_t ad7879_irq(int irq, void *handle) | ||
223 | { | ||
224 | struct ad7879 *ts = handle; | ||
225 | |||
226 | /* The repeated conversion sequencer controlled by TMR kicked off too fast. | ||
227 | * We ignore the last sample and process the sample sequence currently in the queue. | ||
228 | * It can't be older than 9.4 ms. | ||
229 | */ | ||
230 | |||
231 | if (!work_pending(&ts->work)) | ||
232 | schedule_work(&ts->work); | ||
233 | |||
234 | return IRQ_HANDLED; | ||
235 | } | ||
236 | |||
237 | static void ad7879_setup(struct ad7879 *ts) | ||
238 | { | ||
239 | ts->cmd_crtl3 = AD7879_YPLUS_BIT | | ||
240 | AD7879_XPLUS_BIT | | ||
241 | AD7879_Z2_BIT | | ||
242 | AD7879_Z1_BIT | | ||
243 | AD7879_TEMPMASK_BIT | | ||
244 | AD7879_AUXVBATMASK_BIT | | ||
245 | AD7879_GPIOALERTMASK_BIT; | ||
246 | |||
247 | ts->cmd_crtl2 = AD7879_PM(AD7879_PM_DYN) | AD7879_DFR | | ||
248 | AD7879_AVG(ts->averaging) | | ||
249 | AD7879_MFS(ts->median) | | ||
250 | AD7879_FCD(ts->first_conversion_delay) | | ||
251 | ts->gpio_init; | ||
252 | |||
253 | ts->cmd_crtl1 = AD7879_MODE_INT | AD7879_MODE_SEQ1 | | ||
254 | AD7879_ACQ(ts->acquisition_time) | | ||
255 | AD7879_TMR(ts->pen_down_acc_interval); | ||
256 | |||
257 | ad7879_write(ts->bus, AD7879_REG_CTRL2, ts->cmd_crtl2); | ||
258 | ad7879_write(ts->bus, AD7879_REG_CTRL3, ts->cmd_crtl3); | ||
259 | ad7879_write(ts->bus, AD7879_REG_CTRL1, ts->cmd_crtl1); | ||
260 | } | ||
261 | |||
262 | static void ad7879_disable(struct ad7879 *ts) | ||
263 | { | ||
264 | mutex_lock(&ts->mutex); | ||
265 | |||
266 | if (!ts->disabled) { | ||
267 | |||
268 | ts->disabled = 1; | ||
269 | disable_irq(ts->bus->irq); | ||
270 | |||
271 | cancel_work_sync(&ts->work); | ||
272 | |||
273 | if (del_timer_sync(&ts->timer)) | ||
274 | ad7879_ts_event_release(ts); | ||
275 | |||
276 | ad7879_write(ts->bus, AD7879_REG_CTRL2, | ||
277 | AD7879_PM(AD7879_PM_SHUTDOWN)); | ||
278 | } | ||
279 | |||
280 | mutex_unlock(&ts->mutex); | ||
281 | } | ||
282 | |||
283 | static void ad7879_enable(struct ad7879 *ts) | ||
284 | { | ||
285 | mutex_lock(&ts->mutex); | ||
286 | |||
287 | if (ts->disabled) { | ||
288 | ad7879_setup(ts); | ||
289 | ts->disabled = 0; | ||
290 | enable_irq(ts->bus->irq); | ||
291 | } | ||
292 | |||
293 | mutex_unlock(&ts->mutex); | ||
294 | } | ||
295 | |||
296 | static ssize_t ad7879_disable_show(struct device *dev, | ||
297 | struct device_attribute *attr, char *buf) | ||
298 | { | ||
299 | struct ad7879 *ts = dev_get_drvdata(dev); | ||
300 | |||
301 | return sprintf(buf, "%u\n", ts->disabled); | ||
302 | } | ||
303 | |||
304 | static ssize_t ad7879_disable_store(struct device *dev, | ||
305 | struct device_attribute *attr, | ||
306 | const char *buf, size_t count) | ||
307 | { | ||
308 | struct ad7879 *ts = dev_get_drvdata(dev); | ||
309 | unsigned long val; | ||
310 | int error; | ||
311 | |||
312 | error = strict_strtoul(buf, 10, &val); | ||
313 | if (error) | ||
314 | return error; | ||
315 | |||
316 | if (val) | ||
317 | ad7879_disable(ts); | ||
318 | else | ||
319 | ad7879_enable(ts); | ||
320 | |||
321 | return count; | ||
322 | } | ||
323 | |||
324 | static DEVICE_ATTR(disable, 0664, ad7879_disable_show, ad7879_disable_store); | ||
325 | |||
326 | static ssize_t ad7879_gpio_show(struct device *dev, | ||
327 | struct device_attribute *attr, char *buf) | ||
328 | { | ||
329 | struct ad7879 *ts = dev_get_drvdata(dev); | ||
330 | |||
331 | return sprintf(buf, "%u\n", ts->gpio); | ||
332 | } | ||
333 | |||
334 | static ssize_t ad7879_gpio_store(struct device *dev, | ||
335 | struct device_attribute *attr, | ||
336 | const char *buf, size_t count) | ||
337 | { | ||
338 | struct ad7879 *ts = dev_get_drvdata(dev); | ||
339 | unsigned long val; | ||
340 | int error; | ||
341 | |||
342 | error = strict_strtoul(buf, 10, &val); | ||
343 | if (error) | ||
344 | return error; | ||
345 | |||
346 | mutex_lock(&ts->mutex); | ||
347 | ts->gpio = !!val; | ||
348 | error = ad7879_write(ts->bus, AD7879_REG_CTRL2, | ||
349 | ts->gpio ? | ||
350 | ts->cmd_crtl2 & ~AD7879_GPIO_DATA : | ||
351 | ts->cmd_crtl2 | AD7879_GPIO_DATA); | ||
352 | mutex_unlock(&ts->mutex); | ||
353 | |||
354 | return error ? : count; | ||
355 | } | ||
356 | |||
357 | static DEVICE_ATTR(gpio, 0664, ad7879_gpio_show, ad7879_gpio_store); | ||
358 | |||
359 | static struct attribute *ad7879_attributes[] = { | ||
360 | &dev_attr_disable.attr, | ||
361 | &dev_attr_gpio.attr, | ||
362 | NULL | ||
363 | }; | ||
364 | |||
365 | static const struct attribute_group ad7879_attr_group = { | ||
366 | .attrs = ad7879_attributes, | ||
367 | }; | ||
368 | |||
369 | static int __devinit ad7879_construct(bus_device *bus, struct ad7879 *ts) | ||
370 | { | ||
371 | struct input_dev *input_dev; | ||
372 | struct ad7879_platform_data *pdata = bus->dev.platform_data; | ||
373 | int err; | ||
374 | u16 revid; | ||
375 | |||
376 | if (!bus->irq) { | ||
377 | dev_err(&bus->dev, "no IRQ?\n"); | ||
378 | return -ENODEV; | ||
379 | } | ||
380 | |||
381 | if (!pdata) { | ||
382 | dev_err(&bus->dev, "no platform data?\n"); | ||
383 | return -ENODEV; | ||
384 | } | ||
385 | |||
386 | input_dev = input_allocate_device(); | ||
387 | if (!input_dev) | ||
388 | return -ENOMEM; | ||
389 | |||
390 | ts->input = input_dev; | ||
391 | |||
392 | setup_timer(&ts->timer, ad7879_timer, (unsigned long) ts); | ||
393 | INIT_WORK(&ts->work, ad7879_work); | ||
394 | mutex_init(&ts->mutex); | ||
395 | |||
396 | ts->x_plate_ohms = pdata->x_plate_ohms ? : 400; | ||
397 | ts->pressure_max = pdata->pressure_max ? : ~0; | ||
398 | |||
399 | ts->first_conversion_delay = pdata->first_conversion_delay; | ||
400 | ts->acquisition_time = pdata->acquisition_time; | ||
401 | ts->averaging = pdata->averaging; | ||
402 | ts->pen_down_acc_interval = pdata->pen_down_acc_interval; | ||
403 | ts->median = pdata->median; | ||
404 | |||
405 | if (pdata->gpio_output) | ||
406 | ts->gpio_init = AD7879_GPIO_EN | | ||
407 | (pdata->gpio_default ? 0 : AD7879_GPIO_DATA); | ||
408 | else | ||
409 | ts->gpio_init = AD7879_GPIO_EN | AD7879_GPIODIR; | ||
410 | |||
411 | snprintf(ts->phys, sizeof(ts->phys), "%s/input0", dev_name(&bus->dev)); | ||
412 | |||
413 | input_dev->name = "AD7879 Touchscreen"; | ||
414 | input_dev->phys = ts->phys; | ||
415 | input_dev->dev.parent = &bus->dev; | ||
416 | |||
417 | __set_bit(EV_ABS, input_dev->evbit); | ||
418 | __set_bit(ABS_X, input_dev->absbit); | ||
419 | __set_bit(ABS_Y, input_dev->absbit); | ||
420 | __set_bit(ABS_PRESSURE, input_dev->absbit); | ||
421 | |||
422 | input_set_abs_params(input_dev, ABS_X, | ||
423 | pdata->x_min ? : 0, | ||
424 | pdata->x_max ? : MAX_12BIT, | ||
425 | 0, 0); | ||
426 | input_set_abs_params(input_dev, ABS_Y, | ||
427 | pdata->y_min ? : 0, | ||
428 | pdata->y_max ? : MAX_12BIT, | ||
429 | 0, 0); | ||
430 | input_set_abs_params(input_dev, ABS_PRESSURE, | ||
431 | pdata->pressure_min, pdata->pressure_max, 0, 0); | ||
432 | |||
433 | err = ad7879_write(bus, AD7879_REG_CTRL2, AD7879_RESET); | ||
434 | |||
435 | if (err < 0) { | ||
436 | dev_err(&bus->dev, "Failed to write %s\n", input_dev->name); | ||
437 | goto err_free_mem; | ||
438 | } | ||
439 | |||
440 | revid = ad7879_read(bus, AD7879_REG_REVID); | ||
441 | |||
442 | if ((revid & 0xFF) != AD7879_DEVID) { | ||
443 | dev_err(&bus->dev, "Failed to probe %s\n", input_dev->name); | ||
444 | err = -ENODEV; | ||
445 | goto err_free_mem; | ||
446 | } | ||
447 | |||
448 | ad7879_setup(ts); | ||
449 | |||
450 | err = request_irq(bus->irq, ad7879_irq, | ||
451 | IRQF_TRIGGER_FALLING, bus->dev.driver->name, ts); | ||
452 | |||
453 | if (err) { | ||
454 | dev_err(&bus->dev, "irq %d busy?\n", bus->irq); | ||
455 | goto err_free_mem; | ||
456 | } | ||
457 | |||
458 | err = sysfs_create_group(&bus->dev.kobj, &ad7879_attr_group); | ||
459 | if (err) | ||
460 | goto err_free_irq; | ||
461 | |||
462 | err = input_register_device(input_dev); | ||
463 | if (err) | ||
464 | goto err_remove_attr; | ||
465 | |||
466 | dev_info(&bus->dev, "Rev.%d touchscreen, irq %d\n", | ||
467 | revid >> 8, bus->irq); | ||
468 | |||
469 | return 0; | ||
470 | |||
471 | err_remove_attr: | ||
472 | sysfs_remove_group(&bus->dev.kobj, &ad7879_attr_group); | ||
473 | err_free_irq: | ||
474 | free_irq(bus->irq, ts); | ||
475 | err_free_mem: | ||
476 | input_free_device(input_dev); | ||
477 | |||
478 | return err; | ||
479 | } | ||
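All of the filter and timing parameters consumed above come from the board's platform data. A hypothetical board-file initializer might look like the sketch below; the field names are the ones read by ad7879_construct(), but the values are purely illustrative and would be tuned per board and panel:

        #include <linux/spi/ad7879.h>

        /* Illustrative sketch only -- not from any real board file. */
        static struct ad7879_platform_data example_ad7879_pdata = {
                .x_plate_ohms           = 620,
                .pressure_min           = 0,
                .pressure_max           = 10000,
                .first_conversion_delay = 3,    /* AD7879_FCD() field, 0..3 */
                .acquisition_time       = 1,    /* AD7879_ACQ() field, 0..3 */
                .median                 = 2,    /* AD7879_MFS() median filter size */
                .averaging              = 1,    /* AD7879_AVG() number of averages */
                .pen_down_acc_interval  = 240,  /* AD7879_TMR() timer value, 0..255 */
                .gpio_output            = 1,    /* drive the GPIO as an output... */
                .gpio_default           = 1,    /* ...defaulting to high */
        };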
480 | |||
481 | static int __devexit ad7879_destroy(bus_device *bus, struct ad7879 *ts) | ||
482 | { | ||
483 | ad7879_disable(ts); | ||
484 | sysfs_remove_group(&ts->bus->dev.kobj, &ad7879_attr_group); | ||
485 | free_irq(ts->bus->irq, ts); | ||
486 | input_unregister_device(ts->input); | ||
487 | dev_dbg(&bus->dev, "unregistered touchscreen\n"); | ||
488 | |||
489 | return 0; | ||
490 | } | ||
491 | |||
492 | #ifdef CONFIG_PM | ||
493 | static int ad7879_suspend(bus_device *bus, pm_message_t message) | ||
494 | { | ||
495 | struct ad7879 *ts = dev_get_drvdata(&bus->dev); | ||
496 | |||
497 | ad7879_disable(ts); | ||
498 | |||
499 | return 0; | ||
500 | } | ||
501 | |||
502 | static int ad7879_resume(bus_device *bus) | ||
503 | { | ||
504 | struct ad7879 *ts = dev_get_drvdata(&bus->dev); | ||
505 | |||
506 | ad7879_enable(ts); | ||
507 | |||
508 | return 0; | ||
509 | } | ||
510 | #else | ||
511 | #define ad7879_suspend NULL | ||
512 | #define ad7879_resume NULL | ||
513 | #endif | ||
514 | |||
515 | #if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE) | ||
516 | #define MAX_SPI_FREQ_HZ 5000000 | ||
517 | #define AD7879_CMD_MAGIC 0xE000 | ||
518 | #define AD7879_CMD_READ (1 << 10) | ||
519 | #define AD7879_WRITECMD(reg) (AD7879_CMD_MAGIC | (reg & 0xF)) | ||
520 | #define AD7879_READCMD(reg) (AD7879_CMD_MAGIC | AD7879_CMD_READ | (reg & 0xF)) | ||
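As a worked example of the command framing above, the read command for the X+ result register decodes as follows; the driver builds exactly this word in ad7879_read() and ad7879_setup_ts_def_msg():

        /* AD7879_READCMD(AD7879_REG_XPLUS):
         *   0xE000  (AD7879_CMD_MAGIC)
         * | 0x0400  (AD7879_CMD_READ, bit 10)
         * | 0x0008  (register 8, X+)
         * = 0xE408, sent as the first 16-bit transfer of the message
         */
        u16 cmd = AD7879_READCMD(AD7879_REG_XPLUS);     /* == 0xE408 */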
521 | |||
522 | struct ser_req { | ||
523 | u16 command; | ||
524 | u16 data; | ||
525 | struct spi_message msg; | ||
526 | struct spi_transfer xfer[2]; | ||
527 | }; | ||
528 | |||
529 | /* | ||
530 | * ad7879_read/write are only used for initial setup and for sysfs controls. | ||
531 | * The main traffic is done in ad7879_collect(). | ||
532 | */ | ||
533 | |||
534 | static int ad7879_read(struct spi_device *spi, u8 reg) | ||
535 | { | ||
536 | struct ser_req *req; | ||
537 | int status, ret; | ||
538 | |||
539 | req = kzalloc(sizeof *req, GFP_KERNEL); | ||
540 | if (!req) | ||
541 | return -ENOMEM; | ||
542 | |||
543 | spi_message_init(&req->msg); | ||
544 | |||
545 | req->command = (u16) AD7879_READCMD(reg); | ||
546 | req->xfer[0].tx_buf = &req->command; | ||
547 | req->xfer[0].len = 2; | ||
548 | |||
549 | req->xfer[1].rx_buf = &req->data; | ||
550 | req->xfer[1].len = 2; | ||
551 | |||
552 | spi_message_add_tail(&req->xfer[0], &req->msg); | ||
553 | spi_message_add_tail(&req->xfer[1], &req->msg); | ||
554 | |||
555 | status = spi_sync(spi, &req->msg); | ||
556 | ret = status ? : req->data; | ||
557 | |||
558 | kfree(req); | ||
559 | |||
560 | return ret; | ||
561 | } | ||
562 | |||
563 | static int ad7879_write(struct spi_device *spi, u8 reg, u16 val) | ||
564 | { | ||
565 | struct ser_req *req; | ||
566 | int status; | ||
567 | |||
568 | req = kzalloc(sizeof *req, GFP_KERNEL); | ||
569 | if (!req) | ||
570 | return -ENOMEM; | ||
571 | |||
572 | spi_message_init(&req->msg); | ||
573 | |||
574 | req->command = (u16) AD7879_WRITECMD(reg); | ||
575 | req->xfer[0].tx_buf = &req->command; | ||
576 | req->xfer[0].len = 2; | ||
577 | |||
578 | req->data = val; | ||
579 | req->xfer[1].tx_buf = &req->data; | ||
580 | req->xfer[1].len = 2; | ||
581 | |||
582 | spi_message_add_tail(&req->xfer[0], &req->msg); | ||
583 | spi_message_add_tail(&req->xfer[1], &req->msg); | ||
584 | |||
585 | status = spi_sync(spi, &req->msg); | ||
586 | |||
587 | kfree(req); | ||
588 | |||
589 | return status; | ||
590 | } | ||
591 | |||
592 | static void ad7879_collect(struct ad7879 *ts) | ||
593 | { | ||
594 | int status = spi_sync(ts->bus, &ts->msg); | ||
595 | |||
596 | if (status) | ||
597 | dev_err(&ts->bus->dev, "spi_sync --> %d\n", status); | ||
598 | } | ||
599 | |||
600 | static void ad7879_setup_ts_def_msg(struct ad7879 *ts) | ||
601 | { | ||
602 | struct spi_message *m; | ||
603 | int i; | ||
604 | |||
605 | ts->cmd = (u16) AD7879_READCMD(AD7879_REG_XPLUS); | ||
606 | |||
607 | m = &ts->msg; | ||
608 | spi_message_init(m); | ||
609 | ts->xfer[0].tx_buf = &ts->cmd; | ||
610 | ts->xfer[0].len = 2; | ||
611 | |||
612 | spi_message_add_tail(&ts->xfer[0], m); | ||
613 | |||
614 | for (i = 0; i < AD7879_NR_SENSE; i++) { | ||
615 | ts->xfer[i + 1].rx_buf = &ts->conversion_data[i]; | ||
616 | ts->xfer[i + 1].len = 2; | ||
617 | spi_message_add_tail(&ts->xfer[i + 1], m); | ||
618 | } | ||
619 | } | ||
620 | |||
621 | static int __devinit ad7879_probe(struct spi_device *spi) | ||
622 | { | ||
623 | struct ad7879 *ts; | ||
624 | int error; | ||
625 | |||
626 | /* don't exceed max specified SPI CLK frequency */ | ||
627 | if (spi->max_speed_hz > MAX_SPI_FREQ_HZ) { | ||
628 | dev_err(&spi->dev, "SPI CLK %d Hz?\n", spi->max_speed_hz); | ||
629 | return -EINVAL; | ||
630 | } | ||
631 | |||
632 | ts = kzalloc(sizeof(struct ad7879), GFP_KERNEL); | ||
633 | if (!ts) | ||
634 | return -ENOMEM; | ||
635 | |||
636 | dev_set_drvdata(&spi->dev, ts); | ||
637 | ts->bus = spi; | ||
638 | |||
639 | ad7879_setup_ts_def_msg(ts); | ||
640 | |||
641 | error = ad7879_construct(spi, ts); | ||
642 | if (error) { | ||
643 | dev_set_drvdata(&spi->dev, NULL); | ||
644 | kfree(ts); | ||
645 | } | ||
646 | |||
647 | return error; | ||
648 | } | ||
649 | |||
650 | static int __devexit ad7879_remove(struct spi_device *spi) | ||
651 | { | ||
652 | struct ad7879 *ts = dev_get_drvdata(&spi->dev); | ||
653 | |||
654 | ad7879_destroy(spi, ts); | ||
655 | dev_set_drvdata(&spi->dev, NULL); | ||
656 | kfree(ts); | ||
657 | |||
658 | return 0; | ||
659 | } | ||
660 | |||
661 | static struct spi_driver ad7879_driver = { | ||
662 | .driver = { | ||
663 | .name = "ad7879", | ||
664 | .bus = &spi_bus_type, | ||
665 | .owner = THIS_MODULE, | ||
666 | }, | ||
667 | .probe = ad7879_probe, | ||
668 | .remove = __devexit_p(ad7879_remove), | ||
669 | .suspend = ad7879_suspend, | ||
670 | .resume = ad7879_resume, | ||
671 | }; | ||
672 | |||
673 | static int __init ad7879_init(void) | ||
674 | { | ||
675 | return spi_register_driver(&ad7879_driver); | ||
676 | } | ||
677 | module_init(ad7879_init); | ||
678 | |||
679 | static void __exit ad7879_exit(void) | ||
680 | { | ||
681 | spi_unregister_driver(&ad7879_driver); | ||
682 | } | ||
683 | module_exit(ad7879_exit); | ||
684 | |||
685 | #elif defined(CONFIG_TOUCHSCREEN_AD7879_I2C) || defined(CONFIG_TOUCHSCREEN_AD7879_I2C_MODULE) | ||
686 | |||
687 | /* All registers are word-sized. | ||
688 | * AD7879 uses a high-byte first convention. | ||
689 | */ | ||
690 | static int ad7879_read(struct i2c_client *client, u8 reg) | ||
691 | { | ||
692 | return swab16(i2c_smbus_read_word_data(client, reg)); | ||
693 | } | ||
694 | |||
695 | static int ad7879_write(struct i2c_client *client, u8 reg, u16 val) | ||
696 | { | ||
697 | return i2c_smbus_write_word_data(client, reg, swab16(val)); | ||
698 | } | ||
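A short note on the swab16() calls above: i2c_smbus_read_word_data() and i2c_smbus_write_word_data() transfer the low byte first, while the AD7879 sends and expects the high byte first, so every 16-bit value has to be byte-swapped. For example (the register value is assumed, purely for illustration):

        /* Device register holds 0x1234 and sends it high byte first
         * (0x12 then 0x34).  The SMBus word access returns it low byte
         * first, i.e. 0x3412, so swab16() restores the real value.
         */
        u16 raw = 0x3412;               /* what i2c_smbus_read_word_data() returns */
        u16 val = swab16(raw);          /* 0x1234, the actual register contents */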
699 | |||
700 | static void ad7879_collect(struct ad7879 *ts) | ||
701 | { | ||
702 | int i; | ||
703 | |||
704 | for (i = 0; i < AD7879_NR_SENSE; i++) | ||
705 | ts->conversion_data[i] = ad7879_read(ts->bus, | ||
706 | AD7879_REG_XPLUS + i); | ||
707 | } | ||
708 | |||
709 | static int __devinit ad7879_probe(struct i2c_client *client, | ||
710 | const struct i2c_device_id *id) | ||
711 | { | ||
712 | struct ad7879 *ts; | ||
713 | int error; | ||
714 | |||
715 | if (!i2c_check_functionality(client->adapter, | ||
716 | I2C_FUNC_SMBUS_WORD_DATA)) { | ||
717 | dev_err(&client->dev, "SMBUS Word Data not Supported\n"); | ||
718 | return -EIO; | ||
719 | } | ||
720 | |||
721 | ts = kzalloc(sizeof(struct ad7879), GFP_KERNEL); | ||
722 | if (!ts) | ||
723 | return -ENOMEM; | ||
724 | |||
725 | i2c_set_clientdata(client, ts); | ||
726 | ts->bus = client; | ||
727 | |||
728 | error = ad7879_construct(client, ts); | ||
729 | if (error) { | ||
730 | i2c_set_clientdata(client, NULL); | ||
731 | kfree(ts); | ||
732 | } | ||
733 | |||
734 | return error; | ||
735 | } | ||
736 | |||
737 | static int __devexit ad7879_remove(struct i2c_client *client) | ||
738 | { | ||
739 | struct ad7879 *ts = dev_get_drvdata(&client->dev); | ||
740 | |||
741 | ad7879_destroy(client, ts); | ||
742 | i2c_set_clientdata(client, NULL); | ||
743 | kfree(ts); | ||
744 | |||
745 | return 0; | ||
746 | } | ||
747 | |||
748 | static const struct i2c_device_id ad7879_id[] = { | ||
749 | { "ad7879", 0 }, | ||
750 | { } | ||
751 | }; | ||
752 | MODULE_DEVICE_TABLE(i2c, ad7879_id); | ||
753 | |||
754 | static struct i2c_driver ad7879_driver = { | ||
755 | .driver = { | ||
756 | .name = "ad7879", | ||
757 | .owner = THIS_MODULE, | ||
758 | }, | ||
759 | .probe = ad7879_probe, | ||
760 | .remove = __devexit_p(ad7879_remove), | ||
761 | .suspend = ad7879_suspend, | ||
762 | .resume = ad7879_resume, | ||
763 | .id_table = ad7879_id, | ||
764 | }; | ||
765 | |||
766 | static int __init ad7879_init(void) | ||
767 | { | ||
768 | return i2c_add_driver(&ad7879_driver); | ||
769 | } | ||
770 | module_init(ad7879_init); | ||
771 | |||
772 | static void __exit ad7879_exit(void) | ||
773 | { | ||
774 | i2c_del_driver(&ad7879_driver); | ||
775 | } | ||
776 | module_exit(ad7879_exit); | ||
777 | #endif | ||
778 | |||
779 | MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>"); | ||
780 | MODULE_DESCRIPTION("AD7879(-1) touchscreen Driver"); | ||
781 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c index 056ac77e2cf0..2b01e56568f8 100644 --- a/drivers/input/touchscreen/ads7846.c +++ b/drivers/input/touchscreen/ads7846.c | |||
@@ -127,6 +127,8 @@ struct ads7846 { | |||
127 | void (*filter_cleanup)(void *data); | 127 | void (*filter_cleanup)(void *data); |
128 | int (*get_pendown_state)(void); | 128 | int (*get_pendown_state)(void); |
129 | int gpio_pendown; | 129 | int gpio_pendown; |
130 | |||
131 | void (*wait_for_sync)(void); | ||
130 | }; | 132 | }; |
131 | 133 | ||
132 | /* leave chip selected when we're done, for quicker re-select? */ | 134 | /* leave chip selected when we're done, for quicker re-select? */ |
@@ -511,6 +513,10 @@ static int get_pendown_state(struct ads7846 *ts) | |||
511 | return !gpio_get_value(ts->gpio_pendown); | 513 | return !gpio_get_value(ts->gpio_pendown); |
512 | } | 514 | } |
513 | 515 | ||
516 | static void null_wait_for_sync(void) | ||
517 | { | ||
518 | } | ||
519 | |||
514 | /* | 520 | /* |
515 | * PENIRQ only kicks the timer. The timer only reissues the SPI transfer, | 521 | * PENIRQ only kicks the timer. The timer only reissues the SPI transfer, |
516 | * to retrieve touchscreen status. | 522 | * to retrieve touchscreen status. |
@@ -686,6 +692,7 @@ static void ads7846_rx_val(void *ads) | |||
686 | default: | 692 | default: |
687 | BUG(); | 693 | BUG(); |
688 | } | 694 | } |
695 | ts->wait_for_sync(); | ||
689 | status = spi_async(ts->spi, m); | 696 | status = spi_async(ts->spi, m); |
690 | if (status) | 697 | if (status) |
691 | dev_err(&ts->spi->dev, "spi_async --> %d\n", | 698 | dev_err(&ts->spi->dev, "spi_async --> %d\n", |
@@ -723,6 +730,7 @@ static enum hrtimer_restart ads7846_timer(struct hrtimer *handle) | |||
723 | } else { | 730 | } else { |
724 | /* pen is still down, continue with the measurement */ | 731 | /* pen is still down, continue with the measurement */ |
725 | ts->msg_idx = 0; | 732 | ts->msg_idx = 0; |
733 | ts->wait_for_sync(); | ||
726 | status = spi_async(ts->spi, &ts->msg[0]); | 734 | status = spi_async(ts->spi, &ts->msg[0]); |
727 | if (status) | 735 | if (status) |
728 | dev_err(&ts->spi->dev, "spi_async --> %d\n", status); | 736 | dev_err(&ts->spi->dev, "spi_async --> %d\n", status); |
@@ -746,7 +754,7 @@ static irqreturn_t ads7846_irq(int irq, void *handle) | |||
746 | * that here. (The "generic irq" framework may help...) | 754 | * that here. (The "generic irq" framework may help...) |
747 | */ | 755 | */ |
748 | ts->irq_disabled = 1; | 756 | ts->irq_disabled = 1; |
749 | disable_irq(ts->spi->irq); | 757 | disable_irq_nosync(ts->spi->irq); |
750 | ts->pending = 1; | 758 | ts->pending = 1; |
751 | hrtimer_start(&ts->timer, ktime_set(0, TS_POLL_DELAY), | 759 | hrtimer_start(&ts->timer, ktime_set(0, TS_POLL_DELAY), |
752 | HRTIMER_MODE_REL); | 760 | HRTIMER_MODE_REL); |
@@ -947,6 +955,8 @@ static int __devinit ads7846_probe(struct spi_device *spi) | |||
947 | ts->penirq_recheck_delay_usecs = | 955 | ts->penirq_recheck_delay_usecs = |
948 | pdata->penirq_recheck_delay_usecs; | 956 | pdata->penirq_recheck_delay_usecs; |
949 | 957 | ||
958 | ts->wait_for_sync = pdata->wait_for_sync ? : null_wait_for_sync; | ||
959 | |||
950 | snprintf(ts->phys, sizeof(ts->phys), "%s/input0", dev_name(&spi->dev)); | 960 | snprintf(ts->phys, sizeof(ts->phys), "%s/input0", dev_name(&spi->dev)); |
951 | 961 | ||
952 | input_dev->name = "ADS784x Touchscreen"; | 962 | input_dev->name = "ADS784x Touchscreen"; |
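The new wait_for_sync() hook defaults to the empty null_wait_for_sync() when the board does not provide one. A board that needs to gate the SPI transfer on some external event could supply the callback through platform data, roughly as in this hypothetical sketch (the board-specific names are invented for illustration):

        /* Hypothetical board-support sketch -- not part of this patch. */
        static void myboard_ts_wait_for_sync(void)
        {
                /* e.g. wait for a VSYNC/DMA quiet window before sampling */
        }

        static struct ads7846_platform_data myboard_ts_info = {
                .model          = 7846,
                .x_plate_ohms   = 400,
                .wait_for_sync  = myboard_ts_wait_for_sync,
        };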
diff --git a/drivers/input/touchscreen/da9034-ts.c b/drivers/input/touchscreen/da9034-ts.c index fa67d782c3c3..3ffd4c4b170c 100644 --- a/drivers/input/touchscreen/da9034-ts.c +++ b/drivers/input/touchscreen/da9034-ts.c | |||
@@ -3,6 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Copyright (C) 2006-2008 Marvell International Ltd. | 4 | * Copyright (C) 2006-2008 Marvell International Ltd. |
5 | * Fengwei Yin <fengwei.yin@marvell.com> | 5 | * Fengwei Yin <fengwei.yin@marvell.com> |
6 | * Bin Yang <bin.yang@marvell.com> | ||
6 | * Eric Miao <eric.miao@marvell.com> | 7 | * Eric Miao <eric.miao@marvell.com> |
7 | * | 8 | * |
8 | * This program is free software; you can redistribute it and/or modify | 9 | * This program is free software; you can redistribute it and/or modify |
@@ -175,6 +176,16 @@ static void da9034_event_handler(struct da9034_touch *touch, int event) | |||
175 | goto err_reset; | 176 | goto err_reset; |
176 | 177 | ||
177 | touch->state = STATE_STOP; | 178 | touch->state = STATE_STOP; |
179 | |||
180 | /* FIXME: PEN_{UP/DOWN} events are expected to be | ||
181 | * available once TSI is stopped, but this is found to be | ||
182 | * not always true; delaying and simulating such an event | ||
183 | * here is more reliable. | ||
184 | */ | ||
185 | mdelay(1); | ||
186 | da9034_event_handler(touch, | ||
187 | is_pen_down(touch) ? EVENT_PEN_DOWN : | ||
188 | EVENT_PEN_UP); | ||
178 | break; | 189 | break; |
179 | 190 | ||
180 | case STATE_STOP: | 191 | case STATE_STOP: |
@@ -189,8 +200,6 @@ static void da9034_event_handler(struct da9034_touch *touch, int event) | |||
189 | report_pen_up(touch); | 200 | report_pen_up(touch); |
190 | touch->state = STATE_IDLE; | 201 | touch->state = STATE_IDLE; |
191 | } | 202 | } |
192 | |||
193 | input_sync(touch->input_dev); | ||
194 | break; | 203 | break; |
195 | 204 | ||
196 | case STATE_WAIT: | 205 | case STATE_WAIT: |
@@ -200,8 +209,10 @@ static void da9034_event_handler(struct da9034_touch *touch, int event) | |||
200 | if (is_pen_down(touch)) { | 209 | if (is_pen_down(touch)) { |
201 | start_tsi(touch); | 210 | start_tsi(touch); |
202 | touch->state = STATE_BUSY; | 211 | touch->state = STATE_BUSY; |
203 | } else | 212 | } else { |
213 | report_pen_up(touch); | ||
204 | touch->state = STATE_IDLE; | 214 | touch->state = STATE_IDLE; |
215 | } | ||
205 | break; | 216 | break; |
206 | } | 217 | } |
207 | return; | 218 | return; |
@@ -226,16 +237,12 @@ static int da9034_touch_notifier(struct notifier_block *nb, | |||
226 | struct da9034_touch *touch = | 237 | struct da9034_touch *touch = |
227 | container_of(nb, struct da9034_touch, notifier); | 238 | container_of(nb, struct da9034_touch, notifier); |
228 | 239 | ||
229 | if (event & DA9034_EVENT_PEN_DOWN) { | ||
230 | if (is_pen_down(touch)) | ||
231 | da9034_event_handler(touch, EVENT_PEN_DOWN); | ||
232 | else | ||
233 | da9034_event_handler(touch, EVENT_PEN_UP); | ||
234 | } | ||
235 | |||
236 | if (event & DA9034_EVENT_TSI_READY) | 240 | if (event & DA9034_EVENT_TSI_READY) |
237 | da9034_event_handler(touch, EVENT_TSI_READY); | 241 | da9034_event_handler(touch, EVENT_TSI_READY); |
238 | 242 | ||
243 | if ((event & DA9034_EVENT_PEN_DOWN) && touch->state == STATE_IDLE) | ||
244 | da9034_event_handler(touch, EVENT_PEN_DOWN); | ||
245 | |||
239 | return 0; | 246 | return 0; |
240 | } | 247 | } |
241 | 248 | ||
@@ -385,6 +392,6 @@ static void __exit da9034_touch_exit(void) | |||
385 | module_exit(da9034_touch_exit); | 392 | module_exit(da9034_touch_exit); |
386 | 393 | ||
387 | MODULE_DESCRIPTION("Touchscreen driver for Dialog Semiconductor DA9034"); | 394 | MODULE_DESCRIPTION("Touchscreen driver for Dialog Semiconductor DA9034"); |
388 | MODULE_AUTHOR("Eric Miao <eric.miao@marvell.com>"); | 395 | MODULE_AUTHOR("Eric Miao <eric.miao@marvell.com>, Bin Yang <bin.yang@marvell.com>"); |
389 | MODULE_LICENSE("GPL"); | 396 | MODULE_LICENSE("GPL"); |
390 | MODULE_ALIAS("platform:da9034-touch"); | 397 | MODULE_ALIAS("platform:da9034-touch"); |
diff --git a/drivers/input/touchscreen/mainstone-wm97xx.c b/drivers/input/touchscreen/mainstone-wm97xx.c index 1d11e2be9ef8..4cc047a5116e 100644 --- a/drivers/input/touchscreen/mainstone-wm97xx.c +++ b/drivers/input/touchscreen/mainstone-wm97xx.c | |||
@@ -111,13 +111,12 @@ static void wm97xx_acc_pen_up(struct wm97xx *wm) | |||
111 | #else | 111 | #else |
112 | static void wm97xx_acc_pen_up(struct wm97xx *wm) | 112 | static void wm97xx_acc_pen_up(struct wm97xx *wm) |
113 | { | 113 | { |
114 | int count = 16; | 114 | unsigned int count; |
115 | |||
115 | schedule_timeout_uninterruptible(1); | 116 | schedule_timeout_uninterruptible(1); |
116 | 117 | ||
117 | while (count < 16) { | 118 | for (count = 0; count < 16; count++) |
118 | MODR; | 119 | MODR; |
119 | count--; | ||
120 | } | ||
121 | } | 120 | } |
122 | #endif | 121 | #endif |
123 | 122 | ||
@@ -162,6 +161,7 @@ static int wm97xx_acc_pen_down(struct wm97xx *wm) | |||
162 | input_report_abs(wm->input_dev, ABS_X, x & 0xfff); | 161 | input_report_abs(wm->input_dev, ABS_X, x & 0xfff); |
163 | input_report_abs(wm->input_dev, ABS_Y, y & 0xfff); | 162 | input_report_abs(wm->input_dev, ABS_Y, y & 0xfff); |
164 | input_report_abs(wm->input_dev, ABS_PRESSURE, p & 0xfff); | 163 | input_report_abs(wm->input_dev, ABS_PRESSURE, p & 0xfff); |
164 | input_report_key(wm->input_dev, BTN_TOUCH, (p != 0)); | ||
165 | input_sync(wm->input_dev); | 165 | input_sync(wm->input_dev); |
166 | reads++; | 166 | reads++; |
167 | } while (reads < cinfo[sp_idx].reads); | 167 | } while (reads < cinfo[sp_idx].reads); |
@@ -245,7 +245,7 @@ static void wm97xx_irq_enable(struct wm97xx *wm, int enable) | |||
245 | if (enable) | 245 | if (enable) |
246 | enable_irq(wm->pen_irq); | 246 | enable_irq(wm->pen_irq); |
247 | else | 247 | else |
248 | disable_irq(wm->pen_irq); | 248 | disable_irq_nosync(wm->pen_irq); |
249 | } | 249 | } |
250 | 250 | ||
251 | static struct wm97xx_mach_ops mainstone_mach_ops = { | 251 | static struct wm97xx_mach_ops mainstone_mach_ops = { |
diff --git a/drivers/input/touchscreen/ucb1400_ts.c b/drivers/input/touchscreen/ucb1400_ts.c index 54986627def0..e868264fe799 100644 --- a/drivers/input/touchscreen/ucb1400_ts.c +++ b/drivers/input/touchscreen/ucb1400_ts.c | |||
@@ -151,12 +151,14 @@ static void ucb1400_ts_evt_add(struct input_dev *idev, u16 pressure, u16 x, u16 | |||
151 | input_report_abs(idev, ABS_X, x); | 151 | input_report_abs(idev, ABS_X, x); |
152 | input_report_abs(idev, ABS_Y, y); | 152 | input_report_abs(idev, ABS_Y, y); |
153 | input_report_abs(idev, ABS_PRESSURE, pressure); | 153 | input_report_abs(idev, ABS_PRESSURE, pressure); |
154 | input_report_key(idev, BTN_TOUCH, 1); | ||
154 | input_sync(idev); | 155 | input_sync(idev); |
155 | } | 156 | } |
156 | 157 | ||
157 | static void ucb1400_ts_event_release(struct input_dev *idev) | 158 | static void ucb1400_ts_event_release(struct input_dev *idev) |
158 | { | 159 | { |
159 | input_report_abs(idev, ABS_PRESSURE, 0); | 160 | input_report_abs(idev, ABS_PRESSURE, 0); |
161 | input_report_key(idev, BTN_TOUCH, 0); | ||
160 | input_sync(idev); | 162 | input_sync(idev); |
161 | } | 163 | } |
162 | 164 | ||
@@ -377,7 +379,8 @@ static int ucb1400_ts_probe(struct platform_device *dev) | |||
377 | ucb->ts_idev->id.product = ucb->id; | 379 | ucb->ts_idev->id.product = ucb->id; |
378 | ucb->ts_idev->open = ucb1400_ts_open; | 380 | ucb->ts_idev->open = ucb1400_ts_open; |
379 | ucb->ts_idev->close = ucb1400_ts_close; | 381 | ucb->ts_idev->close = ucb1400_ts_close; |
380 | ucb->ts_idev->evbit[0] = BIT_MASK(EV_ABS); | 382 | ucb->ts_idev->evbit[0] = BIT_MASK(EV_ABS) | BIT_MASK(EV_KEY); |
383 | ucb->ts_idev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH); | ||
381 | 384 | ||
382 | ucb1400_adc_enable(ucb->ac97); | 385 | ucb1400_adc_enable(ucb->ac97); |
383 | x_res = ucb1400_ts_read_xres(ucb); | 386 | x_res = ucb1400_ts_read_xres(ucb); |
diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c index d15aa11d7056..69af8385ab14 100644 --- a/drivers/input/touchscreen/wm97xx-core.c +++ b/drivers/input/touchscreen/wm97xx-core.c | |||
@@ -370,8 +370,7 @@ static int wm97xx_init_pen_irq(struct wm97xx *wm) | |||
370 | * provided. */ | 370 | * provided. */ |
371 | BUG_ON(!wm->mach_ops->irq_enable); | 371 | BUG_ON(!wm->mach_ops->irq_enable); |
372 | 372 | ||
373 | if (request_irq(wm->pen_irq, wm97xx_pen_interrupt, | 373 | if (request_irq(wm->pen_irq, wm97xx_pen_interrupt, IRQF_SHARED, |
374 | IRQF_SHARED | IRQF_SAMPLE_RANDOM, | ||
375 | "wm97xx-pen", wm)) { | 374 | "wm97xx-pen", wm)) { |
376 | dev_err(wm->dev, | 375 | dev_err(wm->dev, |
377 | "Failed to register pen down interrupt, polling"); | 376 | "Failed to register pen down interrupt, polling"); |
@@ -409,6 +408,7 @@ static int wm97xx_read_samples(struct wm97xx *wm) | |||
409 | wm->pen_is_down = 0; | 408 | wm->pen_is_down = 0; |
410 | dev_dbg(wm->dev, "pen up\n"); | 409 | dev_dbg(wm->dev, "pen up\n"); |
411 | input_report_abs(wm->input_dev, ABS_PRESSURE, 0); | 410 | input_report_abs(wm->input_dev, ABS_PRESSURE, 0); |
411 | input_report_key(wm->input_dev, BTN_TOUCH, 0); | ||
412 | input_sync(wm->input_dev); | 412 | input_sync(wm->input_dev); |
413 | } else if (!(rc & RC_AGAIN)) { | 413 | } else if (!(rc & RC_AGAIN)) { |
414 | /* We need high frequency updates only while | 414 | /* We need high frequency updates only while |
@@ -433,6 +433,7 @@ static int wm97xx_read_samples(struct wm97xx *wm) | |||
433 | input_report_abs(wm->input_dev, ABS_X, data.x & 0xfff); | 433 | input_report_abs(wm->input_dev, ABS_X, data.x & 0xfff); |
434 | input_report_abs(wm->input_dev, ABS_Y, data.y & 0xfff); | 434 | input_report_abs(wm->input_dev, ABS_Y, data.y & 0xfff); |
435 | input_report_abs(wm->input_dev, ABS_PRESSURE, data.p & 0xfff); | 435 | input_report_abs(wm->input_dev, ABS_PRESSURE, data.p & 0xfff); |
436 | input_report_key(wm->input_dev, BTN_TOUCH, 1); | ||
436 | input_sync(wm->input_dev); | 437 | input_sync(wm->input_dev); |
437 | wm->pen_is_down = 1; | 438 | wm->pen_is_down = 1; |
438 | wm->ts_reader_interval = wm->ts_reader_min_interval; | 439 | wm->ts_reader_interval = wm->ts_reader_min_interval; |
@@ -628,18 +629,21 @@ static int wm97xx_probe(struct device *dev) | |||
628 | wm->input_dev->phys = "wm97xx"; | 629 | wm->input_dev->phys = "wm97xx"; |
629 | wm->input_dev->open = wm97xx_ts_input_open; | 630 | wm->input_dev->open = wm97xx_ts_input_open; |
630 | wm->input_dev->close = wm97xx_ts_input_close; | 631 | wm->input_dev->close = wm97xx_ts_input_close; |
631 | set_bit(EV_ABS, wm->input_dev->evbit); | 632 | |
632 | set_bit(ABS_X, wm->input_dev->absbit); | 633 | __set_bit(EV_ABS, wm->input_dev->evbit); |
633 | set_bit(ABS_Y, wm->input_dev->absbit); | 634 | __set_bit(EV_KEY, wm->input_dev->evbit); |
634 | set_bit(ABS_PRESSURE, wm->input_dev->absbit); | 635 | __set_bit(BTN_TOUCH, wm->input_dev->keybit); |
636 | |||
635 | input_set_abs_params(wm->input_dev, ABS_X, abs_x[0], abs_x[1], | 637 | input_set_abs_params(wm->input_dev, ABS_X, abs_x[0], abs_x[1], |
636 | abs_x[2], 0); | 638 | abs_x[2], 0); |
637 | input_set_abs_params(wm->input_dev, ABS_Y, abs_y[0], abs_y[1], | 639 | input_set_abs_params(wm->input_dev, ABS_Y, abs_y[0], abs_y[1], |
638 | abs_y[2], 0); | 640 | abs_y[2], 0); |
639 | input_set_abs_params(wm->input_dev, ABS_PRESSURE, abs_p[0], abs_p[1], | 641 | input_set_abs_params(wm->input_dev, ABS_PRESSURE, abs_p[0], abs_p[1], |
640 | abs_p[2], 0); | 642 | abs_p[2], 0); |
643 | |||
641 | input_set_drvdata(wm->input_dev, wm); | 644 | input_set_drvdata(wm->input_dev, wm); |
642 | wm->input_dev->dev.parent = dev; | 645 | wm->input_dev->dev.parent = dev; |
646 | |||
643 | ret = input_register_device(wm->input_dev); | 647 | ret = input_register_device(wm->input_dev); |
644 | if (ret < 0) | 648 | if (ret < 0) |
645 | goto dev_alloc_err; | 649 | goto dev_alloc_err; |
diff --git a/drivers/input/touchscreen/zylonite-wm97xx.c b/drivers/input/touchscreen/zylonite-wm97xx.c new file mode 100644 index 000000000000..41e4359c277c --- /dev/null +++ b/drivers/input/touchscreen/zylonite-wm97xx.c | |||
@@ -0,0 +1,240 @@ | |||
1 | /* | ||
2 | * zylonite-wm97xx.c -- Zylonite Continuous Touch screen driver | ||
3 | * | ||
4 | * Copyright 2004, 2007, 2008 Wolfson Microelectronics PLC. | ||
5 | * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> | ||
6 | * Parts Copyright : Ian Molton <spyro@f2s.com> | ||
7 | * Andrew Zabolotny <zap@homelink.ru> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify it | ||
10 | * under the terms of the GNU General Public License as published by the | ||
11 | * Free Software Foundation; either version 2 of the License, or (at your | ||
12 | * option) any later version. | ||
13 | * | ||
14 | * Notes: | ||
15 | * This is a wm97xx extended touch driver supporting interrupt driven | ||
16 | * and continuous operation on Marvell Zylonite development systems | ||
17 | * (which have a WM9713 on board). | ||
18 | */ | ||
19 | |||
20 | #include <linux/module.h> | ||
21 | #include <linux/moduleparam.h> | ||
22 | #include <linux/kernel.h> | ||
23 | #include <linux/init.h> | ||
24 | #include <linux/delay.h> | ||
25 | #include <linux/irq.h> | ||
26 | #include <linux/interrupt.h> | ||
27 | #include <linux/io.h> | ||
28 | #include <linux/wm97xx.h> | ||
29 | |||
30 | #include <mach/hardware.h> | ||
31 | #include <mach/mfp.h> | ||
32 | #include <mach/regs-ac97.h> | ||
33 | |||
34 | struct continuous { | ||
35 | u16 id; /* codec id */ | ||
36 | u8 code; /* continuous code */ | ||
37 | u8 reads; /* number of coord reads per read cycle */ | ||
38 | u32 speed; /* number of coords per second */ | ||
39 | }; | ||
40 | |||
41 | #define WM_READS(sp) ((sp / HZ) + 1) | ||
42 | |||
43 | static const struct continuous cinfo[] = { | ||
44 | { WM9713_ID2, 0, WM_READS(94), 94 }, | ||
45 | { WM9713_ID2, 1, WM_READS(120), 120 }, | ||
46 | { WM9713_ID2, 2, WM_READS(154), 154 }, | ||
47 | { WM9713_ID2, 3, WM_READS(188), 188 }, | ||
48 | }; | ||
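WM_READS() sizes the per-cycle read count from the continuous sample rate and the kernel tick rate: assuming HZ = 100, the 94 Hz entry yields 94/100 + 1 = 1 read per cycle, while the 188 Hz entry yields 188/100 + 1 = 2 reads per cycle.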
49 | |||
50 | /* continuous speed index */ | ||
51 | static int sp_idx; | ||
52 | |||
53 | /* | ||
54 | * Pen sampling frequency (Hz) in continuous mode. | ||
55 | */ | ||
56 | static int cont_rate = 200; | ||
57 | module_param(cont_rate, int, 0); | ||
58 | MODULE_PARM_DESC(cont_rate, "Sampling rate in continuous mode (Hz)"); | ||
59 | |||
60 | /* | ||
61 | * Pressure readback. | ||
62 | * | ||
63 | * Set to 1 to read back pen down pressure | ||
64 | */ | ||
65 | static int pressure; | ||
66 | module_param(pressure, int, 0); | ||
67 | MODULE_PARM_DESC(pressure, "Pressure readback (1 = pressure, 0 = no pressure)"); | ||
68 | |||
69 | /* | ||
70 | * AC97 touch data slot. | ||
71 | * | ||
72 | * Touch screen readback data ac97 slot | ||
73 | */ | ||
74 | static int ac97_touch_slot = 5; | ||
75 | module_param(ac97_touch_slot, int, 0); | ||
76 | MODULE_PARM_DESC(ac97_touch_slot, "Touch screen data slot AC97 number"); | ||
77 | |||
78 | |||
79 | /* flush AC97 slot 5 FIFO machines */ | ||
80 | static void wm97xx_acc_pen_up(struct wm97xx *wm) | ||
81 | { | ||
82 | int i; | ||
83 | |||
84 | msleep(1); | ||
85 | |||
86 | for (i = 0; i < 16; i++) | ||
87 | MODR; | ||
88 | } | ||
89 | |||
90 | static int wm97xx_acc_pen_down(struct wm97xx *wm) | ||
91 | { | ||
92 | u16 x, y, p = 0x100 | WM97XX_ADCSEL_PRES; | ||
93 | int reads = 0; | ||
94 | static u16 last, tries; | ||
95 | |||
96 | /* When the AC97 queue has been drained we need to allow time | ||
97 | * to buffer up samples, otherwise we end up spinning, polling | ||
98 | * for samples. The controller can't have a suitably low | ||
99 | * threshold set to use the notifications it gives. | ||
100 | */ | ||
101 | msleep(1); | ||
102 | |||
103 | if (tries > 5) { | ||
104 | tries = 0; | ||
105 | return RC_PENUP; | ||
106 | } | ||
107 | |||
108 | x = MODR; | ||
109 | if (x == last) { | ||
110 | tries++; | ||
111 | return RC_AGAIN; | ||
112 | } | ||
113 | last = x; | ||
114 | do { | ||
115 | if (reads) | ||
116 | x = MODR; | ||
117 | y = MODR; | ||
118 | if (pressure) | ||
119 | p = MODR; | ||
120 | |||
121 | /* are samples valid */ | ||
122 | if ((x & WM97XX_ADCSRC_MASK) != WM97XX_ADCSEL_X || | ||
123 | (y & WM97XX_ADCSRC_MASK) != WM97XX_ADCSEL_Y || | ||
124 | (p & WM97XX_ADCSRC_MASK) != WM97XX_ADCSEL_PRES) | ||
125 | goto up; | ||
126 | |||
127 | /* coordinate is good */ | ||
128 | tries = 0; | ||
129 | input_report_abs(wm->input_dev, ABS_X, x & 0xfff); | ||
130 | input_report_abs(wm->input_dev, ABS_Y, y & 0xfff); | ||
131 | input_report_abs(wm->input_dev, ABS_PRESSURE, p & 0xfff); | ||
132 | input_report_key(wm->input_dev, BTN_TOUCH, (p != 0)); | ||
133 | input_sync(wm->input_dev); | ||
134 | reads++; | ||
135 | } while (reads < cinfo[sp_idx].reads); | ||
136 | up: | ||
137 | return RC_PENDOWN | RC_AGAIN; | ||
138 | } | ||
139 | |||
140 | static int wm97xx_acc_startup(struct wm97xx *wm) | ||
141 | { | ||
142 | int idx; | ||
143 | |||
144 | /* check we have a codec */ | ||
145 | if (wm->ac97 == NULL) | ||
146 | return -ENODEV; | ||
147 | |||
148 | /* Go you big red fire engine */ | ||
149 | for (idx = 0; idx < ARRAY_SIZE(cinfo); idx++) { | ||
150 | if (wm->id != cinfo[idx].id) | ||
151 | continue; | ||
152 | sp_idx = idx; | ||
153 | if (cont_rate <= cinfo[idx].speed) | ||
154 | break; | ||
155 | } | ||
156 | wm->acc_rate = cinfo[sp_idx].code; | ||
157 | wm->acc_slot = ac97_touch_slot; | ||
158 | dev_info(wm->dev, | ||
159 | "zylonite accelerated touchscreen driver, %d samples/sec\n", | ||
160 | cinfo[sp_idx].speed); | ||
161 | |||
162 | return 0; | ||
163 | } | ||
164 | |||
165 | static void wm97xx_irq_enable(struct wm97xx *wm, int enable) | ||
166 | { | ||
167 | if (enable) | ||
168 | enable_irq(wm->pen_irq); | ||
169 | else | ||
170 | disable_irq_nosync(wm->pen_irq); | ||
171 | } | ||
172 | |||
173 | static struct wm97xx_mach_ops zylonite_mach_ops = { | ||
174 | .acc_enabled = 1, | ||
175 | .acc_pen_up = wm97xx_acc_pen_up, | ||
176 | .acc_pen_down = wm97xx_acc_pen_down, | ||
177 | .acc_startup = wm97xx_acc_startup, | ||
178 | .irq_enable = wm97xx_irq_enable, | ||
179 | .irq_gpio = WM97XX_GPIO_2, | ||
180 | }; | ||
181 | |||
182 | static int zylonite_wm97xx_probe(struct platform_device *pdev) | ||
183 | { | ||
184 | struct wm97xx *wm = platform_get_drvdata(pdev); | ||
185 | int gpio_touch_irq; | ||
186 | |||
187 | if (cpu_is_pxa320()) | ||
188 | gpio_touch_irq = mfp_to_gpio(MFP_PIN_GPIO15); | ||
189 | else | ||
190 | gpio_touch_irq = mfp_to_gpio(MFP_PIN_GPIO26); | ||
191 | |||
192 | wm->pen_irq = IRQ_GPIO(gpio_touch_irq); | ||
193 | set_irq_type(IRQ_GPIO(gpio_touch_irq), IRQ_TYPE_EDGE_BOTH); | ||
194 | |||
195 | wm97xx_config_gpio(wm, WM97XX_GPIO_13, WM97XX_GPIO_IN, | ||
196 | WM97XX_GPIO_POL_HIGH, | ||
197 | WM97XX_GPIO_STICKY, | ||
198 | WM97XX_GPIO_WAKE); | ||
199 | wm97xx_config_gpio(wm, WM97XX_GPIO_2, WM97XX_GPIO_OUT, | ||
200 | WM97XX_GPIO_POL_HIGH, | ||
201 | WM97XX_GPIO_NOTSTICKY, | ||
202 | WM97XX_GPIO_NOWAKE); | ||
203 | |||
204 | return wm97xx_register_mach_ops(wm, &zylonite_mach_ops); | ||
205 | } | ||
206 | |||
207 | static int zylonite_wm97xx_remove(struct platform_device *pdev) | ||
208 | { | ||
209 | struct wm97xx *wm = platform_get_drvdata(pdev); | ||
210 | |||
211 | wm97xx_unregister_mach_ops(wm); | ||
212 | |||
213 | return 0; | ||
214 | } | ||
215 | |||
216 | static struct platform_driver zylonite_wm97xx_driver = { | ||
217 | .probe = zylonite_wm97xx_probe, | ||
218 | .remove = zylonite_wm97xx_remove, | ||
219 | .driver = { | ||
220 | .name = "wm97xx-touch", | ||
221 | }, | ||
222 | }; | ||
223 | |||
224 | static int __init zylonite_wm97xx_init(void) | ||
225 | { | ||
226 | return platform_driver_register(&zylonite_wm97xx_driver); | ||
227 | } | ||
228 | |||
229 | static void __exit zylonite_wm97xx_exit(void) | ||
230 | { | ||
231 | platform_driver_unregister(&zylonite_wm97xx_driver); | ||
232 | } | ||
233 | |||
234 | module_init(zylonite_wm97xx_init); | ||
235 | module_exit(zylonite_wm97xx_exit); | ||
236 | |||
237 | /* Module information */ | ||
238 | MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>"); | ||
239 | MODULE_DESCRIPTION("wm97xx continuous touch driver for Zylonite"); | ||
240 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/isdn/hisax/st5481_usb.c b/drivers/isdn/hisax/st5481_usb.c index ec3c0e507669..2b3a055059ea 100644 --- a/drivers/isdn/hisax/st5481_usb.c +++ b/drivers/isdn/hisax/st5481_usb.c | |||
@@ -149,14 +149,7 @@ static void usb_ctrl_complete(struct urb *urb) | |||
149 | if (ctrl_msg->dr.bRequest == USB_REQ_CLEAR_FEATURE) { | 149 | if (ctrl_msg->dr.bRequest == USB_REQ_CLEAR_FEATURE) { |
150 | /* Special case handling for pipe reset */ | 150 | /* Special case handling for pipe reset */ |
151 | le16_to_cpus(&ctrl_msg->dr.wIndex); | 151 | le16_to_cpus(&ctrl_msg->dr.wIndex); |
152 | 152 | usb_reset_endpoint(adapter->usb_dev, ctrl_msg->dr.wIndex); | |
153 | /* toggle is reset on clear */ | ||
154 | usb_settoggle(adapter->usb_dev, | ||
155 | ctrl_msg->dr.wIndex & ~USB_DIR_IN, | ||
156 | (ctrl_msg->dr.wIndex & USB_DIR_IN) == 0, | ||
157 | 0); | ||
158 | |||
159 | |||
160 | } | 153 | } |
161 | 154 | ||
162 | if (ctrl_msg->complete) | 155 | if (ctrl_msg->complete) |
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c index 102ef4a14c5f..d2109054de85 100644 --- a/drivers/leds/leds-gpio.c +++ b/drivers/leds/leds-gpio.c | |||
@@ -82,7 +82,7 @@ static int __devinit create_gpio_led(const struct gpio_led *template, | |||
82 | if (!gpio_is_valid(template->gpio)) { | 82 | if (!gpio_is_valid(template->gpio)) { |
83 | printk(KERN_INFO "Skipping unavailable LED gpio %d (%s)\n", | 83 | printk(KERN_INFO "Skipping unavailable LED gpio %d (%s)\n", |
84 | template->gpio, template->name); | 84 | template->gpio, template->name); |
85 | return; | 85 | return 0; |
86 | } | 86 | } |
87 | 87 | ||
88 | ret = gpio_request(template->gpio, template->name); | 88 | ret = gpio_request(template->gpio, template->name); |
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h index ac8a4a3741b8..af92a176697f 100644 --- a/drivers/lguest/lg.h +++ b/drivers/lguest/lg.h | |||
@@ -158,7 +158,8 @@ void free_interrupts(void); | |||
158 | /* segments.c: */ | 158 | /* segments.c: */ |
159 | void setup_default_gdt_entries(struct lguest_ro_state *state); | 159 | void setup_default_gdt_entries(struct lguest_ro_state *state); |
160 | void setup_guest_gdt(struct lg_cpu *cpu); | 160 | void setup_guest_gdt(struct lg_cpu *cpu); |
161 | void load_guest_gdt(struct lg_cpu *cpu, unsigned long table, u32 num); | 161 | void load_guest_gdt_entry(struct lg_cpu *cpu, unsigned int i, |
162 | u32 low, u32 hi); | ||
162 | void guest_load_tls(struct lg_cpu *cpu, unsigned long tls_array); | 163 | void guest_load_tls(struct lg_cpu *cpu, unsigned long tls_array); |
163 | void copy_gdt(const struct lg_cpu *cpu, struct desc_struct *gdt); | 164 | void copy_gdt(const struct lg_cpu *cpu, struct desc_struct *gdt); |
164 | void copy_gdt_tls(const struct lg_cpu *cpu, struct desc_struct *gdt); | 165 | void copy_gdt_tls(const struct lg_cpu *cpu, struct desc_struct *gdt); |
diff --git a/drivers/lguest/segments.c b/drivers/lguest/segments.c index 4f15439b7f12..7ede64ffeef9 100644 --- a/drivers/lguest/segments.c +++ b/drivers/lguest/segments.c | |||
@@ -144,18 +144,19 @@ void copy_gdt(const struct lg_cpu *cpu, struct desc_struct *gdt) | |||
144 | gdt[i] = cpu->arch.gdt[i]; | 144 | gdt[i] = cpu->arch.gdt[i]; |
145 | } | 145 | } |
146 | 146 | ||
147 | /*H:620 This is where the Guest asks us to load a new GDT (LHCALL_LOAD_GDT). | 147 | /*H:620 This is where the Guest asks us to load a new GDT entry |
148 | * We copy it from the Guest and tweak the entries. */ | 148 | * (LHCALL_LOAD_GDT_ENTRY). We tweak the entry and copy it in. */ |
149 | void load_guest_gdt(struct lg_cpu *cpu, unsigned long table, u32 num) | 149 | void load_guest_gdt_entry(struct lg_cpu *cpu, u32 num, u32 lo, u32 hi) |
150 | { | 150 | { |
151 | /* We assume the Guest has the same number of GDT entries as the | 151 | /* We assume the Guest has the same number of GDT entries as the |
152 | * Host, otherwise we'd have to dynamically allocate the Guest GDT. */ | 152 | * Host, otherwise we'd have to dynamically allocate the Guest GDT. */ |
153 | if (num > ARRAY_SIZE(cpu->arch.gdt)) | 153 | if (num > ARRAY_SIZE(cpu->arch.gdt)) |
154 | kill_guest(cpu, "too many gdt entries %i", num); | 154 | kill_guest(cpu, "too many gdt entries %i", num); |
155 | 155 | ||
156 | /* We read the whole thing in, then fix it up. */ | 156 | /* Set it up, then fix it. */ |
157 | __lgread(cpu, cpu->arch.gdt, table, num * sizeof(cpu->arch.gdt[0])); | 157 | cpu->arch.gdt[num].a = lo; |
158 | fixup_gdt_table(cpu, 0, ARRAY_SIZE(cpu->arch.gdt)); | 158 | cpu->arch.gdt[num].b = hi; |
159 | fixup_gdt_table(cpu, num, num+1); | ||
159 | /* Mark that the GDT changed so the core knows it has to copy it again, | 160 | /* Mark that the GDT changed so the core knows it has to copy it again, |
160 | * even if the Guest is run on the same CPU. */ | 161 | * even if the Guest is run on the same CPU. */ |
161 | cpu->changed |= CHANGED_GDT; | 162 | cpu->changed |= CHANGED_GDT; |
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c index a6b717644be0..1a83910f674f 100644 --- a/drivers/lguest/x86/core.c +++ b/drivers/lguest/x86/core.c | |||
@@ -324,6 +324,11 @@ static void rewrite_hypercall(struct lg_cpu *cpu) | |||
324 | u8 insn[3] = {0xcd, 0x1f, 0x90}; | 324 | u8 insn[3] = {0xcd, 0x1f, 0x90}; |
325 | 325 | ||
326 | __lgwrite(cpu, guest_pa(cpu, cpu->regs->eip), insn, sizeof(insn)); | 326 | __lgwrite(cpu, guest_pa(cpu, cpu->regs->eip), insn, sizeof(insn)); |
327 | /* The above write might have caused a copy of that page to be made | ||
328 | * (if it was read-only). We need to make sure the Guest has | ||
329 | * up-to-date pagetables. As this doesn't happen often, we can just | ||
330 | * drop them all. */ | ||
331 | guest_pagetable_clear_all(cpu); | ||
327 | } | 332 | } |
328 | 333 | ||
329 | static bool is_hypercall(struct lg_cpu *cpu) | 334 | static bool is_hypercall(struct lg_cpu *cpu) |
@@ -563,8 +568,8 @@ void __exit lguest_arch_host_fini(void) | |||
563 | int lguest_arch_do_hcall(struct lg_cpu *cpu, struct hcall_args *args) | 568 | int lguest_arch_do_hcall(struct lg_cpu *cpu, struct hcall_args *args) |
564 | { | 569 | { |
565 | switch (args->arg0) { | 570 | switch (args->arg0) { |
566 | case LHCALL_LOAD_GDT: | 571 | case LHCALL_LOAD_GDT_ENTRY: |
567 | load_guest_gdt(cpu, args->arg1, args->arg2); | 572 | load_guest_gdt_entry(cpu, args->arg1, args->arg2, args->arg3); |
568 | break; | 573 | break; |
569 | case LHCALL_LOAD_IDT_ENTRY: | 574 | case LHCALL_LOAD_IDT_ENTRY: |
570 | load_guest_idt_entry(cpu, args->arg1, args->arg2, args->arg3); | 575 | load_guest_idt_entry(cpu, args->arg1, args->arg2, args->arg3); |
diff --git a/drivers/md/dm-bio-list.h b/drivers/md/dm-bio-list.h deleted file mode 100644 index 345098b4ca77..000000000000 --- a/drivers/md/dm-bio-list.h +++ /dev/null | |||
@@ -1,117 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2004 Red Hat UK Ltd. | ||
3 | * | ||
4 | * This file is released under the GPL. | ||
5 | */ | ||
6 | |||
7 | #ifndef DM_BIO_LIST_H | ||
8 | #define DM_BIO_LIST_H | ||
9 | |||
10 | #include <linux/bio.h> | ||
11 | |||
12 | #ifdef CONFIG_BLOCK | ||
13 | |||
14 | struct bio_list { | ||
15 | struct bio *head; | ||
16 | struct bio *tail; | ||
17 | }; | ||
18 | |||
19 | static inline int bio_list_empty(const struct bio_list *bl) | ||
20 | { | ||
21 | return bl->head == NULL; | ||
22 | } | ||
23 | |||
24 | static inline void bio_list_init(struct bio_list *bl) | ||
25 | { | ||
26 | bl->head = bl->tail = NULL; | ||
27 | } | ||
28 | |||
29 | #define bio_list_for_each(bio, bl) \ | ||
30 | for (bio = (bl)->head; bio; bio = bio->bi_next) | ||
31 | |||
32 | static inline unsigned bio_list_size(const struct bio_list *bl) | ||
33 | { | ||
34 | unsigned sz = 0; | ||
35 | struct bio *bio; | ||
36 | |||
37 | bio_list_for_each(bio, bl) | ||
38 | sz++; | ||
39 | |||
40 | return sz; | ||
41 | } | ||
42 | |||
43 | static inline void bio_list_add(struct bio_list *bl, struct bio *bio) | ||
44 | { | ||
45 | bio->bi_next = NULL; | ||
46 | |||
47 | if (bl->tail) | ||
48 | bl->tail->bi_next = bio; | ||
49 | else | ||
50 | bl->head = bio; | ||
51 | |||
52 | bl->tail = bio; | ||
53 | } | ||
54 | |||
55 | static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio) | ||
56 | { | ||
57 | bio->bi_next = bl->head; | ||
58 | |||
59 | bl->head = bio; | ||
60 | |||
61 | if (!bl->tail) | ||
62 | bl->tail = bio; | ||
63 | } | ||
64 | |||
65 | static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2) | ||
66 | { | ||
67 | if (!bl2->head) | ||
68 | return; | ||
69 | |||
70 | if (bl->tail) | ||
71 | bl->tail->bi_next = bl2->head; | ||
72 | else | ||
73 | bl->head = bl2->head; | ||
74 | |||
75 | bl->tail = bl2->tail; | ||
76 | } | ||
77 | |||
78 | static inline void bio_list_merge_head(struct bio_list *bl, | ||
79 | struct bio_list *bl2) | ||
80 | { | ||
81 | if (!bl2->head) | ||
82 | return; | ||
83 | |||
84 | if (bl->head) | ||
85 | bl2->tail->bi_next = bl->head; | ||
86 | else | ||
87 | bl->tail = bl2->tail; | ||
88 | |||
89 | bl->head = bl2->head; | ||
90 | } | ||
91 | |||
92 | static inline struct bio *bio_list_pop(struct bio_list *bl) | ||
93 | { | ||
94 | struct bio *bio = bl->head; | ||
95 | |||
96 | if (bio) { | ||
97 | bl->head = bl->head->bi_next; | ||
98 | if (!bl->head) | ||
99 | bl->tail = NULL; | ||
100 | |||
101 | bio->bi_next = NULL; | ||
102 | } | ||
103 | |||
104 | return bio; | ||
105 | } | ||
106 | |||
107 | static inline struct bio *bio_list_get(struct bio_list *bl) | ||
108 | { | ||
109 | struct bio *bio = bl->head; | ||
110 | |||
111 | bl->head = bl->tail = NULL; | ||
112 | |||
113 | return bio; | ||
114 | } | ||
115 | |||
116 | #endif /* CONFIG_BLOCK */ | ||
117 | #endif | ||
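The helpers deleted here are a plain head/tail singly-linked FIFO; the dm and raid callers keep using bio_list_add() and friends after dropping this include, so the helpers evidently live in a shared header now. The following self-contained userspace model (a stand-in node type replaces struct bio, so none of this is dm code) illustrates the add-to-tail/pop-from-head semantics the callers depend on:

/*
 * Userspace model of the removed bio_list helpers (a stand-in "node"
 * replaces struct bio; bi_next becomes next).  Shows the head/tail
 * FIFO semantics that dm relies on.
 */
#include <assert.h>
#include <stddef.h>

struct node { struct node *next; int id; };

struct node_list { struct node *head, *tail; };

static void list_add_tail(struct node_list *l, struct node *n)
{
	n->next = NULL;
	if (l->tail)
		l->tail->next = n;
	else
		l->head = n;
	l->tail = n;
}

static struct node *list_pop(struct node_list *l)
{
	struct node *n = l->head;

	if (n) {
		l->head = n->next;
		if (!l->head)
			l->tail = NULL;
		n->next = NULL;
	}
	return n;
}

int main(void)
{
	struct node a = { .id = 1 }, b = { .id = 2 };
	struct node_list l = { NULL, NULL };

	list_add_tail(&l, &a);
	list_add_tail(&l, &b);
	assert(list_pop(&l)->id == 1);	/* FIFO order */
	assert(list_pop(&l)->id == 2);
	assert(list_pop(&l) == NULL);
	return 0;
}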
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c index 59ee1b015d2d..559dbb52bc85 100644 --- a/drivers/md/dm-delay.c +++ b/drivers/md/dm-delay.c | |||
@@ -15,8 +15,6 @@ | |||
15 | 15 | ||
16 | #include <linux/device-mapper.h> | 16 | #include <linux/device-mapper.h> |
17 | 17 | ||
18 | #include "dm-bio-list.h" | ||
19 | |||
20 | #define DM_MSG_PREFIX "delay" | 18 | #define DM_MSG_PREFIX "delay" |
21 | 19 | ||
22 | struct delay_c { | 20 | struct delay_c { |
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index f01096549a93..823ceba6efa8 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c | |||
@@ -1047,6 +1047,19 @@ static int populate_table(struct dm_table *table, | |||
1047 | return dm_table_complete(table); | 1047 | return dm_table_complete(table); |
1048 | } | 1048 | } |
1049 | 1049 | ||
1050 | static int table_prealloc_integrity(struct dm_table *t, | ||
1051 | struct mapped_device *md) | ||
1052 | { | ||
1053 | struct list_head *devices = dm_table_get_devices(t); | ||
1054 | struct dm_dev_internal *dd; | ||
1055 | |||
1056 | list_for_each_entry(dd, devices, list) | ||
1057 | if (bdev_get_integrity(dd->dm_dev.bdev)) | ||
1058 | return blk_integrity_register(dm_disk(md), NULL); | ||
1059 | |||
1060 | return 0; | ||
1061 | } | ||
1062 | |||
1050 | static int table_load(struct dm_ioctl *param, size_t param_size) | 1063 | static int table_load(struct dm_ioctl *param, size_t param_size) |
1051 | { | 1064 | { |
1052 | int r; | 1065 | int r; |
@@ -1068,6 +1081,14 @@ static int table_load(struct dm_ioctl *param, size_t param_size) | |||
1068 | goto out; | 1081 | goto out; |
1069 | } | 1082 | } |
1070 | 1083 | ||
1084 | r = table_prealloc_integrity(t, md); | ||
1085 | if (r) { | ||
1086 | DMERR("%s: could not register integrity profile.", | ||
1087 | dm_device_name(md)); | ||
1088 | dm_table_destroy(t); | ||
1089 | goto out; | ||
1090 | } | ||
1091 | |||
1071 | down_write(&_hash_lock); | 1092 | down_write(&_hash_lock); |
1072 | hc = dm_get_mdptr(md); | 1093 | hc = dm_get_mdptr(md); |
1073 | if (!hc || hc->md != md) { | 1094 | if (!hc || hc->md != md) { |
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c index 0a225da21272..3e3fc06cb861 100644 --- a/drivers/md/dm-kcopyd.c +++ b/drivers/md/dm-kcopyd.c | |||
@@ -297,7 +297,8 @@ static int run_complete_job(struct kcopyd_job *job) | |||
297 | dm_kcopyd_notify_fn fn = job->fn; | 297 | dm_kcopyd_notify_fn fn = job->fn; |
298 | struct dm_kcopyd_client *kc = job->kc; | 298 | struct dm_kcopyd_client *kc = job->kc; |
299 | 299 | ||
300 | kcopyd_put_pages(kc, job->pages); | 300 | if (job->pages) |
301 | kcopyd_put_pages(kc, job->pages); | ||
301 | mempool_free(job, kc->job_pool); | 302 | mempool_free(job, kc->job_pool); |
302 | fn(read_err, write_err, context); | 303 | fn(read_err, write_err, context); |
303 | 304 | ||
@@ -461,6 +462,7 @@ static void segment_complete(int read_err, unsigned long write_err, | |||
461 | sector_t progress = 0; | 462 | sector_t progress = 0; |
462 | sector_t count = 0; | 463 | sector_t count = 0; |
463 | struct kcopyd_job *job = (struct kcopyd_job *) context; | 464 | struct kcopyd_job *job = (struct kcopyd_job *) context; |
465 | struct dm_kcopyd_client *kc = job->kc; | ||
464 | 466 | ||
465 | mutex_lock(&job->lock); | 467 | mutex_lock(&job->lock); |
466 | 468 | ||
@@ -490,7 +492,7 @@ static void segment_complete(int read_err, unsigned long write_err, | |||
490 | 492 | ||
491 | if (count) { | 493 | if (count) { |
492 | int i; | 494 | int i; |
493 | struct kcopyd_job *sub_job = mempool_alloc(job->kc->job_pool, | 495 | struct kcopyd_job *sub_job = mempool_alloc(kc->job_pool, |
494 | GFP_NOIO); | 496 | GFP_NOIO); |
495 | 497 | ||
496 | *sub_job = *job; | 498 | *sub_job = *job; |
@@ -509,13 +511,16 @@ static void segment_complete(int read_err, unsigned long write_err, | |||
509 | } else if (atomic_dec_and_test(&job->sub_jobs)) { | 511 | } else if (atomic_dec_and_test(&job->sub_jobs)) { |
510 | 512 | ||
511 | /* | 513 | /* |
512 | * To avoid a race we must keep the job around | 514 | * Queue the completion callback to the kcopyd thread. |
513 | * until after the notify function has completed. | 515 | * |
514 | * Otherwise the client may try and stop the job | 516 | * Some callers assume that all the completions are called |
515 | * after we've completed. | 517 | * from a single thread and don't race with each other. |
518 | * | ||
519 | * We must not call the callback directly here because this | ||
520 | * code may not be executing in the thread. | ||
516 | */ | 521 | */ |
517 | job->fn(read_err, write_err, job->context); | 522 | push(&kc->complete_jobs, job); |
518 | mempool_free(job, job->kc->job_pool); | 523 | wake(kc); |
519 | } | 524 | } |
520 | } | 525 | } |
521 | 526 | ||
@@ -528,6 +533,8 @@ static void split_job(struct kcopyd_job *job) | |||
528 | { | 533 | { |
529 | int i; | 534 | int i; |
530 | 535 | ||
536 | atomic_inc(&job->kc->nr_jobs); | ||
537 | |||
531 | atomic_set(&job->sub_jobs, SPLIT_COUNT); | 538 | atomic_set(&job->sub_jobs, SPLIT_COUNT); |
532 | for (i = 0; i < SPLIT_COUNT; i++) | 539 | for (i = 0; i < SPLIT_COUNT; i++) |
533 | segment_complete(0, 0u, job); | 540 | segment_complete(0, 0u, job); |
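Two things change in segment_complete(): the client pointer is cached before the job can be freed, and the final sub-job no longer runs the completion callback inline — it pushes the parent job onto complete_jobs so every completion fires from the kcopyd thread. Below is a minimal userspace sketch of that "last one out queues the parent" pattern; C11 atomics stand in for the kernel's atomic_dec_and_test, and the queue push is just a printf:

/*
 * Sketch (not kcopyd itself) of the "last sub-job completes the parent"
 * pattern used by split_job()/segment_complete(): SPLIT_COUNT workers
 * each decrement sub_jobs; whoever reaches zero hands the parent to a
 * completion queue instead of running the callback inline.
 */
#include <stdatomic.h>
#include <stdio.h>

#define SPLIT_COUNT 8

struct job {
	atomic_int sub_jobs;
	void (*fn)(struct job *job);	/* completion callback, run later */
};

static void push_to_complete_queue(struct job *job)
{
	/* Stand-in for push(&kc->complete_jobs, job) + wake(kc). */
	printf("queued parent job %p for the completion thread\n", (void *)job);
}

static void segment_complete(struct job *job)
{
	/* Only the final sub-job queues the parent; never call fn() here. */
	if (atomic_fetch_sub(&job->sub_jobs, 1) == 1)
		push_to_complete_queue(job);
}

int main(void)
{
	struct job parent = { .fn = NULL };

	atomic_init(&parent.sub_jobs, SPLIT_COUNT);
	for (int i = 0; i < SPLIT_COUNT; i++)
		segment_complete(&parent);
	return 0;
}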
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c index bfa107f59d96..79fb53e51c70 100644 --- a/drivers/md/dm-linear.c +++ b/drivers/md/dm-linear.c | |||
@@ -142,7 +142,6 @@ static struct target_type linear_target = { | |||
142 | .status = linear_status, | 142 | .status = linear_status, |
143 | .ioctl = linear_ioctl, | 143 | .ioctl = linear_ioctl, |
144 | .merge = linear_merge, | 144 | .merge = linear_merge, |
145 | .features = DM_TARGET_SUPPORTS_BARRIERS, | ||
146 | }; | 145 | }; |
147 | 146 | ||
148 | int __init dm_linear_init(void) | 147 | int __init dm_linear_init(void) |
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 095f77bf9681..6a386ab4f7eb 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c | |||
@@ -8,7 +8,6 @@ | |||
8 | #include <linux/device-mapper.h> | 8 | #include <linux/device-mapper.h> |
9 | 9 | ||
10 | #include "dm-path-selector.h" | 10 | #include "dm-path-selector.h" |
11 | #include "dm-bio-list.h" | ||
12 | #include "dm-bio-record.h" | 11 | #include "dm-bio-record.h" |
13 | #include "dm-uevent.h" | 12 | #include "dm-uevent.h" |
14 | 13 | ||
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index 536ef0bef154..076fbb4e967a 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c | |||
@@ -5,7 +5,6 @@ | |||
5 | * This file is released under the GPL. | 5 | * This file is released under the GPL. |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #include "dm-bio-list.h" | ||
9 | #include "dm-bio-record.h" | 8 | #include "dm-bio-record.h" |
10 | 9 | ||
11 | #include <linux/init.h> | 10 | #include <linux/init.h> |
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c index 59f8d9df9e1a..7b899be0b087 100644 --- a/drivers/md/dm-region-hash.c +++ b/drivers/md/dm-region-hash.c | |||
@@ -14,7 +14,6 @@ | |||
14 | #include <linux/vmalloc.h> | 14 | #include <linux/vmalloc.h> |
15 | 15 | ||
16 | #include "dm.h" | 16 | #include "dm.h" |
17 | #include "dm-bio-list.h" | ||
18 | 17 | ||
19 | #define DM_MSG_PREFIX "region hash" | 18 | #define DM_MSG_PREFIX "region hash" |
20 | 19 | ||
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index 981a0413068f..d73f17fc7778 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c | |||
@@ -22,7 +22,6 @@ | |||
22 | #include <linux/workqueue.h> | 22 | #include <linux/workqueue.h> |
23 | 23 | ||
24 | #include "dm-exception-store.h" | 24 | #include "dm-exception-store.h" |
25 | #include "dm-bio-list.h" | ||
26 | 25 | ||
27 | #define DM_MSG_PREFIX "snapshots" | 26 | #define DM_MSG_PREFIX "snapshots" |
28 | 27 | ||
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index e8361b191b9b..429b50b975d5 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c | |||
@@ -52,8 +52,6 @@ struct dm_table { | |||
52 | sector_t *highs; | 52 | sector_t *highs; |
53 | struct dm_target *targets; | 53 | struct dm_target *targets; |
54 | 54 | ||
55 | unsigned barriers_supported:1; | ||
56 | |||
57 | /* | 55 | /* |
58 | * Indicates the rw permissions for the new logical | 56 | * Indicates the rw permissions for the new logical |
59 | * device. This should be a combination of FMODE_READ | 57 | * device. This should be a combination of FMODE_READ |
@@ -243,7 +241,6 @@ int dm_table_create(struct dm_table **result, fmode_t mode, | |||
243 | 241 | ||
244 | INIT_LIST_HEAD(&t->devices); | 242 | INIT_LIST_HEAD(&t->devices); |
245 | atomic_set(&t->holders, 0); | 243 | atomic_set(&t->holders, 0); |
246 | t->barriers_supported = 1; | ||
247 | 244 | ||
248 | if (!num_targets) | 245 | if (!num_targets) |
249 | num_targets = KEYS_PER_NODE; | 246 | num_targets = KEYS_PER_NODE; |
@@ -751,10 +748,6 @@ int dm_table_add_target(struct dm_table *t, const char *type, | |||
751 | /* FIXME: the plan is to combine high here and then have | 748 | /* FIXME: the plan is to combine high here and then have |
752 | * the merge fn apply the target level restrictions. */ | 749 | * the merge fn apply the target level restrictions. */ |
753 | combine_restrictions_low(&t->limits, &tgt->limits); | 750 | combine_restrictions_low(&t->limits, &tgt->limits); |
754 | |||
755 | if (!(tgt->type->features & DM_TARGET_SUPPORTS_BARRIERS)) | ||
756 | t->barriers_supported = 0; | ||
757 | |||
758 | return 0; | 751 | return 0; |
759 | 752 | ||
760 | bad: | 753 | bad: |
@@ -799,12 +792,6 @@ int dm_table_complete(struct dm_table *t) | |||
799 | 792 | ||
800 | check_for_valid_limits(&t->limits); | 793 | check_for_valid_limits(&t->limits); |
801 | 794 | ||
802 | /* | ||
803 | * We only support barriers if there is exactly one underlying device. | ||
804 | */ | ||
805 | if (!list_is_singular(&t->devices)) | ||
806 | t->barriers_supported = 0; | ||
807 | |||
808 | /* how many indexes will the btree have ? */ | 795 | /* how many indexes will the btree have ? */ |
809 | leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE); | 796 | leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE); |
810 | t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE); | 797 | t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE); |
@@ -879,6 +866,45 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector) | |||
879 | return &t->targets[(KEYS_PER_NODE * n) + k]; | 866 | return &t->targets[(KEYS_PER_NODE * n) + k]; |
880 | } | 867 | } |
881 | 868 | ||
869 | /* | ||
870 | * Set the integrity profile for this device if all devices used have | ||
871 | * matching profiles. | ||
872 | */ | ||
873 | static void dm_table_set_integrity(struct dm_table *t) | ||
874 | { | ||
875 | struct list_head *devices = dm_table_get_devices(t); | ||
876 | struct dm_dev_internal *prev = NULL, *dd = NULL; | ||
877 | |||
878 | if (!blk_get_integrity(dm_disk(t->md))) | ||
879 | return; | ||
880 | |||
881 | list_for_each_entry(dd, devices, list) { | ||
882 | if (prev && | ||
883 | blk_integrity_compare(prev->dm_dev.bdev->bd_disk, | ||
884 | dd->dm_dev.bdev->bd_disk) < 0) { | ||
885 | DMWARN("%s: integrity not set: %s and %s mismatch", | ||
886 | dm_device_name(t->md), | ||
887 | prev->dm_dev.bdev->bd_disk->disk_name, | ||
888 | dd->dm_dev.bdev->bd_disk->disk_name); | ||
889 | goto no_integrity; | ||
890 | } | ||
891 | prev = dd; | ||
892 | } | ||
893 | |||
894 | if (!prev || !bdev_get_integrity(prev->dm_dev.bdev)) | ||
895 | goto no_integrity; | ||
896 | |||
897 | blk_integrity_register(dm_disk(t->md), | ||
898 | bdev_get_integrity(prev->dm_dev.bdev)); | ||
899 | |||
900 | return; | ||
901 | |||
902 | no_integrity: | ||
903 | blk_integrity_register(dm_disk(t->md), NULL); | ||
904 | |||
905 | return; | ||
906 | } | ||
907 | |||
882 | void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q) | 908 | void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q) |
883 | { | 909 | { |
884 | /* | 910 | /* |
@@ -899,6 +925,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q) | |||
899 | else | 925 | else |
900 | queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q); | 926 | queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q); |
901 | 927 | ||
928 | dm_table_set_integrity(t); | ||
902 | } | 929 | } |
903 | 930 | ||
904 | unsigned int dm_table_get_num_targets(struct dm_table *t) | 931 | unsigned int dm_table_get_num_targets(struct dm_table *t) |
@@ -1019,12 +1046,6 @@ struct mapped_device *dm_table_get_md(struct dm_table *t) | |||
1019 | return t->md; | 1046 | return t->md; |
1020 | } | 1047 | } |
1021 | 1048 | ||
1022 | int dm_table_barrier_ok(struct dm_table *t) | ||
1023 | { | ||
1024 | return t->barriers_supported; | ||
1025 | } | ||
1026 | EXPORT_SYMBOL(dm_table_barrier_ok); | ||
1027 | |||
1028 | EXPORT_SYMBOL(dm_vcalloc); | 1049 | EXPORT_SYMBOL(dm_vcalloc); |
1029 | EXPORT_SYMBOL(dm_get_device); | 1050 | EXPORT_SYMBOL(dm_get_device); |
1030 | EXPORT_SYMBOL(dm_put_device); | 1051 | EXPORT_SYMBOL(dm_put_device); |
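dm_table_set_integrity() only advertises an integrity profile on the mapped device when every underlying device has one and blk_integrity_compare() reports a match; any gap or mismatch falls back to registering a NULL profile. Here is a userspace sketch of that policy, with the profile reduced to an illustrative name string (the T10 label is only an example, not taken from this patch):

/*
 * Userspace sketch of the dm_table_set_integrity() policy: expose an
 * integrity profile on the dm device only if every underlying device
 * has one and they all match; otherwise register none.  Profile
 * comparison is reduced to a name string here.
 */
#include <stdio.h>
#include <string.h>

static const char *pick_integrity_profile(const char *const profiles[], int n)
{
	const char *prev = NULL;

	for (int i = 0; i < n; i++) {
		if (!profiles[i])
			return NULL;		/* one device has no profile */
		if (prev && strcmp(prev, profiles[i]) != 0)
			return NULL;		/* profiles mismatch */
		prev = profiles[i];
	}
	return prev;				/* NULL if the table is empty */
}

int main(void)
{
	const char *ok[]  = { "T10-DIF-TYPE1-CRC", "T10-DIF-TYPE1-CRC" };
	const char *bad[] = { "T10-DIF-TYPE1-CRC", NULL };

	printf("matching devices -> %s\n", pick_integrity_profile(ok, 2));
	printf("mixed devices    -> %s\n",
	       pick_integrity_profile(bad, 2) ? "profile" : "none");
	return 0;
}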
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 788ba96a6256..424f7b048c30 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
@@ -6,7 +6,6 @@ | |||
6 | */ | 6 | */ |
7 | 7 | ||
8 | #include "dm.h" | 8 | #include "dm.h" |
9 | #include "dm-bio-list.h" | ||
10 | #include "dm-uevent.h" | 9 | #include "dm-uevent.h" |
11 | 10 | ||
12 | #include <linux/init.h> | 11 | #include <linux/init.h> |
@@ -89,12 +88,13 @@ union map_info *dm_get_mapinfo(struct bio *bio) | |||
89 | /* | 88 | /* |
90 | * Bits for the md->flags field. | 89 | * Bits for the md->flags field. |
91 | */ | 90 | */ |
92 | #define DMF_BLOCK_IO 0 | 91 | #define DMF_BLOCK_IO_FOR_SUSPEND 0 |
93 | #define DMF_SUSPENDED 1 | 92 | #define DMF_SUSPENDED 1 |
94 | #define DMF_FROZEN 2 | 93 | #define DMF_FROZEN 2 |
95 | #define DMF_FREEING 3 | 94 | #define DMF_FREEING 3 |
96 | #define DMF_DELETING 4 | 95 | #define DMF_DELETING 4 |
97 | #define DMF_NOFLUSH_SUSPENDING 5 | 96 | #define DMF_NOFLUSH_SUSPENDING 5 |
97 | #define DMF_QUEUE_IO_TO_THREAD 6 | ||
98 | 98 | ||
99 | /* | 99 | /* |
100 | * Work processed by per-device workqueue. | 100 | * Work processed by per-device workqueue. |
@@ -124,6 +124,11 @@ struct mapped_device { | |||
124 | spinlock_t deferred_lock; | 124 | spinlock_t deferred_lock; |
125 | 125 | ||
126 | /* | 126 | /* |
127 | * An error from the barrier request currently being processed. | ||
128 | */ | ||
129 | int barrier_error; | ||
130 | |||
131 | /* | ||
127 | * Processing queue (flush/barriers) | 132 | * Processing queue (flush/barriers) |
128 | */ | 133 | */ |
129 | struct workqueue_struct *wq; | 134 | struct workqueue_struct *wq; |
@@ -424,6 +429,10 @@ static void end_io_acct(struct dm_io *io) | |||
424 | part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration); | 429 | part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration); |
425 | part_stat_unlock(); | 430 | part_stat_unlock(); |
426 | 431 | ||
432 | /* | ||
433 | * After this is decremented the bio must not be touched if it is | ||
434 | * a barrier. | ||
435 | */ | ||
427 | dm_disk(md)->part0.in_flight = pending = | 436 | dm_disk(md)->part0.in_flight = pending = |
428 | atomic_dec_return(&md->pending); | 437 | atomic_dec_return(&md->pending); |
429 | 438 | ||
@@ -435,21 +444,18 @@ static void end_io_acct(struct dm_io *io) | |||
435 | /* | 444 | /* |
436 | * Add the bio to the list of deferred io. | 445 | * Add the bio to the list of deferred io. |
437 | */ | 446 | */ |
438 | static int queue_io(struct mapped_device *md, struct bio *bio) | 447 | static void queue_io(struct mapped_device *md, struct bio *bio) |
439 | { | 448 | { |
440 | down_write(&md->io_lock); | 449 | down_write(&md->io_lock); |
441 | 450 | ||
442 | if (!test_bit(DMF_BLOCK_IO, &md->flags)) { | ||
443 | up_write(&md->io_lock); | ||
444 | return 1; | ||
445 | } | ||
446 | |||
447 | spin_lock_irq(&md->deferred_lock); | 451 | spin_lock_irq(&md->deferred_lock); |
448 | bio_list_add(&md->deferred, bio); | 452 | bio_list_add(&md->deferred, bio); |
449 | spin_unlock_irq(&md->deferred_lock); | 453 | spin_unlock_irq(&md->deferred_lock); |
450 | 454 | ||
455 | if (!test_and_set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) | ||
456 | queue_work(md->wq, &md->work); | ||
457 | |||
451 | up_write(&md->io_lock); | 458 | up_write(&md->io_lock); |
452 | return 0; /* deferred successfully */ | ||
453 | } | 459 | } |
454 | 460 | ||
455 | /* | 461 | /* |
@@ -533,25 +539,35 @@ static void dec_pending(struct dm_io *io, int error) | |||
533 | */ | 539 | */ |
534 | spin_lock_irqsave(&md->deferred_lock, flags); | 540 | spin_lock_irqsave(&md->deferred_lock, flags); |
535 | if (__noflush_suspending(md)) | 541 | if (__noflush_suspending(md)) |
536 | bio_list_add(&md->deferred, io->bio); | 542 | bio_list_add_head(&md->deferred, io->bio); |
537 | else | 543 | else |
538 | /* noflush suspend was interrupted. */ | 544 | /* noflush suspend was interrupted. */ |
539 | io->error = -EIO; | 545 | io->error = -EIO; |
540 | spin_unlock_irqrestore(&md->deferred_lock, flags); | 546 | spin_unlock_irqrestore(&md->deferred_lock, flags); |
541 | } | 547 | } |
542 | 548 | ||
543 | end_io_acct(io); | ||
544 | |||
545 | io_error = io->error; | 549 | io_error = io->error; |
546 | bio = io->bio; | 550 | bio = io->bio; |
547 | 551 | ||
548 | free_io(md, io); | 552 | if (bio_barrier(bio)) { |
553 | /* | ||
554 | * There can be just one barrier request so we use | ||
555 | * a per-device variable for error reporting. | ||
556 | * Note that you can't touch the bio after end_io_acct | ||
557 | */ | ||
558 | md->barrier_error = io_error; | ||
559 | end_io_acct(io); | ||
560 | } else { | ||
561 | end_io_acct(io); | ||
549 | 562 | ||
550 | if (io_error != DM_ENDIO_REQUEUE) { | 563 | if (io_error != DM_ENDIO_REQUEUE) { |
551 | trace_block_bio_complete(md->queue, bio); | 564 | trace_block_bio_complete(md->queue, bio); |
552 | 565 | ||
553 | bio_endio(bio, io_error); | 566 | bio_endio(bio, io_error); |
567 | } | ||
554 | } | 568 | } |
569 | |||
570 | free_io(md, io); | ||
555 | } | 571 | } |
556 | } | 572 | } |
557 | 573 | ||
@@ -693,13 +709,19 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector, | |||
693 | 709 | ||
694 | clone->bi_sector = sector; | 710 | clone->bi_sector = sector; |
695 | clone->bi_bdev = bio->bi_bdev; | 711 | clone->bi_bdev = bio->bi_bdev; |
696 | clone->bi_rw = bio->bi_rw; | 712 | clone->bi_rw = bio->bi_rw & ~(1 << BIO_RW_BARRIER); |
697 | clone->bi_vcnt = 1; | 713 | clone->bi_vcnt = 1; |
698 | clone->bi_size = to_bytes(len); | 714 | clone->bi_size = to_bytes(len); |
699 | clone->bi_io_vec->bv_offset = offset; | 715 | clone->bi_io_vec->bv_offset = offset; |
700 | clone->bi_io_vec->bv_len = clone->bi_size; | 716 | clone->bi_io_vec->bv_len = clone->bi_size; |
701 | clone->bi_flags |= 1 << BIO_CLONED; | 717 | clone->bi_flags |= 1 << BIO_CLONED; |
702 | 718 | ||
719 | if (bio_integrity(bio)) { | ||
720 | bio_integrity_clone(clone, bio, GFP_NOIO); | ||
721 | bio_integrity_trim(clone, | ||
722 | bio_sector_offset(bio, idx, offset), len); | ||
723 | } | ||
724 | |||
703 | return clone; | 725 | return clone; |
704 | } | 726 | } |
705 | 727 | ||
@@ -714,6 +736,7 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector, | |||
714 | 736 | ||
715 | clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs); | 737 | clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs); |
716 | __bio_clone(clone, bio); | 738 | __bio_clone(clone, bio); |
739 | clone->bi_rw &= ~(1 << BIO_RW_BARRIER); | ||
717 | clone->bi_destructor = dm_bio_destructor; | 740 | clone->bi_destructor = dm_bio_destructor; |
718 | clone->bi_sector = sector; | 741 | clone->bi_sector = sector; |
719 | clone->bi_idx = idx; | 742 | clone->bi_idx = idx; |
@@ -721,6 +744,14 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector, | |||
721 | clone->bi_size = to_bytes(len); | 744 | clone->bi_size = to_bytes(len); |
722 | clone->bi_flags &= ~(1 << BIO_SEG_VALID); | 745 | clone->bi_flags &= ~(1 << BIO_SEG_VALID); |
723 | 746 | ||
747 | if (bio_integrity(bio)) { | ||
748 | bio_integrity_clone(clone, bio, GFP_NOIO); | ||
749 | |||
750 | if (idx != bio->bi_idx || clone->bi_size < bio->bi_size) | ||
751 | bio_integrity_trim(clone, | ||
752 | bio_sector_offset(bio, idx, 0), len); | ||
753 | } | ||
754 | |||
724 | return clone; | 755 | return clone; |
725 | } | 756 | } |
726 | 757 | ||
@@ -834,14 +865,13 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio) | |||
834 | 865 | ||
835 | ci.map = dm_get_table(md); | 866 | ci.map = dm_get_table(md); |
836 | if (unlikely(!ci.map)) { | 867 | if (unlikely(!ci.map)) { |
837 | bio_io_error(bio); | 868 | if (!bio_barrier(bio)) |
838 | return; | 869 | bio_io_error(bio); |
839 | } | 870 | else |
840 | if (unlikely(bio_barrier(bio) && !dm_table_barrier_ok(ci.map))) { | 871 | md->barrier_error = -EIO; |
841 | dm_table_put(ci.map); | ||
842 | bio_endio(bio, -EOPNOTSUPP); | ||
843 | return; | 872 | return; |
844 | } | 873 | } |
874 | |||
845 | ci.md = md; | 875 | ci.md = md; |
846 | ci.bio = bio; | 876 | ci.bio = bio; |
847 | ci.io = alloc_io(md); | 877 | ci.io = alloc_io(md); |
@@ -918,7 +948,6 @@ out: | |||
918 | */ | 948 | */ |
919 | static int dm_request(struct request_queue *q, struct bio *bio) | 949 | static int dm_request(struct request_queue *q, struct bio *bio) |
920 | { | 950 | { |
921 | int r = -EIO; | ||
922 | int rw = bio_data_dir(bio); | 951 | int rw = bio_data_dir(bio); |
923 | struct mapped_device *md = q->queuedata; | 952 | struct mapped_device *md = q->queuedata; |
924 | int cpu; | 953 | int cpu; |
@@ -931,34 +960,27 @@ static int dm_request(struct request_queue *q, struct bio *bio) | |||
931 | part_stat_unlock(); | 960 | part_stat_unlock(); |
932 | 961 | ||
933 | /* | 962 | /* |
934 | * If we're suspended we have to queue | 963 | * If we're suspended or the thread is processing barriers |
935 | * this io for later. | 964 | * we have to queue this io for later. |
936 | */ | 965 | */ |
937 | while (test_bit(DMF_BLOCK_IO, &md->flags)) { | 966 | if (unlikely(test_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) || |
967 | unlikely(bio_barrier(bio))) { | ||
938 | up_read(&md->io_lock); | 968 | up_read(&md->io_lock); |
939 | 969 | ||
940 | if (bio_rw(bio) != READA) | 970 | if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) && |
941 | r = queue_io(md, bio); | 971 | bio_rw(bio) == READA) { |
972 | bio_io_error(bio); | ||
973 | return 0; | ||
974 | } | ||
942 | 975 | ||
943 | if (r <= 0) | 976 | queue_io(md, bio); |
944 | goto out_req; | ||
945 | 977 | ||
946 | /* | 978 | return 0; |
947 | * We're in a while loop, because someone could suspend | ||
948 | * before we get to the following read lock. | ||
949 | */ | ||
950 | down_read(&md->io_lock); | ||
951 | } | 979 | } |
952 | 980 | ||
953 | __split_and_process_bio(md, bio); | 981 | __split_and_process_bio(md, bio); |
954 | up_read(&md->io_lock); | 982 | up_read(&md->io_lock); |
955 | return 0; | 983 | return 0; |
956 | |||
957 | out_req: | ||
958 | if (r < 0) | ||
959 | bio_io_error(bio); | ||
960 | |||
961 | return 0; | ||
962 | } | 984 | } |
963 | 985 | ||
964 | static void dm_unplug_all(struct request_queue *q) | 986 | static void dm_unplug_all(struct request_queue *q) |
@@ -978,7 +1000,7 @@ static int dm_any_congested(void *congested_data, int bdi_bits) | |||
978 | struct mapped_device *md = congested_data; | 1000 | struct mapped_device *md = congested_data; |
979 | struct dm_table *map; | 1001 | struct dm_table *map; |
980 | 1002 | ||
981 | if (!test_bit(DMF_BLOCK_IO, &md->flags)) { | 1003 | if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { |
982 | map = dm_get_table(md); | 1004 | map = dm_get_table(md); |
983 | if (map) { | 1005 | if (map) { |
984 | r = dm_table_any_congested(map, bdi_bits); | 1006 | r = dm_table_any_congested(map, bdi_bits); |
@@ -1193,6 +1215,7 @@ static void free_dev(struct mapped_device *md) | |||
1193 | mempool_destroy(md->tio_pool); | 1215 | mempool_destroy(md->tio_pool); |
1194 | mempool_destroy(md->io_pool); | 1216 | mempool_destroy(md->io_pool); |
1195 | bioset_free(md->bs); | 1217 | bioset_free(md->bs); |
1218 | blk_integrity_unregister(md->disk); | ||
1196 | del_gendisk(md->disk); | 1219 | del_gendisk(md->disk); |
1197 | free_minor(minor); | 1220 | free_minor(minor); |
1198 | 1221 | ||
@@ -1406,6 +1429,36 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible) | |||
1406 | return r; | 1429 | return r; |
1407 | } | 1430 | } |
1408 | 1431 | ||
1432 | static int dm_flush(struct mapped_device *md) | ||
1433 | { | ||
1434 | dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); | ||
1435 | return 0; | ||
1436 | } | ||
1437 | |||
1438 | static void process_barrier(struct mapped_device *md, struct bio *bio) | ||
1439 | { | ||
1440 | int error = dm_flush(md); | ||
1441 | |||
1442 | if (unlikely(error)) { | ||
1443 | bio_endio(bio, error); | ||
1444 | return; | ||
1445 | } | ||
1446 | if (bio_empty_barrier(bio)) { | ||
1447 | bio_endio(bio, 0); | ||
1448 | return; | ||
1449 | } | ||
1450 | |||
1451 | __split_and_process_bio(md, bio); | ||
1452 | |||
1453 | error = dm_flush(md); | ||
1454 | |||
1455 | if (!error && md->barrier_error) | ||
1456 | error = md->barrier_error; | ||
1457 | |||
1458 | if (md->barrier_error != DM_ENDIO_REQUEUE) | ||
1459 | bio_endio(bio, error); | ||
1460 | } | ||
1461 | |||
1409 | /* | 1462 | /* |
1410 | * Process the deferred bios | 1463 | * Process the deferred bios |
1411 | */ | 1464 | */ |
@@ -1417,25 +1470,34 @@ static void dm_wq_work(struct work_struct *work) | |||
1417 | 1470 | ||
1418 | down_write(&md->io_lock); | 1471 | down_write(&md->io_lock); |
1419 | 1472 | ||
1420 | next_bio: | 1473 | while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { |
1421 | spin_lock_irq(&md->deferred_lock); | 1474 | spin_lock_irq(&md->deferred_lock); |
1422 | c = bio_list_pop(&md->deferred); | 1475 | c = bio_list_pop(&md->deferred); |
1423 | spin_unlock_irq(&md->deferred_lock); | 1476 | spin_unlock_irq(&md->deferred_lock); |
1424 | 1477 | ||
1425 | if (c) { | 1478 | if (!c) { |
1426 | __split_and_process_bio(md, c); | 1479 | clear_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags); |
1427 | goto next_bio; | 1480 | break; |
1428 | } | 1481 | } |
1429 | 1482 | ||
1430 | clear_bit(DMF_BLOCK_IO, &md->flags); | 1483 | up_write(&md->io_lock); |
1484 | |||
1485 | if (bio_barrier(c)) | ||
1486 | process_barrier(md, c); | ||
1487 | else | ||
1488 | __split_and_process_bio(md, c); | ||
1489 | |||
1490 | down_write(&md->io_lock); | ||
1491 | } | ||
1431 | 1492 | ||
1432 | up_write(&md->io_lock); | 1493 | up_write(&md->io_lock); |
1433 | } | 1494 | } |
1434 | 1495 | ||
1435 | static void dm_queue_flush(struct mapped_device *md) | 1496 | static void dm_queue_flush(struct mapped_device *md) |
1436 | { | 1497 | { |
1498 | clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); | ||
1499 | smp_mb__after_clear_bit(); | ||
1437 | queue_work(md->wq, &md->work); | 1500 | queue_work(md->wq, &md->work); |
1438 | flush_workqueue(md->wq); | ||
1439 | } | 1501 | } |
1440 | 1502 | ||
1441 | /* | 1503 | /* |
@@ -1553,20 +1615,36 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags) | |||
1553 | } | 1615 | } |
1554 | 1616 | ||
1555 | /* | 1617 | /* |
1556 | * First we set the BLOCK_IO flag so no more ios will be mapped. | 1618 | * Here we must make sure that no processes are submitting requests |
1619 | * to target drivers i.e. no one may be executing | ||
1620 | * __split_and_process_bio. This is called from dm_request and | ||
1621 | * dm_wq_work. | ||
1622 | * | ||
1623 | * To get all processes out of __split_and_process_bio in dm_request, | ||
1624 | * we take the write lock. To prevent any process from reentering | ||
1625 | * __split_and_process_bio from dm_request, we set | ||
1626 | * DMF_QUEUE_IO_TO_THREAD. | ||
1627 | * | ||
1628 | * To quiesce the thread (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND | ||
1629 | * and call flush_workqueue(md->wq). flush_workqueue will wait until | ||
1630 | * dm_wq_work exits and DMF_BLOCK_IO_FOR_SUSPEND will prevent any | ||
1631 | * further calls to __split_and_process_bio from dm_wq_work. | ||
1557 | */ | 1632 | */ |
1558 | down_write(&md->io_lock); | 1633 | down_write(&md->io_lock); |
1559 | set_bit(DMF_BLOCK_IO, &md->flags); | 1634 | set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); |
1560 | 1635 | set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags); | |
1561 | up_write(&md->io_lock); | 1636 | up_write(&md->io_lock); |
1562 | 1637 | ||
1638 | flush_workqueue(md->wq); | ||
1639 | |||
1563 | /* | 1640 | /* |
1564 | * Wait for the already-mapped ios to complete. | 1641 | * At this point no more requests are entering target request routines. |
1642 | * We call dm_wait_for_completion to wait for all existing requests | ||
1643 | * to finish. | ||
1565 | */ | 1644 | */ |
1566 | r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE); | 1645 | r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE); |
1567 | 1646 | ||
1568 | down_write(&md->io_lock); | 1647 | down_write(&md->io_lock); |
1569 | |||
1570 | if (noflush) | 1648 | if (noflush) |
1571 | clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); | 1649 | clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); |
1572 | up_write(&md->io_lock); | 1650 | up_write(&md->io_lock); |
@@ -1579,6 +1657,12 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags) | |||
1579 | goto out; /* pushback list is already flushed, so skip flush */ | 1657 | goto out; /* pushback list is already flushed, so skip flush */ |
1580 | } | 1658 | } |
1581 | 1659 | ||
1660 | /* | ||
1661 | * If dm_wait_for_completion returned 0, the device is completely | ||
1662 | * quiescent now. There is no request-processing activity. All new | ||
1663 | * requests are being added to md->deferred list. | ||
1664 | */ | ||
1665 | |||
1582 | dm_table_postsuspend_targets(map); | 1666 | dm_table_postsuspend_targets(map); |
1583 | 1667 | ||
1584 | set_bit(DMF_SUSPENDED, &md->flags); | 1668 | set_bit(DMF_SUSPENDED, &md->flags); |
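The barrier rework replaces the single DMF_BLOCK_IO bit with two: DMF_QUEUE_IO_TO_THREAD diverts incoming bios to the per-device worker (which is also where every barrier now goes), while DMF_BLOCK_IO_FOR_SUSPEND only gates the worker during suspend. A compact sketch of the resulting dm_request() decision follows; the flag values here are illustrative masks, whereas the kernel code uses test_bit() on bit numbers:

/*
 * Sketch of the new dm_request() decision after this patch: anything
 * that arrives while DMF_QUEUE_IO_TO_THREAD is set, and every barrier,
 * is deferred to the per-device worker; READA bios are failed outright
 * during suspend instead of being queued.  Flag values are illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

#define DMF_BLOCK_IO_FOR_SUSPEND	(1u << 0)
#define DMF_QUEUE_IO_TO_THREAD		(1u << 6)

enum dm_decision { DM_PROCESS_NOW, DM_DEFER_TO_THREAD, DM_FAIL_READA };

static enum dm_decision dm_request_decision(unsigned flags, bool is_barrier,
					    bool is_readahead)
{
	if (!(flags & DMF_QUEUE_IO_TO_THREAD) && !is_barrier)
		return DM_PROCESS_NOW;		/* fast path: map it inline */

	if ((flags & DMF_BLOCK_IO_FOR_SUSPEND) && is_readahead)
		return DM_FAIL_READA;		/* readahead is disposable */

	return DM_DEFER_TO_THREAD;		/* queue_io() + wake worker */
}

int main(void)
{
	printf("%d %d %d\n",
	       dm_request_decision(0, false, false),		/* 0 */
	       dm_request_decision(0, true, false),		/* 1 */
	       dm_request_decision(DMF_BLOCK_IO_FOR_SUSPEND |
				   DMF_QUEUE_IO_TO_THREAD, false, true)); /* 2 */
	return 0;
}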
diff --git a/drivers/md/dm.h b/drivers/md/dm.h index b48397c0abbd..a31506d93e91 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h | |||
@@ -52,7 +52,6 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits); | |||
52 | * To check the return value from dm_table_find_target(). | 52 | * To check the return value from dm_table_find_target(). |
53 | */ | 53 | */ |
54 | #define dm_target_is_valid(t) ((t)->table) | 54 | #define dm_target_is_valid(t) ((t)->table) |
55 | int dm_table_barrier_ok(struct dm_table *t); | ||
56 | 55 | ||
57 | /*----------------------------------------------------------------- | 56 | /*----------------------------------------------------------------- |
58 | * A registry of target types. | 57 | * A registry of target types. |
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 274b491a11c1..36df9109cde1 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
@@ -35,7 +35,6 @@ | |||
35 | #include <linux/blkdev.h> | 35 | #include <linux/blkdev.h> |
36 | #include <linux/seq_file.h> | 36 | #include <linux/seq_file.h> |
37 | #include "md.h" | 37 | #include "md.h" |
38 | #include "dm-bio-list.h" | ||
39 | #include "raid1.h" | 38 | #include "raid1.h" |
40 | #include "bitmap.h" | 39 | #include "bitmap.h" |
41 | 40 | ||
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index e293d92641ac..81a54f17417e 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
@@ -22,7 +22,6 @@ | |||
22 | #include <linux/blkdev.h> | 22 | #include <linux/blkdev.h> |
23 | #include <linux/seq_file.h> | 23 | #include <linux/seq_file.h> |
24 | #include "md.h" | 24 | #include "md.h" |
25 | #include "dm-bio-list.h" | ||
26 | #include "raid10.h" | 25 | #include "raid10.h" |
27 | #include "bitmap.h" | 26 | #include "bitmap.h" |
28 | 27 | ||
diff --git a/drivers/media/dvb/ttpci/Kconfig b/drivers/media/dvb/ttpci/Kconfig index 772990415f99..68eb4493f991 100644 --- a/drivers/media/dvb/ttpci/Kconfig +++ b/drivers/media/dvb/ttpci/Kconfig | |||
@@ -28,25 +28,12 @@ config DVB_AV7110 | |||
28 | download/extract it, and then copy it to /usr/lib/hotplug/firmware | 28 | download/extract it, and then copy it to /usr/lib/hotplug/firmware |
29 | or /lib/firmware (depending on configuration of firmware hotplug). | 29 | or /lib/firmware (depending on configuration of firmware hotplug). |
30 | 30 | ||
31 | Say Y if you own such a card and want to use it. | 31 | Alternatively, you can download the file and use the kernel's |
32 | 32 | EXTRA_FIRMWARE configuration option to build it into your | |
33 | config DVB_AV7110_FIRMWARE | 33 | kernel image by adding the filename to the EXTRA_FIRMWARE |
34 | bool "Compile AV7110 firmware into the driver" | 34 | configuration option string. |
35 | depends on DVB_AV7110 && !STANDALONE | ||
36 | default y if DVB_AV7110=y | ||
37 | help | ||
38 | The AV7110 firmware is normally loaded by the firmware hotplug manager. | ||
39 | If you want to compile the firmware into the driver you need to say | ||
40 | Y here and provide the correct path of the firmware. You need this | ||
41 | option if you want to compile the whole driver statically into the | ||
42 | kernel. | ||
43 | 35 | ||
44 | All other people say N. | 36 | Say Y if you own such a card and want to use it. |
45 | |||
46 | config DVB_AV7110_FIRMWARE_FILE | ||
47 | string "Full pathname of av7110 firmware file" | ||
48 | depends on DVB_AV7110_FIRMWARE | ||
49 | default "/usr/lib/hotplug/firmware/dvb-ttpci-01.fw" | ||
50 | 37 | ||
51 | config DVB_AV7110_OSD | 38 | config DVB_AV7110_OSD |
52 | bool "AV7110 OSD support" | 39 | bool "AV7110 OSD support" |
diff --git a/drivers/media/dvb/ttpci/Makefile b/drivers/media/dvb/ttpci/Makefile index 71451237294c..8a4d5bb20a5b 100644 --- a/drivers/media/dvb/ttpci/Makefile +++ b/drivers/media/dvb/ttpci/Makefile | |||
@@ -19,12 +19,3 @@ obj-$(CONFIG_DVB_AV7110) += dvb-ttpci.o | |||
19 | 19 | ||
20 | EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core/ -Idrivers/media/dvb/frontends/ | 20 | EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core/ -Idrivers/media/dvb/frontends/ |
21 | EXTRA_CFLAGS += -Idrivers/media/common/tuners | 21 | EXTRA_CFLAGS += -Idrivers/media/common/tuners |
22 | |||
23 | hostprogs-y := fdump | ||
24 | |||
25 | ifeq ($(CONFIG_DVB_AV7110_FIRMWARE),y) | ||
26 | $(obj)/av7110.o: $(obj)/av7110_firm.h | ||
27 | |||
28 | $(obj)/av7110_firm.h: $(obj)/fdump | ||
29 | $(obj)/fdump $(CONFIG_DVB_AV7110_FIRMWARE_FILE) dvb_ttpci_fw $@ | ||
30 | endif | ||
diff --git a/drivers/media/dvb/ttpci/av7110.c b/drivers/media/dvb/ttpci/av7110.c index 4624cee93e74..d1d959ed37b7 100644 --- a/drivers/media/dvb/ttpci/av7110.c +++ b/drivers/media/dvb/ttpci/av7110.c | |||
@@ -1518,20 +1518,6 @@ static int check_firmware(struct av7110* av7110) | |||
1518 | return 0; | 1518 | return 0; |
1519 | } | 1519 | } |
1520 | 1520 | ||
1521 | #ifdef CONFIG_DVB_AV7110_FIRMWARE_FILE | ||
1522 | #include "av7110_firm.h" | ||
1523 | static void put_firmware(struct av7110* av7110) | ||
1524 | { | ||
1525 | av7110->bin_fw = NULL; | ||
1526 | } | ||
1527 | |||
1528 | static inline int get_firmware(struct av7110* av7110) | ||
1529 | { | ||
1530 | av7110->bin_fw = dvb_ttpci_fw; | ||
1531 | av7110->size_fw = sizeof(dvb_ttpci_fw); | ||
1532 | return check_firmware(av7110); | ||
1533 | } | ||
1534 | #else | ||
1535 | static void put_firmware(struct av7110* av7110) | 1521 | static void put_firmware(struct av7110* av7110) |
1536 | { | 1522 | { |
1537 | vfree(av7110->bin_fw); | 1523 | vfree(av7110->bin_fw); |
@@ -1580,8 +1566,6 @@ static int get_firmware(struct av7110* av7110) | |||
1580 | release_firmware(fw); | 1566 | release_firmware(fw); |
1581 | return ret; | 1567 | return ret; |
1582 | } | 1568 | } |
1583 | #endif | ||
1584 | |||
1585 | 1569 | ||
1586 | static int alps_bsrv2_tuner_set_params(struct dvb_frontend* fe, struct dvb_frontend_parameters *params) | 1570 | static int alps_bsrv2_tuner_set_params(struct dvb_frontend* fe, struct dvb_frontend_parameters *params) |
1587 | { | 1571 | { |
diff --git a/drivers/media/dvb/ttpci/av7110_hw.c b/drivers/media/dvb/ttpci/av7110_hw.c index 3a3f5279e927..5e3f88911a1d 100644 --- a/drivers/media/dvb/ttpci/av7110_hw.c +++ b/drivers/media/dvb/ttpci/av7110_hw.c | |||
@@ -198,29 +198,10 @@ static int load_dram(struct av7110 *av7110, u32 *data, int len) | |||
198 | 198 | ||
199 | /* we cannot write av7110 DRAM directly, so load a bootloader into | 199 | /* we cannot write av7110 DRAM directly, so load a bootloader into |
200 | * the DPRAM which implements a simple boot protocol */ | 200 | * the DPRAM which implements a simple boot protocol */ |
201 | static u8 bootcode[] = { | ||
202 | 0xea, 0x00, 0x00, 0x0e, 0xe1, 0xb0, 0xf0, 0x0e, 0xe2, 0x5e, 0xf0, 0x04, | ||
203 | 0xe2, 0x5e, 0xf0, 0x04, 0xe2, 0x5e, 0xf0, 0x08, 0xe2, 0x5e, 0xf0, 0x04, | ||
204 | 0xe2, 0x5e, 0xf0, 0x04, 0xe2, 0x5e, 0xf0, 0x04, 0x2c, 0x00, 0x00, 0x24, | ||
205 | 0x00, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x2c, 0x00, 0x00, 0x34, | ||
206 | 0x00, 0x00, 0x00, 0x00, 0xa5, 0xa5, 0x5a, 0x5a, 0x00, 0x1f, 0x15, 0x55, | ||
207 | 0x00, 0x00, 0x00, 0x09, 0xe5, 0x9f, 0xd0, 0x7c, 0xe5, 0x9f, 0x40, 0x74, | ||
208 | 0xe3, 0xa0, 0x00, 0x00, 0xe5, 0x84, 0x00, 0x00, 0xe5, 0x84, 0x00, 0x04, | ||
209 | 0xe5, 0x9f, 0x10, 0x70, 0xe5, 0x9f, 0x20, 0x70, 0xe5, 0x9f, 0x30, 0x64, | ||
210 | 0xe8, 0xb1, 0x1f, 0xe0, 0xe8, 0xa3, 0x1f, 0xe0, 0xe1, 0x51, 0x00, 0x02, | ||
211 | 0xda, 0xff, 0xff, 0xfb, 0xe5, 0x9f, 0xf0, 0x50, 0xe1, 0xd4, 0x10, 0xb0, | ||
212 | 0xe3, 0x51, 0x00, 0x00, 0x0a, 0xff, 0xff, 0xfc, 0xe1, 0xa0, 0x10, 0x0d, | ||
213 | 0xe5, 0x94, 0x30, 0x04, 0xe1, 0xd4, 0x20, 0xb2, 0xe2, 0x82, 0x20, 0x3f, | ||
214 | 0xe1, 0xb0, 0x23, 0x22, 0x03, 0xa0, 0x00, 0x02, 0xe1, 0xc4, 0x00, 0xb0, | ||
215 | 0x0a, 0xff, 0xff, 0xf4, 0xe8, 0xb1, 0x1f, 0xe0, 0xe8, 0xa3, 0x1f, 0xe0, | ||
216 | 0xe8, 0xb1, 0x1f, 0xe0, 0xe8, 0xa3, 0x1f, 0xe0, 0xe2, 0x52, 0x20, 0x01, | ||
217 | 0x1a, 0xff, 0xff, 0xf9, 0xe2, 0x2d, 0xdb, 0x05, 0xea, 0xff, 0xff, 0xec, | ||
218 | 0x2c, 0x00, 0x03, 0xf8, 0x2c, 0x00, 0x04, 0x00, 0x9e, 0x00, 0x08, 0x00, | ||
219 | 0x2c, 0x00, 0x00, 0x74, 0x2c, 0x00, 0x00, 0xc0 | ||
220 | }; | ||
221 | |||
222 | int av7110_bootarm(struct av7110 *av7110) | 201 | int av7110_bootarm(struct av7110 *av7110) |
223 | { | 202 | { |
203 | const struct firmware *fw; | ||
204 | const char *fw_name = "av7110/bootcode.bin"; | ||
224 | struct saa7146_dev *dev = av7110->dev; | 205 | struct saa7146_dev *dev = av7110->dev; |
225 | u32 ret; | 206 | u32 ret; |
226 | int i; | 207 | int i; |
@@ -261,7 +242,15 @@ int av7110_bootarm(struct av7110 *av7110) | |||
261 | //saa7146_setgpio(dev, DEBI_DONE_LINE, SAA7146_GPIO_INPUT); | 242 | //saa7146_setgpio(dev, DEBI_DONE_LINE, SAA7146_GPIO_INPUT); |
262 | //saa7146_setgpio(dev, 3, SAA7146_GPIO_INPUT); | 243 | //saa7146_setgpio(dev, 3, SAA7146_GPIO_INPUT); |
263 | 244 | ||
264 | mwdebi(av7110, DEBISWAB, DPRAM_BASE, bootcode, sizeof(bootcode)); | 245 | ret = request_firmware(&fw, fw_name, &dev->pci->dev); |
246 | if (ret) { | ||
247 | printk(KERN_ERR "dvb-ttpci: Failed to load firmware \"%s\"\n", | ||
248 | fw_name); | ||
249 | return ret; | ||
250 | } | ||
251 | |||
252 | mwdebi(av7110, DEBISWAB, DPRAM_BASE, fw->data, fw->size); | ||
253 | release_firmware(fw); | ||
265 | iwdebi(av7110, DEBINOSWAP, AV7110_BOOT_STATE, BOOTSTATE_BUFFER_FULL, 2); | 254 | iwdebi(av7110, DEBINOSWAP, AV7110_BOOT_STATE, BOOTSTATE_BUFFER_FULL, 2); |
266 | 255 | ||
267 | if (saa7146_wait_for_debi_done(av7110->dev, 1)) { | 256 | if (saa7146_wait_for_debi_done(av7110->dev, 1)) { |
@@ -302,7 +291,7 @@ int av7110_bootarm(struct av7110 *av7110) | |||
302 | av7110->arm_ready = 1; | 291 | av7110->arm_ready = 1; |
303 | return 0; | 292 | return 0; |
304 | } | 293 | } |
305 | 294 | MODULE_FIRMWARE("av7110/bootcode.bin"); | |
306 | 295 | ||
307 | /**************************************************************************** | 296 | /**************************************************************************** |
308 | * DEBI command polling | 297 | * DEBI command polling |
diff --git a/drivers/media/dvb/ttpci/av7110_hw.h b/drivers/media/dvb/ttpci/av7110_hw.h index ca99e5c1fc8a..1634aba5cb84 100644 --- a/drivers/media/dvb/ttpci/av7110_hw.h +++ b/drivers/media/dvb/ttpci/av7110_hw.h | |||
@@ -390,7 +390,8 @@ static inline void iwdebi(struct av7110 *av7110, u32 config, int addr, u32 val, | |||
390 | } | 390 | } |
391 | 391 | ||
392 | /* buffer writes */ | 392 | /* buffer writes */ |
393 | static inline void mwdebi(struct av7110 *av7110, u32 config, int addr, u8 *val, int count) | 393 | static inline void mwdebi(struct av7110 *av7110, u32 config, int addr, |
394 | const u8 *val, int count) | ||
394 | { | 395 | { |
395 | memcpy(av7110->debi_virt, val, count); | 396 | memcpy(av7110->debi_virt, val, count); |
396 | av7110_debiwrite(av7110, config, addr, 0, count); | 397 | av7110_debiwrite(av7110, config, addr, 0, count); |
diff --git a/drivers/media/dvb/ttpci/fdump.c b/drivers/media/dvb/ttpci/fdump.c deleted file mode 100644 index c90001d35e7d..000000000000 --- a/drivers/media/dvb/ttpci/fdump.c +++ /dev/null | |||
@@ -1,44 +0,0 @@ | |||
1 | #include <stdio.h> | ||
2 | #include <sys/types.h> | ||
3 | #include <sys/stat.h> | ||
4 | #include <fcntl.h> | ||
5 | #include <unistd.h> | ||
6 | |||
7 | int main(int argc, char **argv) | ||
8 | { | ||
9 | unsigned char buf[8]; | ||
10 | unsigned int i, count, bytes = 0; | ||
11 | FILE *fd_in, *fd_out; | ||
12 | |||
13 | if (argc != 4) { | ||
14 | fprintf(stderr, "\n\tusage: %s <ucode.bin> <array_name> <output_name>\n\n", argv[0]); | ||
15 | return -1; | ||
16 | } | ||
17 | |||
18 | fd_in = fopen(argv[1], "rb"); | ||
19 | if (fd_in == NULL) { | ||
20 | fprintf(stderr, "firmware file '%s' not found\n", argv[1]); | ||
21 | return -1; | ||
22 | } | ||
23 | |||
24 | fd_out = fopen(argv[3], "w+"); | ||
25 | if (fd_out == NULL) { | ||
26 | fprintf(stderr, "cannot create output file '%s'\n", argv[3]); | ||
27 | return -1; | ||
28 | } | ||
29 | |||
30 | fprintf(fd_out, "\n#include <asm/types.h>\n\nu8 %s [] = {", argv[2]); | ||
31 | |||
32 | while ((count = fread(buf, 1, 8, fd_in)) > 0) { | ||
33 | fprintf(fd_out, "\n\t"); | ||
34 | for (i = 0; i < count; i++, bytes++) | ||
35 | fprintf(fd_out, "0x%02x, ", buf[i]); | ||
36 | } | ||
37 | |||
38 | fprintf(fd_out, "\n};\n\n"); | ||
39 | |||
40 | fclose(fd_in); | ||
41 | fclose(fd_out); | ||
42 | |||
43 | return 0; | ||
44 | } | ||
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw.c b/drivers/media/video/pvrusb2/pvrusb2-hdw.c index d9d974a8f52a..add3395d3248 100644 --- a/drivers/media/video/pvrusb2/pvrusb2-hdw.c +++ b/drivers/media/video/pvrusb2/pvrusb2-hdw.c | |||
@@ -1461,7 +1461,6 @@ static int pvr2_upload_firmware1(struct pvr2_hdw *hdw) | |||
1461 | return ret; | 1461 | return ret; |
1462 | } | 1462 | } |
1463 | 1463 | ||
1464 | usb_settoggle(hdw->usb_dev, 0 & 0xf, !(0 & USB_DIR_IN), 0); | ||
1465 | usb_clear_halt(hdw->usb_dev, usb_sndbulkpipe(hdw->usb_dev, 0 & 0x7f)); | 1464 | usb_clear_halt(hdw->usb_dev, usb_sndbulkpipe(hdw->usb_dev, 0 & 0x7f)); |
1466 | 1465 | ||
1467 | pipe = usb_sndctrlpipe(hdw->usb_dev, 0); | 1466 | pipe = usb_sndctrlpipe(hdw->usb_dev, 0); |
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c index d184dfab9631..db39f4a52f53 100644 --- a/drivers/misc/eeprom/at24.c +++ b/drivers/misc/eeprom/at24.c | |||
@@ -278,7 +278,7 @@ static ssize_t at24_bin_read(struct kobject *kobj, struct bin_attribute *attr, | |||
278 | * We only use page mode writes; the alternative is sloooow. This routine | 278 | * We only use page mode writes; the alternative is sloooow. This routine |
279 | * writes at most one page. | 279 | * writes at most one page. |
280 | */ | 280 | */ |
281 | static ssize_t at24_eeprom_write(struct at24_data *at24, char *buf, | 281 | static ssize_t at24_eeprom_write(struct at24_data *at24, const char *buf, |
282 | unsigned offset, size_t count) | 282 | unsigned offset, size_t count) |
283 | { | 283 | { |
284 | struct i2c_client *client; | 284 | struct i2c_client *client; |
@@ -347,8 +347,8 @@ static ssize_t at24_eeprom_write(struct at24_data *at24, char *buf, | |||
347 | return -ETIMEDOUT; | 347 | return -ETIMEDOUT; |
348 | } | 348 | } |
349 | 349 | ||
350 | static ssize_t at24_write(struct at24_data *at24, | 350 | static ssize_t at24_write(struct at24_data *at24, const char *buf, loff_t off, |
351 | char *buf, loff_t off, size_t count) | 351 | size_t count) |
352 | { | 352 | { |
353 | ssize_t retval = 0; | 353 | ssize_t retval = 0; |
354 | 354 | ||
@@ -406,7 +406,7 @@ static ssize_t at24_macc_read(struct memory_accessor *macc, char *buf, | |||
406 | return at24_read(at24, buf, offset, count); | 406 | return at24_read(at24, buf, offset, count); |
407 | } | 407 | } |
408 | 408 | ||
409 | static ssize_t at24_macc_write(struct memory_accessor *macc, char *buf, | 409 | static ssize_t at24_macc_write(struct memory_accessor *macc, const char *buf, |
410 | off_t offset, size_t count) | 410 | off_t offset, size_t count) |
411 | { | 411 | { |
412 | struct at24_data *at24 = container_of(macc, struct at24_data, macc); | 412 | struct at24_data *at24 = container_of(macc, struct at24_data, macc); |
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c index 6bc0dac5c1e8..b34cb5f79eea 100644 --- a/drivers/misc/eeprom/at25.c +++ b/drivers/misc/eeprom/at25.c | |||
@@ -140,7 +140,8 @@ at25_bin_read(struct kobject *kobj, struct bin_attribute *bin_attr, | |||
140 | 140 | ||
141 | 141 | ||
142 | static ssize_t | 142 | static ssize_t |
143 | at25_ee_write(struct at25_data *at25, char *buf, loff_t off, size_t count) | 143 | at25_ee_write(struct at25_data *at25, const char *buf, loff_t off, |
144 | size_t count) | ||
144 | { | 145 | { |
145 | ssize_t status = 0; | 146 | ssize_t status = 0; |
146 | unsigned written = 0; | 147 | unsigned written = 0; |
@@ -276,7 +277,7 @@ static ssize_t at25_mem_read(struct memory_accessor *mem, char *buf, | |||
276 | return at25_ee_read(at25, buf, offset, count); | 277 | return at25_ee_read(at25, buf, offset, count); |
277 | } | 278 | } |
278 | 279 | ||
279 | static ssize_t at25_mem_write(struct memory_accessor *mem, char *buf, | 280 | static ssize_t at25_mem_write(struct memory_accessor *mem, const char *buf, |
280 | off_t offset, size_t count) | 281 | off_t offset, size_t count) |
281 | { | 282 | { |
282 | struct at25_data *at25 = container_of(mem, struct at25_data, mem); | 283 | struct at25_data *at25 = container_of(mem, struct at25_data, mem); |
diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h index 114444cfd496..b94d5f767703 100644 --- a/drivers/misc/sgi-xp/xpc.h +++ b/drivers/misc/sgi-xp/xpc.h | |||
@@ -90,18 +90,21 @@ struct xpc_rsvd_page { | |||
90 | short max_npartitions; /* value of XPC_MAX_PARTITIONS */ | 90 | short max_npartitions; /* value of XPC_MAX_PARTITIONS */ |
91 | u8 version; | 91 | u8 version; |
92 | u8 pad1[3]; /* align to next u64 in 1st 64-byte cacheline */ | 92 | u8 pad1[3]; /* align to next u64 in 1st 64-byte cacheline */ |
93 | unsigned long ts_jiffies; /* timestamp when rsvd pg was setup by XPC */ | ||
93 | union { | 94 | union { |
94 | unsigned long vars_pa; /* phys address of struct xpc_vars */ | 95 | struct { |
95 | unsigned long activate_gru_mq_desc_gpa; /* phys addr of */ | 96 | unsigned long vars_pa; /* phys addr */ |
96 | /* activate mq's */ | 97 | } sn2; |
97 | /* gru mq descriptor */ | 98 | struct { |
99 | unsigned long heartbeat_gpa; /* phys addr */ | ||
100 | unsigned long activate_gru_mq_desc_gpa; /* phys addr */ | ||
101 | } uv; | ||
98 | } sn; | 102 | } sn; |
99 | unsigned long ts_jiffies; /* timestamp when rsvd pg was setup by XPC */ | 103 | u64 pad2[9]; /* align to last u64 in 2nd 64-byte cacheline */ |
100 | u64 pad2[10]; /* align to last u64 in 2nd 64-byte cacheline */ | ||
101 | u64 SAL_nasids_size; /* SAL: size of each nasid mask in bytes */ | 104 | u64 SAL_nasids_size; /* SAL: size of each nasid mask in bytes */ |
102 | }; | 105 | }; |
103 | 106 | ||
104 | #define XPC_RP_VERSION _XPC_VERSION(2, 0) /* version 2.0 of the reserved page */ | 107 | #define XPC_RP_VERSION _XPC_VERSION(3, 0) /* version 3.0 of the reserved page */ |
105 | 108 | ||
106 | /* | 109 | /* |
107 | * Define the structures by which XPC variables can be exported to other | 110 | * Define the structures by which XPC variables can be exported to other |
@@ -182,6 +185,17 @@ struct xpc_vars_part_sn2 { | |||
182 | (XPC_RP_MACH_NASIDS(_rp) + \ | 185 | (XPC_RP_MACH_NASIDS(_rp) + \ |
183 | xpc_nasid_mask_nlongs)) | 186 | xpc_nasid_mask_nlongs)) |
184 | 187 | ||
188 | |||
189 | /* | ||
190 | * The following structure describes the partition's heartbeat info which | ||
191 | * will be periodically read by other partitions to determine whether this | ||
192 | * XPC is still 'alive'. | ||
193 | */ | ||
194 | struct xpc_heartbeat_uv { | ||
195 | unsigned long value; | ||
196 | unsigned long offline; /* if 0, heartbeat should be changing */ | ||
197 | }; | ||
198 | |||
185 | /* | 199 | /* |
186 | * Info pertinent to a GRU message queue using a watch list for irq generation. | 200 | * Info pertinent to a GRU message queue using a watch list for irq generation. |
187 | */ | 201 | */ |
@@ -198,7 +212,7 @@ struct xpc_gru_mq_uv { | |||
198 | 212 | ||
199 | /* | 213 | /* |
200 | * The activate_mq is used to send/receive GRU messages that affect XPC's | 214 | * The activate_mq is used to send/receive GRU messages that affect XPC's |
201 | * heartbeat, partition active state, and channel state. This is UV only. | 215 | * partition active state and channel state. This is uv only. |
202 | */ | 216 | */ |
203 | struct xpc_activate_mq_msghdr_uv { | 217 | struct xpc_activate_mq_msghdr_uv { |
204 | unsigned int gru_msg_hdr; /* FOR GRU INTERNAL USE ONLY */ | 218 | unsigned int gru_msg_hdr; /* FOR GRU INTERNAL USE ONLY */ |
@@ -210,33 +224,27 @@ struct xpc_activate_mq_msghdr_uv { | |||
210 | 224 | ||
211 | /* activate_mq defined message types */ | 225 | /* activate_mq defined message types */ |
212 | #define XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV 0 | 226 | #define XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV 0 |
213 | #define XPC_ACTIVATE_MQ_MSG_INC_HEARTBEAT_UV 1 | ||
214 | #define XPC_ACTIVATE_MQ_MSG_OFFLINE_HEARTBEAT_UV 2 | ||
215 | #define XPC_ACTIVATE_MQ_MSG_ONLINE_HEARTBEAT_UV 3 | ||
216 | 227 | ||
217 | #define XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV 4 | 228 | #define XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV 1 |
218 | #define XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV 5 | 229 | #define XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV 2 |
219 | 230 | ||
220 | #define XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV 6 | 231 | #define XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV 3 |
221 | #define XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV 7 | 232 | #define XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV 4 |
222 | #define XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV 8 | 233 | #define XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV 5 |
223 | #define XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV 9 | 234 | #define XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV 6 |
235 | #define XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV 7 | ||
224 | 236 | ||
225 | #define XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV 10 | 237 | #define XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV 8 |
226 | #define XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV 11 | 238 | #define XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV 9 |
227 | 239 | ||
228 | struct xpc_activate_mq_msg_uv { | 240 | struct xpc_activate_mq_msg_uv { |
229 | struct xpc_activate_mq_msghdr_uv hdr; | 241 | struct xpc_activate_mq_msghdr_uv hdr; |
230 | }; | 242 | }; |
231 | 243 | ||
232 | struct xpc_activate_mq_msg_heartbeat_req_uv { | ||
233 | struct xpc_activate_mq_msghdr_uv hdr; | ||
234 | u64 heartbeat; | ||
235 | }; | ||
236 | |||
237 | struct xpc_activate_mq_msg_activate_req_uv { | 244 | struct xpc_activate_mq_msg_activate_req_uv { |
238 | struct xpc_activate_mq_msghdr_uv hdr; | 245 | struct xpc_activate_mq_msghdr_uv hdr; |
239 | unsigned long rp_gpa; | 246 | unsigned long rp_gpa; |
247 | unsigned long heartbeat_gpa; | ||
240 | unsigned long activate_gru_mq_desc_gpa; | 248 | unsigned long activate_gru_mq_desc_gpa; |
241 | }; | 249 | }; |
242 | 250 | ||
@@ -271,6 +279,11 @@ struct xpc_activate_mq_msg_chctl_openreply_uv { | |||
271 | unsigned long notify_gru_mq_desc_gpa; | 279 | unsigned long notify_gru_mq_desc_gpa; |
272 | }; | 280 | }; |
273 | 281 | ||
282 | struct xpc_activate_mq_msg_chctl_opencomplete_uv { | ||
283 | struct xpc_activate_mq_msghdr_uv hdr; | ||
284 | short ch_number; | ||
285 | }; | ||
286 | |||
274 | /* | 287 | /* |
275 | * Functions registered by add_timer() or called by kernel_thread() only | 288 | * Functions registered by add_timer() or called by kernel_thread() only |
276 | * allow for a single 64-bit argument. The following macros can be used to | 289 | * allow for a single 64-bit argument. The following macros can be used to |
@@ -576,30 +589,32 @@ struct xpc_channel { | |||
576 | 589 | ||
577 | #define XPC_C_WASCONNECTED 0x00000001 /* channel was connected */ | 590 | #define XPC_C_WASCONNECTED 0x00000001 /* channel was connected */ |
578 | 591 | ||
579 | #define XPC_C_ROPENREPLY 0x00000002 /* remote open channel reply */ | 592 | #define XPC_C_ROPENCOMPLETE 0x00000002 /* remote open channel complete */ |
580 | #define XPC_C_OPENREPLY 0x00000004 /* local open channel reply */ | 593 | #define XPC_C_OPENCOMPLETE 0x00000004 /* local open channel complete */ |
581 | #define XPC_C_ROPENREQUEST 0x00000008 /* remote open channel request */ | 594 | #define XPC_C_ROPENREPLY 0x00000008 /* remote open channel reply */ |
582 | #define XPC_C_OPENREQUEST 0x00000010 /* local open channel request */ | 595 | #define XPC_C_OPENREPLY 0x00000010 /* local open channel reply */ |
596 | #define XPC_C_ROPENREQUEST 0x00000020 /* remote open channel request */ | ||
597 | #define XPC_C_OPENREQUEST 0x00000040 /* local open channel request */ | ||
583 | 598 | ||
584 | #define XPC_C_SETUP 0x00000020 /* channel's msgqueues are alloc'd */ | 599 | #define XPC_C_SETUP 0x00000080 /* channel's msgqueues are alloc'd */ |
585 | #define XPC_C_CONNECTEDCALLOUT 0x00000040 /* connected callout initiated */ | 600 | #define XPC_C_CONNECTEDCALLOUT 0x00000100 /* connected callout initiated */ |
586 | #define XPC_C_CONNECTEDCALLOUT_MADE \ | 601 | #define XPC_C_CONNECTEDCALLOUT_MADE \ |
587 | 0x00000080 /* connected callout completed */ | 602 | 0x00000200 /* connected callout completed */ |
588 | #define XPC_C_CONNECTED 0x00000100 /* local channel is connected */ | 603 | #define XPC_C_CONNECTED 0x00000400 /* local channel is connected */ |
589 | #define XPC_C_CONNECTING 0x00000200 /* channel is being connected */ | 604 | #define XPC_C_CONNECTING 0x00000800 /* channel is being connected */ |
590 | 605 | ||
591 | #define XPC_C_RCLOSEREPLY 0x00000400 /* remote close channel reply */ | 606 | #define XPC_C_RCLOSEREPLY 0x00001000 /* remote close channel reply */ |
592 | #define XPC_C_CLOSEREPLY 0x00000800 /* local close channel reply */ | 607 | #define XPC_C_CLOSEREPLY 0x00002000 /* local close channel reply */ |
593 | #define XPC_C_RCLOSEREQUEST 0x00001000 /* remote close channel request */ | 608 | #define XPC_C_RCLOSEREQUEST 0x00004000 /* remote close channel request */ |
594 | #define XPC_C_CLOSEREQUEST 0x00002000 /* local close channel request */ | 609 | #define XPC_C_CLOSEREQUEST 0x00008000 /* local close channel request */ |
595 | 610 | ||
596 | #define XPC_C_DISCONNECTED 0x00004000 /* channel is disconnected */ | 611 | #define XPC_C_DISCONNECTED 0x00010000 /* channel is disconnected */ |
597 | #define XPC_C_DISCONNECTING 0x00008000 /* channel is being disconnected */ | 612 | #define XPC_C_DISCONNECTING 0x00020000 /* channel is being disconnected */ |
598 | #define XPC_C_DISCONNECTINGCALLOUT \ | 613 | #define XPC_C_DISCONNECTINGCALLOUT \ |
599 | 0x00010000 /* disconnecting callout initiated */ | 614 | 0x00040000 /* disconnecting callout initiated */ |
600 | #define XPC_C_DISCONNECTINGCALLOUT_MADE \ | 615 | #define XPC_C_DISCONNECTINGCALLOUT_MADE \ |
601 | 0x00020000 /* disconnecting callout completed */ | 616 | 0x00080000 /* disconnecting callout completed */ |
602 | #define XPC_C_WDISCONNECT 0x00040000 /* waiting for channel disconnect */ | 617 | #define XPC_C_WDISCONNECT 0x00100000 /* waiting for channel disconnect */ |
603 | 618 | ||
604 | /* | 619 | /* |
605 | * The channel control flags (chctl) union consists of a 64-bit variable which | 620 | * The channel control flags (chctl) union consists of a 64-bit variable which |
@@ -618,11 +633,13 @@ union xpc_channel_ctl_flags { | |||
618 | #define XPC_CHCTL_CLOSEREPLY 0x02 | 633 | #define XPC_CHCTL_CLOSEREPLY 0x02 |
619 | #define XPC_CHCTL_OPENREQUEST 0x04 | 634 | #define XPC_CHCTL_OPENREQUEST 0x04 |
620 | #define XPC_CHCTL_OPENREPLY 0x08 | 635 | #define XPC_CHCTL_OPENREPLY 0x08 |
621 | #define XPC_CHCTL_MSGREQUEST 0x10 | 636 | #define XPC_CHCTL_OPENCOMPLETE 0x10 |
637 | #define XPC_CHCTL_MSGREQUEST 0x20 | ||
622 | 638 | ||
623 | #define XPC_OPENCLOSE_CHCTL_FLAGS \ | 639 | #define XPC_OPENCLOSE_CHCTL_FLAGS \ |
624 | (XPC_CHCTL_CLOSEREQUEST | XPC_CHCTL_CLOSEREPLY | \ | 640 | (XPC_CHCTL_CLOSEREQUEST | XPC_CHCTL_CLOSEREPLY | \ |
625 | XPC_CHCTL_OPENREQUEST | XPC_CHCTL_OPENREPLY) | 641 | XPC_CHCTL_OPENREQUEST | XPC_CHCTL_OPENREPLY | \ |
642 | XPC_CHCTL_OPENCOMPLETE) | ||
626 | #define XPC_MSG_CHCTL_FLAGS XPC_CHCTL_MSGREQUEST | 643 | #define XPC_MSG_CHCTL_FLAGS XPC_CHCTL_MSGREQUEST |
627 | 644 | ||
628 | static inline int | 645 | static inline int |
@@ -687,6 +704,9 @@ struct xpc_partition_sn2 { | |||
687 | }; | 704 | }; |
688 | 705 | ||
689 | struct xpc_partition_uv { | 706 | struct xpc_partition_uv { |
707 | unsigned long heartbeat_gpa; /* phys addr of partition's heartbeat */ | ||
708 | struct xpc_heartbeat_uv cached_heartbeat; /* cached copy of */ | ||
709 | /* partition's heartbeat */ | ||
690 | unsigned long activate_gru_mq_desc_gpa; /* phys addr of partition's */ | 710 | unsigned long activate_gru_mq_desc_gpa; /* phys addr of partition's */ |
691 | /* activate mq's gru mq */ | 711 | /* activate mq's gru mq */ |
692 | /* descriptor */ | 712 | /* descriptor */ |
@@ -698,14 +718,12 @@ struct xpc_partition_uv { | |||
698 | u8 remote_act_state; /* remote partition's act_state */ | 718 | u8 remote_act_state; /* remote partition's act_state */ |
699 | u8 act_state_req; /* act_state request from remote partition */ | 719 | u8 act_state_req; /* act_state request from remote partition */ |
700 | enum xp_retval reason; /* reason for deactivate act_state request */ | 720 | enum xp_retval reason; /* reason for deactivate act_state request */ |
701 | u64 heartbeat; /* incremented by remote partition */ | ||
702 | }; | 721 | }; |
703 | 722 | ||
704 | /* struct xpc_partition_uv flags */ | 723 | /* struct xpc_partition_uv flags */ |
705 | 724 | ||
706 | #define XPC_P_HEARTBEAT_OFFLINE_UV 0x00000001 | 725 | #define XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV 0x00000001 |
707 | #define XPC_P_ENGAGED_UV 0x00000002 | 726 | #define XPC_P_ENGAGED_UV 0x00000002 |
708 | #define XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV 0x00000004 | ||
709 | 727 | ||
710 | /* struct xpc_partition_uv act_state change requests */ | 728 | /* struct xpc_partition_uv act_state change requests */ |
711 | 729 | ||
@@ -762,6 +780,62 @@ struct xpc_partition { | |||
762 | 780 | ||
763 | } ____cacheline_aligned; | 781 | } ____cacheline_aligned; |
764 | 782 | ||
783 | struct xpc_arch_operations { | ||
784 | int (*setup_partitions) (void); | ||
785 | void (*teardown_partitions) (void); | ||
786 | void (*process_activate_IRQ_rcvd) (void); | ||
787 | enum xp_retval (*get_partition_rsvd_page_pa) | ||
788 | (void *, u64 *, unsigned long *, size_t *); | ||
789 | int (*setup_rsvd_page) (struct xpc_rsvd_page *); | ||
790 | |||
791 | void (*allow_hb) (short); | ||
792 | void (*disallow_hb) (short); | ||
793 | void (*disallow_all_hbs) (void); | ||
794 | void (*increment_heartbeat) (void); | ||
795 | void (*offline_heartbeat) (void); | ||
796 | void (*online_heartbeat) (void); | ||
797 | void (*heartbeat_init) (void); | ||
798 | void (*heartbeat_exit) (void); | ||
799 | enum xp_retval (*get_remote_heartbeat) (struct xpc_partition *); | ||
800 | |||
801 | void (*request_partition_activation) (struct xpc_rsvd_page *, | ||
802 | unsigned long, int); | ||
803 | void (*request_partition_reactivation) (struct xpc_partition *); | ||
804 | void (*request_partition_deactivation) (struct xpc_partition *); | ||
805 | void (*cancel_partition_deactivation_request) (struct xpc_partition *); | ||
806 | enum xp_retval (*setup_ch_structures) (struct xpc_partition *); | ||
807 | void (*teardown_ch_structures) (struct xpc_partition *); | ||
808 | |||
809 | enum xp_retval (*make_first_contact) (struct xpc_partition *); | ||
810 | |||
811 | u64 (*get_chctl_all_flags) (struct xpc_partition *); | ||
812 | void (*send_chctl_closerequest) (struct xpc_channel *, unsigned long *); | ||
813 | void (*send_chctl_closereply) (struct xpc_channel *, unsigned long *); | ||
814 | void (*send_chctl_openrequest) (struct xpc_channel *, unsigned long *); | ||
815 | void (*send_chctl_openreply) (struct xpc_channel *, unsigned long *); | ||
816 | void (*send_chctl_opencomplete) (struct xpc_channel *, unsigned long *); | ||
817 | void (*process_msg_chctl_flags) (struct xpc_partition *, int); | ||
818 | |||
819 | enum xp_retval (*save_remote_msgqueue_pa) (struct xpc_channel *, | ||
820 | unsigned long); | ||
821 | |||
822 | enum xp_retval (*setup_msg_structures) (struct xpc_channel *); | ||
823 | void (*teardown_msg_structures) (struct xpc_channel *); | ||
824 | |||
825 | void (*indicate_partition_engaged) (struct xpc_partition *); | ||
826 | void (*indicate_partition_disengaged) (struct xpc_partition *); | ||
827 | void (*assume_partition_disengaged) (short); | ||
828 | int (*partition_engaged) (short); | ||
829 | int (*any_partition_engaged) (void); | ||
830 | |||
831 | int (*n_of_deliverable_payloads) (struct xpc_channel *); | ||
832 | enum xp_retval (*send_payload) (struct xpc_channel *, u32, void *, | ||
833 | u16, u8, xpc_notify_func, void *); | ||
834 | void *(*get_deliverable_payload) (struct xpc_channel *); | ||
835 | void (*received_payload) (struct xpc_channel *, void *); | ||
836 | void (*notify_senders_of_disconnect) (struct xpc_channel *); | ||
837 | }; | ||
838 | |||
765 | /* struct xpc_partition act_state values (for XPC HB) */ | 839 | /* struct xpc_partition act_state values (for XPC HB) */ |
766 | 840 | ||
767 | #define XPC_P_AS_INACTIVE 0x00 /* partition is not active */ | 841 | #define XPC_P_AS_INACTIVE 0x00 /* partition is not active */ |
@@ -802,67 +876,17 @@ extern struct xpc_registration xpc_registrations[]; | |||
802 | /* found in xpc_main.c */ | 876 | /* found in xpc_main.c */ |
803 | extern struct device *xpc_part; | 877 | extern struct device *xpc_part; |
804 | extern struct device *xpc_chan; | 878 | extern struct device *xpc_chan; |
879 | extern struct xpc_arch_operations xpc_arch_ops; | ||
805 | extern int xpc_disengage_timelimit; | 880 | extern int xpc_disengage_timelimit; |
806 | extern int xpc_disengage_timedout; | 881 | extern int xpc_disengage_timedout; |
807 | extern int xpc_activate_IRQ_rcvd; | 882 | extern int xpc_activate_IRQ_rcvd; |
808 | extern spinlock_t xpc_activate_IRQ_rcvd_lock; | 883 | extern spinlock_t xpc_activate_IRQ_rcvd_lock; |
809 | extern wait_queue_head_t xpc_activate_IRQ_wq; | 884 | extern wait_queue_head_t xpc_activate_IRQ_wq; |
810 | extern void *xpc_heartbeating_to_mask; | ||
811 | extern void *xpc_kzalloc_cacheline_aligned(size_t, gfp_t, void **); | 885 | extern void *xpc_kzalloc_cacheline_aligned(size_t, gfp_t, void **); |
812 | extern void xpc_activate_partition(struct xpc_partition *); | 886 | extern void xpc_activate_partition(struct xpc_partition *); |
813 | extern void xpc_activate_kthreads(struct xpc_channel *, int); | 887 | extern void xpc_activate_kthreads(struct xpc_channel *, int); |
814 | extern void xpc_create_kthreads(struct xpc_channel *, int, int); | 888 | extern void xpc_create_kthreads(struct xpc_channel *, int, int); |
815 | extern void xpc_disconnect_wait(int); | 889 | extern void xpc_disconnect_wait(int); |
816 | extern int (*xpc_setup_partitions_sn) (void); | ||
817 | extern void (*xpc_teardown_partitions_sn) (void); | ||
818 | extern enum xp_retval (*xpc_get_partition_rsvd_page_pa) (void *, u64 *, | ||
819 | unsigned long *, | ||
820 | size_t *); | ||
821 | extern int (*xpc_setup_rsvd_page_sn) (struct xpc_rsvd_page *); | ||
822 | extern void (*xpc_heartbeat_init) (void); | ||
823 | extern void (*xpc_heartbeat_exit) (void); | ||
824 | extern void (*xpc_increment_heartbeat) (void); | ||
825 | extern void (*xpc_offline_heartbeat) (void); | ||
826 | extern void (*xpc_online_heartbeat) (void); | ||
827 | extern enum xp_retval (*xpc_get_remote_heartbeat) (struct xpc_partition *); | ||
828 | extern enum xp_retval (*xpc_make_first_contact) (struct xpc_partition *); | ||
829 | extern u64 (*xpc_get_chctl_all_flags) (struct xpc_partition *); | ||
830 | extern enum xp_retval (*xpc_setup_msg_structures) (struct xpc_channel *); | ||
831 | extern void (*xpc_teardown_msg_structures) (struct xpc_channel *); | ||
832 | extern void (*xpc_notify_senders_of_disconnect) (struct xpc_channel *); | ||
833 | extern void (*xpc_process_msg_chctl_flags) (struct xpc_partition *, int); | ||
834 | extern int (*xpc_n_of_deliverable_payloads) (struct xpc_channel *); | ||
835 | extern void *(*xpc_get_deliverable_payload) (struct xpc_channel *); | ||
836 | extern void (*xpc_request_partition_activation) (struct xpc_rsvd_page *, | ||
837 | unsigned long, int); | ||
838 | extern void (*xpc_request_partition_reactivation) (struct xpc_partition *); | ||
839 | extern void (*xpc_request_partition_deactivation) (struct xpc_partition *); | ||
840 | extern void (*xpc_cancel_partition_deactivation_request) ( | ||
841 | struct xpc_partition *); | ||
842 | extern void (*xpc_process_activate_IRQ_rcvd) (void); | ||
843 | extern enum xp_retval (*xpc_setup_ch_structures_sn) (struct xpc_partition *); | ||
844 | extern void (*xpc_teardown_ch_structures_sn) (struct xpc_partition *); | ||
845 | |||
846 | extern void (*xpc_indicate_partition_engaged) (struct xpc_partition *); | ||
847 | extern int (*xpc_partition_engaged) (short); | ||
848 | extern int (*xpc_any_partition_engaged) (void); | ||
849 | extern void (*xpc_indicate_partition_disengaged) (struct xpc_partition *); | ||
850 | extern void (*xpc_assume_partition_disengaged) (short); | ||
851 | |||
852 | extern void (*xpc_send_chctl_closerequest) (struct xpc_channel *, | ||
853 | unsigned long *); | ||
854 | extern void (*xpc_send_chctl_closereply) (struct xpc_channel *, | ||
855 | unsigned long *); | ||
856 | extern void (*xpc_send_chctl_openrequest) (struct xpc_channel *, | ||
857 | unsigned long *); | ||
858 | extern void (*xpc_send_chctl_openreply) (struct xpc_channel *, unsigned long *); | ||
859 | |||
860 | extern enum xp_retval (*xpc_save_remote_msgqueue_pa) (struct xpc_channel *, | ||
861 | unsigned long); | ||
862 | |||
863 | extern enum xp_retval (*xpc_send_payload) (struct xpc_channel *, u32, void *, | ||
864 | u16, u8, xpc_notify_func, void *); | ||
865 | extern void (*xpc_received_payload) (struct xpc_channel *, void *); | ||
866 | 890 | ||
867 | /* found in xpc_sn2.c */ | 891 | /* found in xpc_sn2.c */ |
868 | extern int xpc_init_sn2(void); | 892 | extern int xpc_init_sn2(void); |
@@ -909,40 +933,6 @@ extern void xpc_disconnect_channel(const int, struct xpc_channel *, | |||
909 | extern void xpc_disconnect_callout(struct xpc_channel *, enum xp_retval); | 933 | extern void xpc_disconnect_callout(struct xpc_channel *, enum xp_retval); |
910 | extern void xpc_partition_going_down(struct xpc_partition *, enum xp_retval); | 934 | extern void xpc_partition_going_down(struct xpc_partition *, enum xp_retval); |
911 | 935 | ||
912 | static inline int | ||
913 | xpc_hb_allowed(short partid, void *heartbeating_to_mask) | ||
914 | { | ||
915 | return test_bit(partid, heartbeating_to_mask); | ||
916 | } | ||
917 | |||
918 | static inline int | ||
919 | xpc_any_hbs_allowed(void) | ||
920 | { | ||
921 | DBUG_ON(xpc_heartbeating_to_mask == NULL); | ||
922 | return !bitmap_empty(xpc_heartbeating_to_mask, xp_max_npartitions); | ||
923 | } | ||
924 | |||
925 | static inline void | ||
926 | xpc_allow_hb(short partid) | ||
927 | { | ||
928 | DBUG_ON(xpc_heartbeating_to_mask == NULL); | ||
929 | set_bit(partid, xpc_heartbeating_to_mask); | ||
930 | } | ||
931 | |||
932 | static inline void | ||
933 | xpc_disallow_hb(short partid) | ||
934 | { | ||
935 | DBUG_ON(xpc_heartbeating_to_mask == NULL); | ||
936 | clear_bit(partid, xpc_heartbeating_to_mask); | ||
937 | } | ||
938 | |||
939 | static inline void | ||
940 | xpc_disallow_all_hbs(void) | ||
941 | { | ||
942 | DBUG_ON(xpc_heartbeating_to_mask == NULL); | ||
943 | bitmap_zero(xpc_heartbeating_to_mask, xp_max_npartitions); | ||
944 | } | ||
945 | |||
946 | static inline void | 936 | static inline void |
947 | xpc_wakeup_channel_mgr(struct xpc_partition *part) | 937 | xpc_wakeup_channel_mgr(struct xpc_partition *part) |
948 | { | 938 | { |
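The xpc.h hunks above remove the long block of standalone extern function pointers and the inline heartbeat-mask helpers, replacing them with a single struct xpc_arch_operations table and one global xpc_arch_ops instance. A minimal user-space sketch of that ops-table pattern follows; every name in it is invented for illustration, and in the real driver the sn2 and uv back ends fill in the actual structure during their init routines.

/*
 * Minimal sketch of the ops-table pattern introduced in xpc.h: one struct
 * of function pointers, filled in by whichever back end initializes, and
 * called through a single global instance instead of many separate
 * function-pointer variables.  Names are illustrative only.
 */
#include <stdio.h>

struct demo_arch_operations {
	int  (*setup_partitions)(void);
	void (*increment_heartbeat)(void);
};

/* an "sn2-like" back end */
static int demo_setup_partitions_sn2(void)     { puts("sn2 setup");     return 0; }
static void demo_increment_heartbeat_sn2(void) { puts("sn2 heartbeat"); }

static const struct demo_arch_operations demo_arch_ops_sn2 = {
	.setup_partitions    = demo_setup_partitions_sn2,
	.increment_heartbeat = demo_increment_heartbeat_sn2,
};

/* the single instance the common code calls through */
static struct demo_arch_operations demo_arch_ops;

int main(void)
{
	demo_arch_ops = demo_arch_ops_sn2;	/* the driver's init code does the real equivalent */
	if (demo_arch_ops.setup_partitions() == 0)
		demo_arch_ops.increment_heartbeat();
	return 0;
}

The payoff, visible throughout the later hunks, is that common code stays ignorant of which back end is active: it only ever calls through the one table, and switching back ends is a single structure assignment.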
diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c index 99a2534c38a1..652593fc486d 100644 --- a/drivers/misc/sgi-xp/xpc_channel.c +++ b/drivers/misc/sgi-xp/xpc_channel.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved. | 6 | * Copyright (c) 2004-2009 Silicon Graphics, Inc. All Rights Reserved. |
7 | */ | 7 | */ |
8 | 8 | ||
9 | /* | 9 | /* |
@@ -39,34 +39,38 @@ xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags) | |||
39 | 39 | ||
40 | if (!(ch->flags & XPC_C_SETUP)) { | 40 | if (!(ch->flags & XPC_C_SETUP)) { |
41 | spin_unlock_irqrestore(&ch->lock, *irq_flags); | 41 | spin_unlock_irqrestore(&ch->lock, *irq_flags); |
42 | ret = xpc_setup_msg_structures(ch); | 42 | ret = xpc_arch_ops.setup_msg_structures(ch); |
43 | spin_lock_irqsave(&ch->lock, *irq_flags); | 43 | spin_lock_irqsave(&ch->lock, *irq_flags); |
44 | 44 | ||
45 | if (ret != xpSuccess) | 45 | if (ret != xpSuccess) |
46 | XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags); | 46 | XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags); |
47 | else | ||
48 | ch->flags |= XPC_C_SETUP; | ||
47 | 49 | ||
48 | ch->flags |= XPC_C_SETUP; | 50 | if (ch->flags & XPC_C_DISCONNECTING) |
49 | |||
50 | if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING)) | ||
51 | return; | 51 | return; |
52 | } | 52 | } |
53 | 53 | ||
54 | if (!(ch->flags & XPC_C_OPENREPLY)) { | 54 | if (!(ch->flags & XPC_C_OPENREPLY)) { |
55 | ch->flags |= XPC_C_OPENREPLY; | 55 | ch->flags |= XPC_C_OPENREPLY; |
56 | xpc_send_chctl_openreply(ch, irq_flags); | 56 | xpc_arch_ops.send_chctl_openreply(ch, irq_flags); |
57 | } | 57 | } |
58 | 58 | ||
59 | if (!(ch->flags & XPC_C_ROPENREPLY)) | 59 | if (!(ch->flags & XPC_C_ROPENREPLY)) |
60 | return; | 60 | return; |
61 | 61 | ||
62 | ch->flags = (XPC_C_CONNECTED | XPC_C_SETUP); /* clear all else */ | 62 | if (!(ch->flags & XPC_C_OPENCOMPLETE)) { |
63 | ch->flags |= (XPC_C_OPENCOMPLETE | XPC_C_CONNECTED); | ||
64 | xpc_arch_ops.send_chctl_opencomplete(ch, irq_flags); | ||
65 | } | ||
66 | |||
67 | if (!(ch->flags & XPC_C_ROPENCOMPLETE)) | ||
68 | return; | ||
63 | 69 | ||
64 | dev_info(xpc_chan, "channel %d to partition %d connected\n", | 70 | dev_info(xpc_chan, "channel %d to partition %d connected\n", |
65 | ch->number, ch->partid); | 71 | ch->number, ch->partid); |
66 | 72 | ||
67 | spin_unlock_irqrestore(&ch->lock, *irq_flags); | 73 | ch->flags = (XPC_C_CONNECTED | XPC_C_SETUP); /* clear all else */ |
68 | xpc_create_kthreads(ch, 1, 0); | ||
69 | spin_lock_irqsave(&ch->lock, *irq_flags); | ||
70 | } | 74 | } |
71 | 75 | ||
72 | /* | 76 | /* |
@@ -96,7 +100,7 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags) | |||
96 | 100 | ||
97 | if (part->act_state == XPC_P_AS_DEACTIVATING) { | 101 | if (part->act_state == XPC_P_AS_DEACTIVATING) { |
98 | /* can't proceed until the other side disengages from us */ | 102 | /* can't proceed until the other side disengages from us */ |
99 | if (xpc_partition_engaged(ch->partid)) | 103 | if (xpc_arch_ops.partition_engaged(ch->partid)) |
100 | return; | 104 | return; |
101 | 105 | ||
102 | } else { | 106 | } else { |
@@ -108,7 +112,7 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags) | |||
108 | 112 | ||
109 | if (!(ch->flags & XPC_C_CLOSEREPLY)) { | 113 | if (!(ch->flags & XPC_C_CLOSEREPLY)) { |
110 | ch->flags |= XPC_C_CLOSEREPLY; | 114 | ch->flags |= XPC_C_CLOSEREPLY; |
111 | xpc_send_chctl_closereply(ch, irq_flags); | 115 | xpc_arch_ops.send_chctl_closereply(ch, irq_flags); |
112 | } | 116 | } |
113 | 117 | ||
114 | if (!(ch->flags & XPC_C_RCLOSEREPLY)) | 118 | if (!(ch->flags & XPC_C_RCLOSEREPLY)) |
@@ -118,7 +122,7 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags) | |||
118 | /* wake those waiting for notify completion */ | 122 | /* wake those waiting for notify completion */ |
119 | if (atomic_read(&ch->n_to_notify) > 0) { | 123 | if (atomic_read(&ch->n_to_notify) > 0) { |
120 | /* we do callout while holding ch->lock, callout can't block */ | 124 | /* we do callout while holding ch->lock, callout can't block */ |
121 | xpc_notify_senders_of_disconnect(ch); | 125 | xpc_arch_ops.notify_senders_of_disconnect(ch); |
122 | } | 126 | } |
123 | 127 | ||
124 | /* both sides are disconnected now */ | 128 | /* both sides are disconnected now */ |
@@ -132,7 +136,7 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags) | |||
132 | DBUG_ON(atomic_read(&ch->n_to_notify) != 0); | 136 | DBUG_ON(atomic_read(&ch->n_to_notify) != 0); |
133 | 137 | ||
134 | /* it's now safe to free the channel's message queues */ | 138 | /* it's now safe to free the channel's message queues */ |
135 | xpc_teardown_msg_structures(ch); | 139 | xpc_arch_ops.teardown_msg_structures(ch); |
136 | 140 | ||
137 | ch->func = NULL; | 141 | ch->func = NULL; |
138 | ch->key = NULL; | 142 | ch->key = NULL; |
@@ -144,8 +148,9 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags) | |||
144 | 148 | ||
145 | /* | 149 | /* |
146 | * Mark the channel disconnected and clear all other flags, including | 150 | * Mark the channel disconnected and clear all other flags, including |
147 | * XPC_C_SETUP (because of call to xpc_teardown_msg_structures()) but | 151 | * XPC_C_SETUP (because of call to |
148 | * not including XPC_C_WDISCONNECT (if it was set). | 152 | * xpc_arch_ops.teardown_msg_structures()) but not including |
153 | * XPC_C_WDISCONNECT (if it was set). | ||
149 | */ | 154 | */ |
150 | ch->flags = (XPC_C_DISCONNECTED | (ch->flags & XPC_C_WDISCONNECT)); | 155 | ch->flags = (XPC_C_DISCONNECTED | (ch->flags & XPC_C_WDISCONNECT)); |
151 | 156 | ||
@@ -184,6 +189,7 @@ xpc_process_openclose_chctl_flags(struct xpc_partition *part, int ch_number, | |||
184 | struct xpc_channel *ch = &part->channels[ch_number]; | 189 | struct xpc_channel *ch = &part->channels[ch_number]; |
185 | enum xp_retval reason; | 190 | enum xp_retval reason; |
186 | enum xp_retval ret; | 191 | enum xp_retval ret; |
192 | int create_kthread = 0; | ||
187 | 193 | ||
188 | spin_lock_irqsave(&ch->lock, irq_flags); | 194 | spin_lock_irqsave(&ch->lock, irq_flags); |
189 | 195 | ||
@@ -196,8 +202,7 @@ again: | |||
196 | * has had a chance to see that the channel is disconnected. | 202 | * has had a chance to see that the channel is disconnected. |
197 | */ | 203 | */ |
198 | ch->delayed_chctl_flags |= chctl_flags; | 204 | ch->delayed_chctl_flags |= chctl_flags; |
199 | spin_unlock_irqrestore(&ch->lock, irq_flags); | 205 | goto out; |
200 | return; | ||
201 | } | 206 | } |
202 | 207 | ||
203 | if (chctl_flags & XPC_CHCTL_CLOSEREQUEST) { | 208 | if (chctl_flags & XPC_CHCTL_CLOSEREQUEST) { |
@@ -239,8 +244,7 @@ again: | |||
239 | XPC_CHCTL_CLOSEREQUEST; | 244 | XPC_CHCTL_CLOSEREQUEST; |
240 | spin_unlock(&part->chctl_lock); | 245 | spin_unlock(&part->chctl_lock); |
241 | } | 246 | } |
242 | spin_unlock_irqrestore(&ch->lock, irq_flags); | 247 | goto out; |
243 | return; | ||
244 | } | 248 | } |
245 | 249 | ||
246 | XPC_SET_REASON(ch, 0, 0); | 250 | XPC_SET_REASON(ch, 0, 0); |
@@ -250,7 +254,8 @@ again: | |||
250 | ch->flags |= (XPC_C_CONNECTING | XPC_C_ROPENREQUEST); | 254 | ch->flags |= (XPC_C_CONNECTING | XPC_C_ROPENREQUEST); |
251 | } | 255 | } |
252 | 256 | ||
253 | chctl_flags &= ~(XPC_CHCTL_OPENREQUEST | XPC_CHCTL_OPENREPLY); | 257 | chctl_flags &= ~(XPC_CHCTL_OPENREQUEST | XPC_CHCTL_OPENREPLY | |
258 | XPC_CHCTL_OPENCOMPLETE); | ||
254 | 259 | ||
255 | /* | 260 | /* |
256 | * The meaningful CLOSEREQUEST connection state fields are: | 261 | * The meaningful CLOSEREQUEST connection state fields are: |
@@ -269,8 +274,7 @@ again: | |||
269 | XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags); | 274 | XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags); |
270 | 275 | ||
271 | DBUG_ON(chctl_flags & XPC_CHCTL_CLOSEREPLY); | 276 | DBUG_ON(chctl_flags & XPC_CHCTL_CLOSEREPLY); |
272 | spin_unlock_irqrestore(&ch->lock, irq_flags); | 277 | goto out; |
273 | return; | ||
274 | } | 278 | } |
275 | 279 | ||
276 | xpc_process_disconnect(ch, &irq_flags); | 280 | xpc_process_disconnect(ch, &irq_flags); |
@@ -283,8 +287,7 @@ again: | |||
283 | 287 | ||
284 | if (ch->flags & XPC_C_DISCONNECTED) { | 288 | if (ch->flags & XPC_C_DISCONNECTED) { |
285 | DBUG_ON(part->act_state != XPC_P_AS_DEACTIVATING); | 289 | DBUG_ON(part->act_state != XPC_P_AS_DEACTIVATING); |
286 | spin_unlock_irqrestore(&ch->lock, irq_flags); | 290 | goto out; |
287 | return; | ||
288 | } | 291 | } |
289 | 292 | ||
290 | DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST)); | 293 | DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST)); |
@@ -299,8 +302,7 @@ again: | |||
299 | XPC_CHCTL_CLOSEREPLY; | 302 | XPC_CHCTL_CLOSEREPLY; |
300 | spin_unlock(&part->chctl_lock); | 303 | spin_unlock(&part->chctl_lock); |
301 | } | 304 | } |
302 | spin_unlock_irqrestore(&ch->lock, irq_flags); | 305 | goto out; |
303 | return; | ||
304 | } | 306 | } |
305 | 307 | ||
306 | ch->flags |= XPC_C_RCLOSEREPLY; | 308 | ch->flags |= XPC_C_RCLOSEREPLY; |
@@ -320,14 +322,12 @@ again: | |||
320 | 322 | ||
321 | if (part->act_state == XPC_P_AS_DEACTIVATING || | 323 | if (part->act_state == XPC_P_AS_DEACTIVATING || |
322 | (ch->flags & XPC_C_ROPENREQUEST)) { | 324 | (ch->flags & XPC_C_ROPENREQUEST)) { |
323 | spin_unlock_irqrestore(&ch->lock, irq_flags); | 325 | goto out; |
324 | return; | ||
325 | } | 326 | } |
326 | 327 | ||
327 | if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_WDISCONNECT)) { | 328 | if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_WDISCONNECT)) { |
328 | ch->delayed_chctl_flags |= XPC_CHCTL_OPENREQUEST; | 329 | ch->delayed_chctl_flags |= XPC_CHCTL_OPENREQUEST; |
329 | spin_unlock_irqrestore(&ch->lock, irq_flags); | 330 | goto out; |
330 | return; | ||
331 | } | 331 | } |
332 | DBUG_ON(!(ch->flags & (XPC_C_DISCONNECTED | | 332 | DBUG_ON(!(ch->flags & (XPC_C_DISCONNECTED | |
333 | XPC_C_OPENREQUEST))); | 333 | XPC_C_OPENREQUEST))); |
@@ -341,8 +341,7 @@ again: | |||
341 | */ | 341 | */ |
342 | if (args->entry_size == 0 || args->local_nentries == 0) { | 342 | if (args->entry_size == 0 || args->local_nentries == 0) { |
343 | /* assume OPENREQUEST was delayed by mistake */ | 343 | /* assume OPENREQUEST was delayed by mistake */ |
344 | spin_unlock_irqrestore(&ch->lock, irq_flags); | 344 | goto out; |
345 | return; | ||
346 | } | 345 | } |
347 | 346 | ||
348 | ch->flags |= (XPC_C_ROPENREQUEST | XPC_C_CONNECTING); | 347 | ch->flags |= (XPC_C_ROPENREQUEST | XPC_C_CONNECTING); |
@@ -352,8 +351,7 @@ again: | |||
352 | if (args->entry_size != ch->entry_size) { | 351 | if (args->entry_size != ch->entry_size) { |
353 | XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes, | 352 | XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes, |
354 | &irq_flags); | 353 | &irq_flags); |
355 | spin_unlock_irqrestore(&ch->lock, irq_flags); | 354 | goto out; |
356 | return; | ||
357 | } | 355 | } |
358 | } else { | 356 | } else { |
359 | ch->entry_size = args->entry_size; | 357 | ch->entry_size = args->entry_size; |
@@ -375,15 +373,13 @@ again: | |||
375 | args->local_msgqueue_pa, args->local_nentries, | 373 | args->local_msgqueue_pa, args->local_nentries, |
376 | args->remote_nentries, ch->partid, ch->number); | 374 | args->remote_nentries, ch->partid, ch->number); |
377 | 375 | ||
378 | if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) { | 376 | if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) |
379 | spin_unlock_irqrestore(&ch->lock, irq_flags); | 377 | goto out; |
380 | return; | 378 | |
381 | } | ||
382 | if (!(ch->flags & XPC_C_OPENREQUEST)) { | 379 | if (!(ch->flags & XPC_C_OPENREQUEST)) { |
383 | XPC_DISCONNECT_CHANNEL(ch, xpOpenCloseError, | 380 | XPC_DISCONNECT_CHANNEL(ch, xpOpenCloseError, |
384 | &irq_flags); | 381 | &irq_flags); |
385 | spin_unlock_irqrestore(&ch->lock, irq_flags); | 382 | goto out; |
386 | return; | ||
387 | } | 383 | } |
388 | 384 | ||
389 | DBUG_ON(!(ch->flags & XPC_C_ROPENREQUEST)); | 385 | DBUG_ON(!(ch->flags & XPC_C_ROPENREQUEST)); |
@@ -400,11 +396,11 @@ again: | |||
400 | DBUG_ON(args->local_nentries == 0); | 396 | DBUG_ON(args->local_nentries == 0); |
401 | DBUG_ON(args->remote_nentries == 0); | 397 | DBUG_ON(args->remote_nentries == 0); |
402 | 398 | ||
403 | ret = xpc_save_remote_msgqueue_pa(ch, args->local_msgqueue_pa); | 399 | ret = xpc_arch_ops.save_remote_msgqueue_pa(ch, |
400 | args->local_msgqueue_pa); | ||
404 | if (ret != xpSuccess) { | 401 | if (ret != xpSuccess) { |
405 | XPC_DISCONNECT_CHANNEL(ch, ret, &irq_flags); | 402 | XPC_DISCONNECT_CHANNEL(ch, ret, &irq_flags); |
406 | spin_unlock_irqrestore(&ch->lock, irq_flags); | 403 | goto out; |
407 | return; | ||
408 | } | 404 | } |
409 | ch->flags |= XPC_C_ROPENREPLY; | 405 | ch->flags |= XPC_C_ROPENREPLY; |
410 | 406 | ||
@@ -430,7 +426,36 @@ again: | |||
430 | xpc_process_connect(ch, &irq_flags); | 426 | xpc_process_connect(ch, &irq_flags); |
431 | } | 427 | } |
432 | 428 | ||
429 | if (chctl_flags & XPC_CHCTL_OPENCOMPLETE) { | ||
430 | |||
431 | dev_dbg(xpc_chan, "XPC_CHCTL_OPENCOMPLETE received from " | ||
432 | "partid=%d, channel=%d\n", ch->partid, ch->number); | ||
433 | |||
434 | if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) | ||
435 | goto out; | ||
436 | |||
437 | if (!(ch->flags & XPC_C_OPENREQUEST) || | ||
438 | !(ch->flags & XPC_C_OPENREPLY)) { | ||
439 | XPC_DISCONNECT_CHANNEL(ch, xpOpenCloseError, | ||
440 | &irq_flags); | ||
441 | goto out; | ||
442 | } | ||
443 | |||
444 | DBUG_ON(!(ch->flags & XPC_C_ROPENREQUEST)); | ||
445 | DBUG_ON(!(ch->flags & XPC_C_ROPENREPLY)); | ||
446 | DBUG_ON(!(ch->flags & XPC_C_CONNECTED)); | ||
447 | |||
448 | ch->flags |= XPC_C_ROPENCOMPLETE; | ||
449 | |||
450 | xpc_process_connect(ch, &irq_flags); | ||
451 | create_kthread = 1; | ||
452 | } | ||
453 | |||
454 | out: | ||
433 | spin_unlock_irqrestore(&ch->lock, irq_flags); | 455 | spin_unlock_irqrestore(&ch->lock, irq_flags); |
456 | |||
457 | if (create_kthread) | ||
458 | xpc_create_kthreads(ch, 1, 0); | ||
434 | } | 459 | } |
435 | 460 | ||
436 | /* | 461 | /* |
@@ -508,7 +533,7 @@ xpc_connect_channel(struct xpc_channel *ch) | |||
508 | /* initiate the connection */ | 533 | /* initiate the connection */ |
509 | 534 | ||
510 | ch->flags |= (XPC_C_OPENREQUEST | XPC_C_CONNECTING); | 535 | ch->flags |= (XPC_C_OPENREQUEST | XPC_C_CONNECTING); |
511 | xpc_send_chctl_openrequest(ch, &irq_flags); | 536 | xpc_arch_ops.send_chctl_openrequest(ch, &irq_flags); |
512 | 537 | ||
513 | xpc_process_connect(ch, &irq_flags); | 538 | xpc_process_connect(ch, &irq_flags); |
514 | 539 | ||
@@ -526,7 +551,7 @@ xpc_process_sent_chctl_flags(struct xpc_partition *part) | |||
526 | int ch_number; | 551 | int ch_number; |
527 | u32 ch_flags; | 552 | u32 ch_flags; |
528 | 553 | ||
529 | chctl.all_flags = xpc_get_chctl_all_flags(part); | 554 | chctl.all_flags = xpc_arch_ops.get_chctl_all_flags(part); |
530 | 555 | ||
531 | /* | 556 | /* |
532 | * Initiate channel connections for registered channels. | 557 | * Initiate channel connections for registered channels. |
@@ -564,10 +589,6 @@ xpc_process_sent_chctl_flags(struct xpc_partition *part) | |||
564 | if (!(ch_flags & XPC_C_OPENREQUEST)) { | 589 | if (!(ch_flags & XPC_C_OPENREQUEST)) { |
565 | DBUG_ON(ch_flags & XPC_C_SETUP); | 590 | DBUG_ON(ch_flags & XPC_C_SETUP); |
566 | (void)xpc_connect_channel(ch); | 591 | (void)xpc_connect_channel(ch); |
567 | } else { | ||
568 | spin_lock_irqsave(&ch->lock, irq_flags); | ||
569 | xpc_process_connect(ch, &irq_flags); | ||
570 | spin_unlock_irqrestore(&ch->lock, irq_flags); | ||
571 | } | 592 | } |
572 | continue; | 593 | continue; |
573 | } | 594 | } |
@@ -579,7 +600,7 @@ xpc_process_sent_chctl_flags(struct xpc_partition *part) | |||
579 | */ | 600 | */ |
580 | 601 | ||
581 | if (chctl.flags[ch_number] & XPC_MSG_CHCTL_FLAGS) | 602 | if (chctl.flags[ch_number] & XPC_MSG_CHCTL_FLAGS) |
582 | xpc_process_msg_chctl_flags(part, ch_number); | 603 | xpc_arch_ops.process_msg_chctl_flags(part, ch_number); |
583 | } | 604 | } |
584 | } | 605 | } |
585 | 606 | ||
@@ -755,7 +776,7 @@ xpc_disconnect_channel(const int line, struct xpc_channel *ch, | |||
755 | XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY | | 776 | XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY | |
756 | XPC_C_CONNECTING | XPC_C_CONNECTED); | 777 | XPC_C_CONNECTING | XPC_C_CONNECTED); |
757 | 778 | ||
758 | xpc_send_chctl_closerequest(ch, irq_flags); | 779 | xpc_arch_ops.send_chctl_closerequest(ch, irq_flags); |
759 | 780 | ||
760 | if (channel_was_connected) | 781 | if (channel_was_connected) |
761 | ch->flags |= XPC_C_WASCONNECTED; | 782 | ch->flags |= XPC_C_WASCONNECTED; |
@@ -862,8 +883,8 @@ xpc_initiate_send(short partid, int ch_number, u32 flags, void *payload, | |||
862 | DBUG_ON(payload == NULL); | 883 | DBUG_ON(payload == NULL); |
863 | 884 | ||
864 | if (xpc_part_ref(part)) { | 885 | if (xpc_part_ref(part)) { |
865 | ret = xpc_send_payload(&part->channels[ch_number], flags, | 886 | ret = xpc_arch_ops.send_payload(&part->channels[ch_number], |
866 | payload, payload_size, 0, NULL, NULL); | 887 | flags, payload, payload_size, 0, NULL, NULL); |
867 | xpc_part_deref(part); | 888 | xpc_part_deref(part); |
868 | } | 889 | } |
869 | 890 | ||
@@ -914,9 +935,8 @@ xpc_initiate_send_notify(short partid, int ch_number, u32 flags, void *payload, | |||
914 | DBUG_ON(func == NULL); | 935 | DBUG_ON(func == NULL); |
915 | 936 | ||
916 | if (xpc_part_ref(part)) { | 937 | if (xpc_part_ref(part)) { |
917 | ret = xpc_send_payload(&part->channels[ch_number], flags, | 938 | ret = xpc_arch_ops.send_payload(&part->channels[ch_number], |
918 | payload, payload_size, XPC_N_CALL, func, | 939 | flags, payload, payload_size, XPC_N_CALL, func, key); |
919 | key); | ||
920 | xpc_part_deref(part); | 940 | xpc_part_deref(part); |
921 | } | 941 | } |
922 | return ret; | 942 | return ret; |
@@ -930,7 +950,7 @@ xpc_deliver_payload(struct xpc_channel *ch) | |||
930 | { | 950 | { |
931 | void *payload; | 951 | void *payload; |
932 | 952 | ||
933 | payload = xpc_get_deliverable_payload(ch); | 953 | payload = xpc_arch_ops.get_deliverable_payload(ch); |
934 | if (payload != NULL) { | 954 | if (payload != NULL) { |
935 | 955 | ||
936 | /* | 956 | /* |
@@ -984,7 +1004,7 @@ xpc_initiate_received(short partid, int ch_number, void *payload) | |||
984 | DBUG_ON(ch_number < 0 || ch_number >= part->nchannels); | 1004 | DBUG_ON(ch_number < 0 || ch_number >= part->nchannels); |
985 | 1005 | ||
986 | ch = &part->channels[ch_number]; | 1006 | ch = &part->channels[ch_number]; |
987 | xpc_received_payload(ch, payload); | 1007 | xpc_arch_ops.received_payload(ch, payload); |
988 | 1008 | ||
989 | /* the call to xpc_msgqueue_ref() was done by xpc_deliver_payload() */ | 1009 | /* the call to xpc_msgqueue_ref() was done by xpc_deliver_payload() */ |
990 | xpc_msgqueue_deref(ch); | 1010 | xpc_msgqueue_deref(ch); |
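Two patterns recur in the xpc_channel.c hunks above: every early exit from xpc_process_openclose_chctl_flags() now funnels through one out: label that drops the channel lock, and kthread creation for the new OPENCOMPLETE step is deferred until after the unlock via the create_kthread flag. A rough user-space sketch of that shape follows, with a pthread mutex standing in for the kernel spinlock and the flag value and function names invented for the example.

/*
 * Sketch of the single-unlock "goto out" pattern plus deferred worker
 * creation.  Nothing here is the driver's real code; it only mirrors the
 * control flow the hunks above converge on.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ch_lock = PTHREAD_MUTEX_INITIALIZER;

static void process_flags(unsigned int flags, int disconnecting)
{
	int create_worker = 0;

	pthread_mutex_lock(&ch_lock);

	if (disconnecting)
		goto out;		/* previously: unlock + return at each early-exit site */

	if (flags & 0x10) {		/* stand-in for an "open complete" control flag */
		printf("open complete\n");
		create_worker = 1;	/* remember blocking work for after the unlock */
	}

out:
	pthread_mutex_unlock(&ch_lock);

	if (create_worker)
		printf("spawn worker thread here\n");	/* the driver calls xpc_create_kthreads() */
}

int main(void)
{
	process_flags(0x10, 0);
	process_flags(0x10, 1);
	return 0;
}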
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c index 1ab9fda87fab..fd3688a3e23f 100644 --- a/drivers/misc/sgi-xp/xpc_main.c +++ b/drivers/misc/sgi-xp/xpc_main.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved. | 6 | * Copyright (c) 2004-2009 Silicon Graphics, Inc. All Rights Reserved. |
7 | */ | 7 | */ |
8 | 8 | ||
9 | /* | 9 | /* |
@@ -150,7 +150,6 @@ DECLARE_WAIT_QUEUE_HEAD(xpc_activate_IRQ_wq); | |||
150 | 150 | ||
151 | static unsigned long xpc_hb_check_timeout; | 151 | static unsigned long xpc_hb_check_timeout; |
152 | static struct timer_list xpc_hb_timer; | 152 | static struct timer_list xpc_hb_timer; |
153 | void *xpc_heartbeating_to_mask; | ||
154 | 153 | ||
155 | /* notification that the xpc_hb_checker thread has exited */ | 154 | /* notification that the xpc_hb_checker thread has exited */ |
156 | static DECLARE_COMPLETION(xpc_hb_checker_exited); | 155 | static DECLARE_COMPLETION(xpc_hb_checker_exited); |
@@ -170,62 +169,7 @@ static struct notifier_block xpc_die_notifier = { | |||
170 | .notifier_call = xpc_system_die, | 169 | .notifier_call = xpc_system_die, |
171 | }; | 170 | }; |
172 | 171 | ||
173 | int (*xpc_setup_partitions_sn) (void); | 172 | struct xpc_arch_operations xpc_arch_ops; |
174 | void (*xpc_teardown_partitions_sn) (void); | ||
175 | enum xp_retval (*xpc_get_partition_rsvd_page_pa) (void *buf, u64 *cookie, | ||
176 | unsigned long *rp_pa, | ||
177 | size_t *len); | ||
178 | int (*xpc_setup_rsvd_page_sn) (struct xpc_rsvd_page *rp); | ||
179 | void (*xpc_heartbeat_init) (void); | ||
180 | void (*xpc_heartbeat_exit) (void); | ||
181 | void (*xpc_increment_heartbeat) (void); | ||
182 | void (*xpc_offline_heartbeat) (void); | ||
183 | void (*xpc_online_heartbeat) (void); | ||
184 | enum xp_retval (*xpc_get_remote_heartbeat) (struct xpc_partition *part); | ||
185 | |||
186 | enum xp_retval (*xpc_make_first_contact) (struct xpc_partition *part); | ||
187 | void (*xpc_notify_senders_of_disconnect) (struct xpc_channel *ch); | ||
188 | u64 (*xpc_get_chctl_all_flags) (struct xpc_partition *part); | ||
189 | enum xp_retval (*xpc_setup_msg_structures) (struct xpc_channel *ch); | ||
190 | void (*xpc_teardown_msg_structures) (struct xpc_channel *ch); | ||
191 | void (*xpc_process_msg_chctl_flags) (struct xpc_partition *part, int ch_number); | ||
192 | int (*xpc_n_of_deliverable_payloads) (struct xpc_channel *ch); | ||
193 | void *(*xpc_get_deliverable_payload) (struct xpc_channel *ch); | ||
194 | |||
195 | void (*xpc_request_partition_activation) (struct xpc_rsvd_page *remote_rp, | ||
196 | unsigned long remote_rp_pa, | ||
197 | int nasid); | ||
198 | void (*xpc_request_partition_reactivation) (struct xpc_partition *part); | ||
199 | void (*xpc_request_partition_deactivation) (struct xpc_partition *part); | ||
200 | void (*xpc_cancel_partition_deactivation_request) (struct xpc_partition *part); | ||
201 | |||
202 | void (*xpc_process_activate_IRQ_rcvd) (void); | ||
203 | enum xp_retval (*xpc_setup_ch_structures_sn) (struct xpc_partition *part); | ||
204 | void (*xpc_teardown_ch_structures_sn) (struct xpc_partition *part); | ||
205 | |||
206 | void (*xpc_indicate_partition_engaged) (struct xpc_partition *part); | ||
207 | int (*xpc_partition_engaged) (short partid); | ||
208 | int (*xpc_any_partition_engaged) (void); | ||
209 | void (*xpc_indicate_partition_disengaged) (struct xpc_partition *part); | ||
210 | void (*xpc_assume_partition_disengaged) (short partid); | ||
211 | |||
212 | void (*xpc_send_chctl_closerequest) (struct xpc_channel *ch, | ||
213 | unsigned long *irq_flags); | ||
214 | void (*xpc_send_chctl_closereply) (struct xpc_channel *ch, | ||
215 | unsigned long *irq_flags); | ||
216 | void (*xpc_send_chctl_openrequest) (struct xpc_channel *ch, | ||
217 | unsigned long *irq_flags); | ||
218 | void (*xpc_send_chctl_openreply) (struct xpc_channel *ch, | ||
219 | unsigned long *irq_flags); | ||
220 | |||
221 | enum xp_retval (*xpc_save_remote_msgqueue_pa) (struct xpc_channel *ch, | ||
222 | unsigned long msgqueue_pa); | ||
223 | |||
224 | enum xp_retval (*xpc_send_payload) (struct xpc_channel *ch, u32 flags, | ||
225 | void *payload, u16 payload_size, | ||
226 | u8 notify_type, xpc_notify_func func, | ||
227 | void *key); | ||
228 | void (*xpc_received_payload) (struct xpc_channel *ch, void *payload); | ||
229 | 173 | ||
230 | /* | 174 | /* |
231 | * Timer function to enforce the timelimit on the partition disengage. | 175 | * Timer function to enforce the timelimit on the partition disengage. |
@@ -240,7 +184,7 @@ xpc_timeout_partition_disengage(unsigned long data) | |||
240 | (void)xpc_partition_disengaged(part); | 184 | (void)xpc_partition_disengaged(part); |
241 | 185 | ||
242 | DBUG_ON(part->disengage_timeout != 0); | 186 | DBUG_ON(part->disengage_timeout != 0); |
243 | DBUG_ON(xpc_partition_engaged(XPC_PARTID(part))); | 187 | DBUG_ON(xpc_arch_ops.partition_engaged(XPC_PARTID(part))); |
244 | } | 188 | } |
245 | 189 | ||
246 | /* | 190 | /* |
@@ -251,7 +195,7 @@ xpc_timeout_partition_disengage(unsigned long data) | |||
251 | static void | 195 | static void |
252 | xpc_hb_beater(unsigned long dummy) | 196 | xpc_hb_beater(unsigned long dummy) |
253 | { | 197 | { |
254 | xpc_increment_heartbeat(); | 198 | xpc_arch_ops.increment_heartbeat(); |
255 | 199 | ||
256 | if (time_is_before_eq_jiffies(xpc_hb_check_timeout)) | 200 | if (time_is_before_eq_jiffies(xpc_hb_check_timeout)) |
257 | wake_up_interruptible(&xpc_activate_IRQ_wq); | 201 | wake_up_interruptible(&xpc_activate_IRQ_wq); |
@@ -263,7 +207,7 @@ xpc_hb_beater(unsigned long dummy) | |||
263 | static void | 207 | static void |
264 | xpc_start_hb_beater(void) | 208 | xpc_start_hb_beater(void) |
265 | { | 209 | { |
266 | xpc_heartbeat_init(); | 210 | xpc_arch_ops.heartbeat_init(); |
267 | init_timer(&xpc_hb_timer); | 211 | init_timer(&xpc_hb_timer); |
268 | xpc_hb_timer.function = xpc_hb_beater; | 212 | xpc_hb_timer.function = xpc_hb_beater; |
269 | xpc_hb_beater(0); | 213 | xpc_hb_beater(0); |
@@ -273,7 +217,7 @@ static void | |||
273 | xpc_stop_hb_beater(void) | 217 | xpc_stop_hb_beater(void) |
274 | { | 218 | { |
275 | del_timer_sync(&xpc_hb_timer); | 219 | del_timer_sync(&xpc_hb_timer); |
276 | xpc_heartbeat_exit(); | 220 | xpc_arch_ops.heartbeat_exit(); |
277 | } | 221 | } |
278 | 222 | ||
279 | /* | 223 | /* |
@@ -302,7 +246,7 @@ xpc_check_remote_hb(void) | |||
302 | continue; | 246 | continue; |
303 | } | 247 | } |
304 | 248 | ||
305 | ret = xpc_get_remote_heartbeat(part); | 249 | ret = xpc_arch_ops.get_remote_heartbeat(part); |
306 | if (ret != xpSuccess) | 250 | if (ret != xpSuccess) |
307 | XPC_DEACTIVATE_PARTITION(part, ret); | 251 | XPC_DEACTIVATE_PARTITION(part, ret); |
308 | } | 252 | } |
@@ -353,7 +297,7 @@ xpc_hb_checker(void *ignore) | |||
353 | force_IRQ = 0; | 297 | force_IRQ = 0; |
354 | dev_dbg(xpc_part, "processing activate IRQs " | 298 | dev_dbg(xpc_part, "processing activate IRQs " |
355 | "received\n"); | 299 | "received\n"); |
356 | xpc_process_activate_IRQ_rcvd(); | 300 | xpc_arch_ops.process_activate_IRQ_rcvd(); |
357 | } | 301 | } |
358 | 302 | ||
359 | /* wait for IRQ or timeout */ | 303 | /* wait for IRQ or timeout */ |
@@ -528,7 +472,7 @@ xpc_setup_ch_structures(struct xpc_partition *part) | |||
528 | init_waitqueue_head(&ch->idle_wq); | 472 | init_waitqueue_head(&ch->idle_wq); |
529 | } | 473 | } |
530 | 474 | ||
531 | ret = xpc_setup_ch_structures_sn(part); | 475 | ret = xpc_arch_ops.setup_ch_structures(part); |
532 | if (ret != xpSuccess) | 476 | if (ret != xpSuccess) |
533 | goto out_2; | 477 | goto out_2; |
534 | 478 | ||
@@ -572,7 +516,7 @@ xpc_teardown_ch_structures(struct xpc_partition *part) | |||
572 | 516 | ||
573 | /* now we can begin tearing down the infrastructure */ | 517 | /* now we can begin tearing down the infrastructure */ |
574 | 518 | ||
575 | xpc_teardown_ch_structures_sn(part); | 519 | xpc_arch_ops.teardown_ch_structures(part); |
576 | 520 | ||
577 | kfree(part->remote_openclose_args_base); | 521 | kfree(part->remote_openclose_args_base); |
578 | part->remote_openclose_args = NULL; | 522 | part->remote_openclose_args = NULL; |
@@ -620,12 +564,12 @@ xpc_activating(void *__partid) | |||
620 | 564 | ||
621 | dev_dbg(xpc_part, "activating partition %d\n", partid); | 565 | dev_dbg(xpc_part, "activating partition %d\n", partid); |
622 | 566 | ||
623 | xpc_allow_hb(partid); | 567 | xpc_arch_ops.allow_hb(partid); |
624 | 568 | ||
625 | if (xpc_setup_ch_structures(part) == xpSuccess) { | 569 | if (xpc_setup_ch_structures(part) == xpSuccess) { |
626 | (void)xpc_part_ref(part); /* this will always succeed */ | 570 | (void)xpc_part_ref(part); /* this will always succeed */ |
627 | 571 | ||
628 | if (xpc_make_first_contact(part) == xpSuccess) { | 572 | if (xpc_arch_ops.make_first_contact(part) == xpSuccess) { |
629 | xpc_mark_partition_active(part); | 573 | xpc_mark_partition_active(part); |
630 | xpc_channel_mgr(part); | 574 | xpc_channel_mgr(part); |
631 | /* won't return until partition is deactivating */ | 575 | /* won't return until partition is deactivating */ |
@@ -635,12 +579,12 @@ xpc_activating(void *__partid) | |||
635 | xpc_teardown_ch_structures(part); | 579 | xpc_teardown_ch_structures(part); |
636 | } | 580 | } |
637 | 581 | ||
638 | xpc_disallow_hb(partid); | 582 | xpc_arch_ops.disallow_hb(partid); |
639 | xpc_mark_partition_inactive(part); | 583 | xpc_mark_partition_inactive(part); |
640 | 584 | ||
641 | if (part->reason == xpReactivating) { | 585 | if (part->reason == xpReactivating) { |
642 | /* interrupting ourselves results in activating partition */ | 586 | /* interrupting ourselves results in activating partition */ |
643 | xpc_request_partition_reactivation(part); | 587 | xpc_arch_ops.request_partition_reactivation(part); |
644 | } | 588 | } |
645 | 589 | ||
646 | return 0; | 590 | return 0; |
@@ -713,10 +657,13 @@ xpc_activate_kthreads(struct xpc_channel *ch, int needed) | |||
713 | static void | 657 | static void |
714 | xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch) | 658 | xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch) |
715 | { | 659 | { |
660 | int (*n_of_deliverable_payloads) (struct xpc_channel *) = | ||
661 | xpc_arch_ops.n_of_deliverable_payloads; | ||
662 | |||
716 | do { | 663 | do { |
717 | /* deliver messages to their intended recipients */ | 664 | /* deliver messages to their intended recipients */ |
718 | 665 | ||
719 | while (xpc_n_of_deliverable_payloads(ch) > 0 && | 666 | while (n_of_deliverable_payloads(ch) > 0 && |
720 | !(ch->flags & XPC_C_DISCONNECTING)) { | 667 | !(ch->flags & XPC_C_DISCONNECTING)) { |
721 | xpc_deliver_payload(ch); | 668 | xpc_deliver_payload(ch); |
722 | } | 669 | } |
@@ -732,7 +679,7 @@ xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch) | |||
732 | "wait_event_interruptible_exclusive()\n"); | 679 | "wait_event_interruptible_exclusive()\n"); |
733 | 680 | ||
734 | (void)wait_event_interruptible_exclusive(ch->idle_wq, | 681 | (void)wait_event_interruptible_exclusive(ch->idle_wq, |
735 | (xpc_n_of_deliverable_payloads(ch) > 0 || | 682 | (n_of_deliverable_payloads(ch) > 0 || |
736 | (ch->flags & XPC_C_DISCONNECTING))); | 683 | (ch->flags & XPC_C_DISCONNECTING))); |
737 | 684 | ||
738 | atomic_dec(&ch->kthreads_idle); | 685 | atomic_dec(&ch->kthreads_idle); |
@@ -749,6 +696,8 @@ xpc_kthread_start(void *args) | |||
749 | struct xpc_channel *ch; | 696 | struct xpc_channel *ch; |
750 | int n_needed; | 697 | int n_needed; |
751 | unsigned long irq_flags; | 698 | unsigned long irq_flags; |
699 | int (*n_of_deliverable_payloads) (struct xpc_channel *) = | ||
700 | xpc_arch_ops.n_of_deliverable_payloads; | ||
752 | 701 | ||
753 | dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n", | 702 | dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n", |
754 | partid, ch_number); | 703 | partid, ch_number); |
@@ -777,7 +726,7 @@ xpc_kthread_start(void *args) | |||
777 | * additional kthreads to help deliver them. We only | 726 | * additional kthreads to help deliver them. We only |
778 | * need one less than total #of messages to deliver. | 727 | * need one less than total #of messages to deliver. |
779 | */ | 728 | */ |
780 | n_needed = xpc_n_of_deliverable_payloads(ch) - 1; | 729 | n_needed = n_of_deliverable_payloads(ch) - 1; |
781 | if (n_needed > 0 && !(ch->flags & XPC_C_DISCONNECTING)) | 730 | if (n_needed > 0 && !(ch->flags & XPC_C_DISCONNECTING)) |
782 | xpc_activate_kthreads(ch, n_needed); | 731 | xpc_activate_kthreads(ch, n_needed); |
783 | 732 | ||
@@ -805,7 +754,7 @@ xpc_kthread_start(void *args) | |||
805 | 754 | ||
806 | if (atomic_dec_return(&ch->kthreads_assigned) == 0 && | 755 | if (atomic_dec_return(&ch->kthreads_assigned) == 0 && |
807 | atomic_dec_return(&part->nchannels_engaged) == 0) { | 756 | atomic_dec_return(&part->nchannels_engaged) == 0) { |
808 | xpc_indicate_partition_disengaged(part); | 757 | xpc_arch_ops.indicate_partition_disengaged(part); |
809 | } | 758 | } |
810 | 759 | ||
811 | xpc_msgqueue_deref(ch); | 760 | xpc_msgqueue_deref(ch); |
@@ -837,6 +786,8 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed, | |||
837 | u64 args = XPC_PACK_ARGS(ch->partid, ch->number); | 786 | u64 args = XPC_PACK_ARGS(ch->partid, ch->number); |
838 | struct xpc_partition *part = &xpc_partitions[ch->partid]; | 787 | struct xpc_partition *part = &xpc_partitions[ch->partid]; |
839 | struct task_struct *kthread; | 788 | struct task_struct *kthread; |
789 | void (*indicate_partition_disengaged) (struct xpc_partition *) = | ||
790 | xpc_arch_ops.indicate_partition_disengaged; | ||
840 | 791 | ||
841 | while (needed-- > 0) { | 792 | while (needed-- > 0) { |
842 | 793 | ||
@@ -858,7 +809,7 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed, | |||
858 | 809 | ||
859 | } else if (atomic_inc_return(&ch->kthreads_assigned) == 1 && | 810 | } else if (atomic_inc_return(&ch->kthreads_assigned) == 1 && |
860 | atomic_inc_return(&part->nchannels_engaged) == 1) { | 811 | atomic_inc_return(&part->nchannels_engaged) == 1) { |
861 | xpc_indicate_partition_engaged(part); | 812 | xpc_arch_ops.indicate_partition_engaged(part); |
862 | } | 813 | } |
863 | (void)xpc_part_ref(part); | 814 | (void)xpc_part_ref(part); |
864 | xpc_msgqueue_ref(ch); | 815 | xpc_msgqueue_ref(ch); |
@@ -880,7 +831,7 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed, | |||
880 | 831 | ||
881 | if (atomic_dec_return(&ch->kthreads_assigned) == 0 && | 832 | if (atomic_dec_return(&ch->kthreads_assigned) == 0 && |
882 | atomic_dec_return(&part->nchannels_engaged) == 0) { | 833 | atomic_dec_return(&part->nchannels_engaged) == 0) { |
883 | xpc_indicate_partition_disengaged(part); | 834 | indicate_partition_disengaged(part); |
884 | } | 835 | } |
885 | xpc_msgqueue_deref(ch); | 836 | xpc_msgqueue_deref(ch); |
886 | xpc_part_deref(part); | 837 | xpc_part_deref(part); |
@@ -993,13 +944,13 @@ xpc_setup_partitions(void) | |||
993 | atomic_set(&part->references, 0); | 944 | atomic_set(&part->references, 0); |
994 | } | 945 | } |
995 | 946 | ||
996 | return xpc_setup_partitions_sn(); | 947 | return xpc_arch_ops.setup_partitions(); |
997 | } | 948 | } |
998 | 949 | ||
999 | static void | 950 | static void |
1000 | xpc_teardown_partitions(void) | 951 | xpc_teardown_partitions(void) |
1001 | { | 952 | { |
1002 | xpc_teardown_partitions_sn(); | 953 | xpc_arch_ops.teardown_partitions(); |
1003 | kfree(xpc_partitions); | 954 | kfree(xpc_partitions); |
1004 | } | 955 | } |
1005 | 956 | ||
@@ -1055,7 +1006,7 @@ xpc_do_exit(enum xp_retval reason) | |||
1055 | disengage_timeout = part->disengage_timeout; | 1006 | disengage_timeout = part->disengage_timeout; |
1056 | } | 1007 | } |
1057 | 1008 | ||
1058 | if (xpc_any_partition_engaged()) { | 1009 | if (xpc_arch_ops.any_partition_engaged()) { |
1059 | if (time_is_before_jiffies(printmsg_time)) { | 1010 | if (time_is_before_jiffies(printmsg_time)) { |
1060 | dev_info(xpc_part, "waiting for remote " | 1011 | dev_info(xpc_part, "waiting for remote " |
1061 | "partitions to deactivate, timeout in " | 1012 | "partitions to deactivate, timeout in " |
@@ -1086,8 +1037,7 @@ xpc_do_exit(enum xp_retval reason) | |||
1086 | 1037 | ||
1087 | } while (1); | 1038 | } while (1); |
1088 | 1039 | ||
1089 | DBUG_ON(xpc_any_partition_engaged()); | 1040 | DBUG_ON(xpc_arch_ops.any_partition_engaged()); |
1090 | DBUG_ON(xpc_any_hbs_allowed() != 0); | ||
1091 | 1041 | ||
1092 | xpc_teardown_rsvd_page(); | 1042 | xpc_teardown_rsvd_page(); |
1093 | 1043 | ||
@@ -1152,15 +1102,15 @@ xpc_die_deactivate(void) | |||
1152 | /* keep xpc_hb_checker thread from doing anything (just in case) */ | 1102 | /* keep xpc_hb_checker thread from doing anything (just in case) */ |
1153 | xpc_exiting = 1; | 1103 | xpc_exiting = 1; |
1154 | 1104 | ||
1155 | xpc_disallow_all_hbs(); /*indicate we're deactivated */ | 1105 | xpc_arch_ops.disallow_all_hbs(); /*indicate we're deactivated */ |
1156 | 1106 | ||
1157 | for (partid = 0; partid < xp_max_npartitions; partid++) { | 1107 | for (partid = 0; partid < xp_max_npartitions; partid++) { |
1158 | part = &xpc_partitions[partid]; | 1108 | part = &xpc_partitions[partid]; |
1159 | 1109 | ||
1160 | if (xpc_partition_engaged(partid) || | 1110 | if (xpc_arch_ops.partition_engaged(partid) || |
1161 | part->act_state != XPC_P_AS_INACTIVE) { | 1111 | part->act_state != XPC_P_AS_INACTIVE) { |
1162 | xpc_request_partition_deactivation(part); | 1112 | xpc_arch_ops.request_partition_deactivation(part); |
1163 | xpc_indicate_partition_disengaged(part); | 1113 | xpc_arch_ops.indicate_partition_disengaged(part); |
1164 | } | 1114 | } |
1165 | } | 1115 | } |
1166 | 1116 | ||
@@ -1177,7 +1127,7 @@ xpc_die_deactivate(void) | |||
1177 | wait_to_print = XPC_DEACTIVATE_PRINTMSG_INTERVAL * 1000 * 5; | 1127 | wait_to_print = XPC_DEACTIVATE_PRINTMSG_INTERVAL * 1000 * 5; |
1178 | 1128 | ||
1179 | while (1) { | 1129 | while (1) { |
1180 | any_engaged = xpc_any_partition_engaged(); | 1130 | any_engaged = xpc_arch_ops.any_partition_engaged(); |
1181 | if (!any_engaged) { | 1131 | if (!any_engaged) { |
1182 | dev_info(xpc_part, "all partitions have deactivated\n"); | 1132 | dev_info(xpc_part, "all partitions have deactivated\n"); |
1183 | break; | 1133 | break; |
@@ -1186,7 +1136,7 @@ xpc_die_deactivate(void) | |||
1186 | if (!keep_waiting--) { | 1136 | if (!keep_waiting--) { |
1187 | for (partid = 0; partid < xp_max_npartitions; | 1137 | for (partid = 0; partid < xp_max_npartitions; |
1188 | partid++) { | 1138 | partid++) { |
1189 | if (xpc_partition_engaged(partid)) { | 1139 | if (xpc_arch_ops.partition_engaged(partid)) { |
1190 | dev_info(xpc_part, "deactivate from " | 1140 | dev_info(xpc_part, "deactivate from " |
1191 | "remote partition %d timed " | 1141 | "remote partition %d timed " |
1192 | "out\n", partid); | 1142 | "out\n", partid); |
@@ -1233,7 +1183,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused) | |||
1233 | /* fall through */ | 1183 | /* fall through */ |
1234 | case DIE_MCA_MONARCH_ENTER: | 1184 | case DIE_MCA_MONARCH_ENTER: |
1235 | case DIE_INIT_MONARCH_ENTER: | 1185 | case DIE_INIT_MONARCH_ENTER: |
1236 | xpc_offline_heartbeat(); | 1186 | xpc_arch_ops.offline_heartbeat(); |
1237 | break; | 1187 | break; |
1238 | 1188 | ||
1239 | case DIE_KDEBUG_LEAVE: | 1189 | case DIE_KDEBUG_LEAVE: |
@@ -1244,7 +1194,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused) | |||
1244 | /* fall through */ | 1194 | /* fall through */ |
1245 | case DIE_MCA_MONARCH_LEAVE: | 1195 | case DIE_MCA_MONARCH_LEAVE: |
1246 | case DIE_INIT_MONARCH_LEAVE: | 1196 | case DIE_INIT_MONARCH_LEAVE: |
1247 | xpc_online_heartbeat(); | 1197 | xpc_arch_ops.online_heartbeat(); |
1248 | break; | 1198 | break; |
1249 | } | 1199 | } |
1250 | #else | 1200 | #else |
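A smaller idiom introduced in the xpc_main.c hunks is caching a frequently used xpc_arch_ops member in a local function-pointer variable (n_of_deliverable_payloads, indicate_partition_disengaged) so loop bodies and wait conditions stay short. A compact sketch of the same idea, with made-up names:

/*
 * Cache an ops-table member once in a local function pointer instead of
 * spelling out the full table access at every call site in a hot loop.
 */
#include <stdio.h>

struct demo_ops {
	int (*n_pending)(void *queue);
};

static int demo_n_pending(void *queue) { (void)queue; return 0; }

static struct demo_ops demo_arch_ops = { .n_pending = demo_n_pending };

static void drain(void *queue)
{
	/* one lookup here, short calls below */
	int (*n_pending)(void *) = demo_arch_ops.n_pending;

	while (n_pending(queue) > 0)
		printf("deliver one payload\n");
}

int main(void)
{
	drain(NULL);
	return 0;
}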
diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c index 6722f6fe4dc7..65877bc5edaa 100644 --- a/drivers/misc/sgi-xp/xpc_partition.c +++ b/drivers/misc/sgi-xp/xpc_partition.c | |||
@@ -70,6 +70,9 @@ xpc_get_rsvd_page_pa(int nasid) | |||
70 | size_t buf_len = 0; | 70 | size_t buf_len = 0; |
71 | void *buf = buf; | 71 | void *buf = buf; |
72 | void *buf_base = NULL; | 72 | void *buf_base = NULL; |
73 | enum xp_retval (*get_partition_rsvd_page_pa) | ||
74 | (void *, u64 *, unsigned long *, size_t *) = | ||
75 | xpc_arch_ops.get_partition_rsvd_page_pa; | ||
73 | 76 | ||
74 | while (1) { | 77 | while (1) { |
75 | 78 | ||
@@ -79,8 +82,7 @@ xpc_get_rsvd_page_pa(int nasid) | |||
79 | * ??? function or have two versions? Rename rp_pa for UV to | 82 | * ??? function or have two versions? Rename rp_pa for UV to |
80 | * ??? rp_gpa? | 83 | * ??? rp_gpa? |
81 | */ | 84 | */ |
82 | ret = xpc_get_partition_rsvd_page_pa(buf, &cookie, &rp_pa, | 85 | ret = get_partition_rsvd_page_pa(buf, &cookie, &rp_pa, &len); |
83 | &len); | ||
84 | 86 | ||
85 | dev_dbg(xpc_part, "SAL returned with ret=%d, cookie=0x%016lx, " | 87 | dev_dbg(xpc_part, "SAL returned with ret=%d, cookie=0x%016lx, " |
86 | "address=0x%016lx, len=0x%016lx\n", ret, | 88 | "address=0x%016lx, len=0x%016lx\n", ret, |
@@ -172,7 +174,7 @@ xpc_setup_rsvd_page(void) | |||
172 | xpc_part_nasids = XPC_RP_PART_NASIDS(rp); | 174 | xpc_part_nasids = XPC_RP_PART_NASIDS(rp); |
173 | xpc_mach_nasids = XPC_RP_MACH_NASIDS(rp); | 175 | xpc_mach_nasids = XPC_RP_MACH_NASIDS(rp); |
174 | 176 | ||
175 | ret = xpc_setup_rsvd_page_sn(rp); | 177 | ret = xpc_arch_ops.setup_rsvd_page(rp); |
176 | if (ret != 0) | 178 | if (ret != 0) |
177 | return ret; | 179 | return ret; |
178 | 180 | ||
@@ -264,7 +266,7 @@ xpc_partition_disengaged(struct xpc_partition *part) | |||
264 | short partid = XPC_PARTID(part); | 266 | short partid = XPC_PARTID(part); |
265 | int disengaged; | 267 | int disengaged; |
266 | 268 | ||
267 | disengaged = !xpc_partition_engaged(partid); | 269 | disengaged = !xpc_arch_ops.partition_engaged(partid); |
268 | if (part->disengage_timeout) { | 270 | if (part->disengage_timeout) { |
269 | if (!disengaged) { | 271 | if (!disengaged) { |
270 | if (time_is_after_jiffies(part->disengage_timeout)) { | 272 | if (time_is_after_jiffies(part->disengage_timeout)) { |
@@ -280,7 +282,7 @@ xpc_partition_disengaged(struct xpc_partition *part) | |||
280 | dev_info(xpc_part, "deactivate request to remote " | 282 | dev_info(xpc_part, "deactivate request to remote " |
281 | "partition %d timed out\n", partid); | 283 | "partition %d timed out\n", partid); |
282 | xpc_disengage_timedout = 1; | 284 | xpc_disengage_timedout = 1; |
283 | xpc_assume_partition_disengaged(partid); | 285 | xpc_arch_ops.assume_partition_disengaged(partid); |
284 | disengaged = 1; | 286 | disengaged = 1; |
285 | } | 287 | } |
286 | part->disengage_timeout = 0; | 288 | part->disengage_timeout = 0; |
@@ -294,7 +296,7 @@ xpc_partition_disengaged(struct xpc_partition *part) | |||
294 | if (part->act_state != XPC_P_AS_INACTIVE) | 296 | if (part->act_state != XPC_P_AS_INACTIVE) |
295 | xpc_wakeup_channel_mgr(part); | 297 | xpc_wakeup_channel_mgr(part); |
296 | 298 | ||
297 | xpc_cancel_partition_deactivation_request(part); | 299 | xpc_arch_ops.cancel_partition_deactivation_request(part); |
298 | } | 300 | } |
299 | return disengaged; | 301 | return disengaged; |
300 | } | 302 | } |
@@ -339,7 +341,7 @@ xpc_deactivate_partition(const int line, struct xpc_partition *part, | |||
339 | spin_unlock_irqrestore(&part->act_lock, irq_flags); | 341 | spin_unlock_irqrestore(&part->act_lock, irq_flags); |
340 | if (reason == xpReactivating) { | 342 | if (reason == xpReactivating) { |
341 | /* we interrupt ourselves to reactivate partition */ | 343 | /* we interrupt ourselves to reactivate partition */ |
342 | xpc_request_partition_reactivation(part); | 344 | xpc_arch_ops.request_partition_reactivation(part); |
343 | } | 345 | } |
344 | return; | 346 | return; |
345 | } | 347 | } |
@@ -358,7 +360,7 @@ xpc_deactivate_partition(const int line, struct xpc_partition *part, | |||
358 | spin_unlock_irqrestore(&part->act_lock, irq_flags); | 360 | spin_unlock_irqrestore(&part->act_lock, irq_flags); |
359 | 361 | ||
360 | /* ask remote partition to deactivate with regard to us */ | 362 | /* ask remote partition to deactivate with regard to us */ |
361 | xpc_request_partition_deactivation(part); | 363 | xpc_arch_ops.request_partition_deactivation(part); |
362 | 364 | ||
363 | /* set a timelimit on the disengage phase of the deactivation request */ | 365 | /* set a timelimit on the disengage phase of the deactivation request */ |
364 | part->disengage_timeout = jiffies + (xpc_disengage_timelimit * HZ); | 366 | part->disengage_timeout = jiffies + (xpc_disengage_timelimit * HZ); |
@@ -496,7 +498,7 @@ xpc_discovery(void) | |||
496 | continue; | 498 | continue; |
497 | } | 499 | } |
498 | 500 | ||
499 | xpc_request_partition_activation(remote_rp, | 501 | xpc_arch_ops.request_partition_activation(remote_rp, |
500 | remote_rp_pa, nasid); | 502 | remote_rp_pa, nasid); |
501 | } | 503 | } |
502 | } | 504 | } |
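Editor's note, not part of the patch: the xpc_partition.c hunks above route every former global helper call through the xpc_arch_ops table, and in xpc_get_rsvd_page_pa() the ops member is first copied into a local function pointer so the later call site stays within line-length limits. A minimal userspace sketch of that caching idiom, with invented names (demo_ops, demo_get_pa) standing in for the driver's types:

    #include <stdio.h>
    #include <stddef.h>

    /* Illustrative ops table, loosely modeled on xpc_arch_ops. */
    struct demo_ops {
            int (*get_rsvd_page_pa)(void *buf, unsigned long *pa, size_t *len);
    };

    static int demo_get_pa(void *buf, unsigned long *pa, size_t *len)
    {
            (void)buf;
            *pa = 0x1000;
            *len = 64;
            return 0;
    }

    static struct demo_ops demo_arch_ops = { .get_rsvd_page_pa = demo_get_pa };

    int main(void)
    {
            /* Cache the member in a local so the call site stays short. */
            int (*get_rsvd_page_pa)(void *, unsigned long *, size_t *) =
                    demo_arch_ops.get_rsvd_page_pa;
            unsigned long pa;
            size_t len;

            if (get_rsvd_page_pa(NULL, &pa, &len) == 0)
                    printf("pa=0x%lx len=%zu\n", pa, len);
            return 0;
    }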
diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c index eaaa964942de..915a3b495da5 100644 --- a/drivers/misc/sgi-xp/xpc_sn2.c +++ b/drivers/misc/sgi-xp/xpc_sn2.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved. | 6 | * Copyright (c) 2008-2009 Silicon Graphics, Inc. All Rights Reserved. |
7 | */ | 7 | */ |
8 | 8 | ||
9 | /* | 9 | /* |
@@ -60,14 +60,14 @@ static struct xpc_vars_sn2 *xpc_vars_sn2; | |||
60 | static struct xpc_vars_part_sn2 *xpc_vars_part_sn2; | 60 | static struct xpc_vars_part_sn2 *xpc_vars_part_sn2; |
61 | 61 | ||
62 | static int | 62 | static int |
63 | xpc_setup_partitions_sn_sn2(void) | 63 | xpc_setup_partitions_sn2(void) |
64 | { | 64 | { |
65 | /* nothing needs to be done */ | 65 | /* nothing needs to be done */ |
66 | return 0; | 66 | return 0; |
67 | } | 67 | } |
68 | 68 | ||
69 | static void | 69 | static void |
70 | xpc_teardown_partitions_sn_sn2(void) | 70 | xpc_teardown_partitions_sn2(void) |
71 | { | 71 | { |
72 | /* nothing needs to be done */ | 72 | /* nothing needs to be done */ |
73 | } | 73 | } |
@@ -431,6 +431,13 @@ xpc_send_chctl_openreply_sn2(struct xpc_channel *ch, unsigned long *irq_flags) | |||
431 | } | 431 | } |
432 | 432 | ||
433 | static void | 433 | static void |
434 | xpc_send_chctl_opencomplete_sn2(struct xpc_channel *ch, | ||
435 | unsigned long *irq_flags) | ||
436 | { | ||
437 | XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_OPENCOMPLETE, irq_flags); | ||
438 | } | ||
439 | |||
440 | static void | ||
434 | xpc_send_chctl_msgrequest_sn2(struct xpc_channel *ch) | 441 | xpc_send_chctl_msgrequest_sn2(struct xpc_channel *ch) |
435 | { | 442 | { |
436 | XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_MSGREQUEST, NULL); | 443 | XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_MSGREQUEST, NULL); |
@@ -621,7 +628,7 @@ xpc_get_partition_rsvd_page_pa_sn2(void *buf, u64 *cookie, unsigned long *rp_pa, | |||
621 | 628 | ||
622 | 629 | ||
623 | static int | 630 | static int |
624 | xpc_setup_rsvd_page_sn_sn2(struct xpc_rsvd_page *rp) | 631 | xpc_setup_rsvd_page_sn2(struct xpc_rsvd_page *rp) |
625 | { | 632 | { |
626 | struct amo *amos_page; | 633 | struct amo *amos_page; |
627 | int i; | 634 | int i; |
@@ -629,7 +636,7 @@ xpc_setup_rsvd_page_sn_sn2(struct xpc_rsvd_page *rp) | |||
629 | 636 | ||
630 | xpc_vars_sn2 = XPC_RP_VARS(rp); | 637 | xpc_vars_sn2 = XPC_RP_VARS(rp); |
631 | 638 | ||
632 | rp->sn.vars_pa = xp_pa(xpc_vars_sn2); | 639 | rp->sn.sn2.vars_pa = xp_pa(xpc_vars_sn2); |
633 | 640 | ||
634 | /* vars_part array follows immediately after vars */ | 641 | /* vars_part array follows immediately after vars */ |
635 | xpc_vars_part_sn2 = (struct xpc_vars_part_sn2 *)((u8 *)XPC_RP_VARS(rp) + | 642 | xpc_vars_part_sn2 = (struct xpc_vars_part_sn2 *)((u8 *)XPC_RP_VARS(rp) + |
@@ -693,6 +700,33 @@ xpc_setup_rsvd_page_sn_sn2(struct xpc_rsvd_page *rp) | |||
693 | return 0; | 700 | return 0; |
694 | } | 701 | } |
695 | 702 | ||
703 | static int | ||
704 | xpc_hb_allowed_sn2(short partid, void *heartbeating_to_mask) | ||
705 | { | ||
706 | return test_bit(partid, heartbeating_to_mask); | ||
707 | } | ||
708 | |||
709 | static void | ||
710 | xpc_allow_hb_sn2(short partid) | ||
711 | { | ||
712 | DBUG_ON(xpc_vars_sn2 == NULL); | ||
713 | set_bit(partid, xpc_vars_sn2->heartbeating_to_mask); | ||
714 | } | ||
715 | |||
716 | static void | ||
717 | xpc_disallow_hb_sn2(short partid) | ||
718 | { | ||
719 | DBUG_ON(xpc_vars_sn2 == NULL); | ||
720 | clear_bit(partid, xpc_vars_sn2->heartbeating_to_mask); | ||
721 | } | ||
722 | |||
723 | static void | ||
724 | xpc_disallow_all_hbs_sn2(void) | ||
725 | { | ||
726 | DBUG_ON(xpc_vars_sn2 == NULL); | ||
727 | bitmap_zero(xpc_vars_sn2->heartbeating_to_mask, xp_max_npartitions); | ||
728 | } | ||
729 | |||
696 | static void | 730 | static void |
697 | xpc_increment_heartbeat_sn2(void) | 731 | xpc_increment_heartbeat_sn2(void) |
698 | { | 732 | { |
@@ -719,7 +753,6 @@ xpc_heartbeat_init_sn2(void) | |||
719 | DBUG_ON(xpc_vars_sn2 == NULL); | 753 | DBUG_ON(xpc_vars_sn2 == NULL); |
720 | 754 | ||
721 | bitmap_zero(xpc_vars_sn2->heartbeating_to_mask, XP_MAX_NPARTITIONS_SN2); | 755 | bitmap_zero(xpc_vars_sn2->heartbeating_to_mask, XP_MAX_NPARTITIONS_SN2); |
722 | xpc_heartbeating_to_mask = &xpc_vars_sn2->heartbeating_to_mask[0]; | ||
723 | xpc_online_heartbeat_sn2(); | 756 | xpc_online_heartbeat_sn2(); |
724 | } | 757 | } |
725 | 758 | ||
@@ -751,9 +784,9 @@ xpc_get_remote_heartbeat_sn2(struct xpc_partition *part) | |||
751 | remote_vars->heartbeating_to_mask[0]); | 784 | remote_vars->heartbeating_to_mask[0]); |
752 | 785 | ||
753 | if ((remote_vars->heartbeat == part->last_heartbeat && | 786 | if ((remote_vars->heartbeat == part->last_heartbeat && |
754 | remote_vars->heartbeat_offline == 0) || | 787 | !remote_vars->heartbeat_offline) || |
755 | !xpc_hb_allowed(sn_partition_id, | 788 | !xpc_hb_allowed_sn2(sn_partition_id, |
756 | &remote_vars->heartbeating_to_mask)) { | 789 | remote_vars->heartbeating_to_mask)) { |
757 | ret = xpNoHeartbeat; | 790 | ret = xpNoHeartbeat; |
758 | } else { | 791 | } else { |
759 | part->last_heartbeat = remote_vars->heartbeat; | 792 | part->last_heartbeat = remote_vars->heartbeat; |
@@ -972,7 +1005,7 @@ xpc_identify_activate_IRQ_req_sn2(int nasid) | |||
972 | return; | 1005 | return; |
973 | } | 1006 | } |
974 | 1007 | ||
975 | remote_vars_pa = remote_rp->sn.vars_pa; | 1008 | remote_vars_pa = remote_rp->sn.sn2.vars_pa; |
976 | remote_rp_version = remote_rp->version; | 1009 | remote_rp_version = remote_rp->version; |
977 | remote_rp_ts_jiffies = remote_rp->ts_jiffies; | 1010 | remote_rp_ts_jiffies = remote_rp->ts_jiffies; |
978 | 1011 | ||
@@ -1129,7 +1162,7 @@ xpc_process_activate_IRQ_rcvd_sn2(void) | |||
1129 | * Setup the channel structures that are sn2 specific. | 1162 | * Setup the channel structures that are sn2 specific. |
1130 | */ | 1163 | */ |
1131 | static enum xp_retval | 1164 | static enum xp_retval |
1132 | xpc_setup_ch_structures_sn_sn2(struct xpc_partition *part) | 1165 | xpc_setup_ch_structures_sn2(struct xpc_partition *part) |
1133 | { | 1166 | { |
1134 | struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2; | 1167 | struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2; |
1135 | struct xpc_channel_sn2 *ch_sn2; | 1168 | struct xpc_channel_sn2 *ch_sn2; |
@@ -1251,7 +1284,7 @@ out_1: | |||
1251 | * Teardown the channel structures that are sn2 specific. | 1284 | * Teardown the channel structures that are sn2 specific. |
1252 | */ | 1285 | */ |
1253 | static void | 1286 | static void |
1254 | xpc_teardown_ch_structures_sn_sn2(struct xpc_partition *part) | 1287 | xpc_teardown_ch_structures_sn2(struct xpc_partition *part) |
1255 | { | 1288 | { |
1256 | struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2; | 1289 | struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2; |
1257 | short partid = XPC_PARTID(part); | 1290 | short partid = XPC_PARTID(part); |
@@ -2315,61 +2348,70 @@ xpc_received_payload_sn2(struct xpc_channel *ch, void *payload) | |||
2315 | xpc_acknowledge_msgs_sn2(ch, get, msg->flags); | 2348 | xpc_acknowledge_msgs_sn2(ch, get, msg->flags); |
2316 | } | 2349 | } |
2317 | 2350 | ||
2351 | static struct xpc_arch_operations xpc_arch_ops_sn2 = { | ||
2352 | .setup_partitions = xpc_setup_partitions_sn2, | ||
2353 | .teardown_partitions = xpc_teardown_partitions_sn2, | ||
2354 | .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2, | ||
2355 | .get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_sn2, | ||
2356 | .setup_rsvd_page = xpc_setup_rsvd_page_sn2, | ||
2357 | |||
2358 | .allow_hb = xpc_allow_hb_sn2, | ||
2359 | .disallow_hb = xpc_disallow_hb_sn2, | ||
2360 | .disallow_all_hbs = xpc_disallow_all_hbs_sn2, | ||
2361 | .increment_heartbeat = xpc_increment_heartbeat_sn2, | ||
2362 | .offline_heartbeat = xpc_offline_heartbeat_sn2, | ||
2363 | .online_heartbeat = xpc_online_heartbeat_sn2, | ||
2364 | .heartbeat_init = xpc_heartbeat_init_sn2, | ||
2365 | .heartbeat_exit = xpc_heartbeat_exit_sn2, | ||
2366 | .get_remote_heartbeat = xpc_get_remote_heartbeat_sn2, | ||
2367 | |||
2368 | .request_partition_activation = | ||
2369 | xpc_request_partition_activation_sn2, | ||
2370 | .request_partition_reactivation = | ||
2371 | xpc_request_partition_reactivation_sn2, | ||
2372 | .request_partition_deactivation = | ||
2373 | xpc_request_partition_deactivation_sn2, | ||
2374 | .cancel_partition_deactivation_request = | ||
2375 | xpc_cancel_partition_deactivation_request_sn2, | ||
2376 | |||
2377 | .setup_ch_structures = xpc_setup_ch_structures_sn2, | ||
2378 | .teardown_ch_structures = xpc_teardown_ch_structures_sn2, | ||
2379 | |||
2380 | .make_first_contact = xpc_make_first_contact_sn2, | ||
2381 | |||
2382 | .get_chctl_all_flags = xpc_get_chctl_all_flags_sn2, | ||
2383 | .send_chctl_closerequest = xpc_send_chctl_closerequest_sn2, | ||
2384 | .send_chctl_closereply = xpc_send_chctl_closereply_sn2, | ||
2385 | .send_chctl_openrequest = xpc_send_chctl_openrequest_sn2, | ||
2386 | .send_chctl_openreply = xpc_send_chctl_openreply_sn2, | ||
2387 | .send_chctl_opencomplete = xpc_send_chctl_opencomplete_sn2, | ||
2388 | .process_msg_chctl_flags = xpc_process_msg_chctl_flags_sn2, | ||
2389 | |||
2390 | .save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_sn2, | ||
2391 | |||
2392 | .setup_msg_structures = xpc_setup_msg_structures_sn2, | ||
2393 | .teardown_msg_structures = xpc_teardown_msg_structures_sn2, | ||
2394 | |||
2395 | .indicate_partition_engaged = xpc_indicate_partition_engaged_sn2, | ||
2396 | .indicate_partition_disengaged = xpc_indicate_partition_disengaged_sn2, | ||
2397 | .partition_engaged = xpc_partition_engaged_sn2, | ||
2398 | .any_partition_engaged = xpc_any_partition_engaged_sn2, | ||
2399 | .assume_partition_disengaged = xpc_assume_partition_disengaged_sn2, | ||
2400 | |||
2401 | .n_of_deliverable_payloads = xpc_n_of_deliverable_payloads_sn2, | ||
2402 | .send_payload = xpc_send_payload_sn2, | ||
2403 | .get_deliverable_payload = xpc_get_deliverable_payload_sn2, | ||
2404 | .received_payload = xpc_received_payload_sn2, | ||
2405 | .notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_sn2, | ||
2406 | }; | ||
2407 | |||
2318 | int | 2408 | int |
2319 | xpc_init_sn2(void) | 2409 | xpc_init_sn2(void) |
2320 | { | 2410 | { |
2321 | int ret; | 2411 | int ret; |
2322 | size_t buf_size; | 2412 | size_t buf_size; |
2323 | 2413 | ||
2324 | xpc_setup_partitions_sn = xpc_setup_partitions_sn_sn2; | 2414 | xpc_arch_ops = xpc_arch_ops_sn2; |
2325 | xpc_teardown_partitions_sn = xpc_teardown_partitions_sn_sn2; | ||
2326 | xpc_get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_sn2; | ||
2327 | xpc_setup_rsvd_page_sn = xpc_setup_rsvd_page_sn_sn2; | ||
2328 | xpc_increment_heartbeat = xpc_increment_heartbeat_sn2; | ||
2329 | xpc_offline_heartbeat = xpc_offline_heartbeat_sn2; | ||
2330 | xpc_online_heartbeat = xpc_online_heartbeat_sn2; | ||
2331 | xpc_heartbeat_init = xpc_heartbeat_init_sn2; | ||
2332 | xpc_heartbeat_exit = xpc_heartbeat_exit_sn2; | ||
2333 | xpc_get_remote_heartbeat = xpc_get_remote_heartbeat_sn2; | ||
2334 | |||
2335 | xpc_request_partition_activation = xpc_request_partition_activation_sn2; | ||
2336 | xpc_request_partition_reactivation = | ||
2337 | xpc_request_partition_reactivation_sn2; | ||
2338 | xpc_request_partition_deactivation = | ||
2339 | xpc_request_partition_deactivation_sn2; | ||
2340 | xpc_cancel_partition_deactivation_request = | ||
2341 | xpc_cancel_partition_deactivation_request_sn2; | ||
2342 | |||
2343 | xpc_process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2; | ||
2344 | xpc_setup_ch_structures_sn = xpc_setup_ch_structures_sn_sn2; | ||
2345 | xpc_teardown_ch_structures_sn = xpc_teardown_ch_structures_sn_sn2; | ||
2346 | xpc_make_first_contact = xpc_make_first_contact_sn2; | ||
2347 | |||
2348 | xpc_get_chctl_all_flags = xpc_get_chctl_all_flags_sn2; | ||
2349 | xpc_send_chctl_closerequest = xpc_send_chctl_closerequest_sn2; | ||
2350 | xpc_send_chctl_closereply = xpc_send_chctl_closereply_sn2; | ||
2351 | xpc_send_chctl_openrequest = xpc_send_chctl_openrequest_sn2; | ||
2352 | xpc_send_chctl_openreply = xpc_send_chctl_openreply_sn2; | ||
2353 | |||
2354 | xpc_save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_sn2; | ||
2355 | |||
2356 | xpc_setup_msg_structures = xpc_setup_msg_structures_sn2; | ||
2357 | xpc_teardown_msg_structures = xpc_teardown_msg_structures_sn2; | ||
2358 | |||
2359 | xpc_notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_sn2; | ||
2360 | xpc_process_msg_chctl_flags = xpc_process_msg_chctl_flags_sn2; | ||
2361 | xpc_n_of_deliverable_payloads = xpc_n_of_deliverable_payloads_sn2; | ||
2362 | xpc_get_deliverable_payload = xpc_get_deliverable_payload_sn2; | ||
2363 | |||
2364 | xpc_indicate_partition_engaged = xpc_indicate_partition_engaged_sn2; | ||
2365 | xpc_indicate_partition_disengaged = | ||
2366 | xpc_indicate_partition_disengaged_sn2; | ||
2367 | xpc_partition_engaged = xpc_partition_engaged_sn2; | ||
2368 | xpc_any_partition_engaged = xpc_any_partition_engaged_sn2; | ||
2369 | xpc_assume_partition_disengaged = xpc_assume_partition_disengaged_sn2; | ||
2370 | |||
2371 | xpc_send_payload = xpc_send_payload_sn2; | ||
2372 | xpc_received_payload = xpc_received_payload_sn2; | ||
2373 | 2415 | ||
2374 | if (offsetof(struct xpc_msg_sn2, payload) > XPC_MSG_HDR_MAX_SIZE) { | 2416 | if (offsetof(struct xpc_msg_sn2, payload) > XPC_MSG_HDR_MAX_SIZE) { |
2375 | dev_err(xpc_part, "header portion of struct xpc_msg_sn2 is " | 2417 | dev_err(xpc_part, "header portion of struct xpc_msg_sn2 is " |
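Editor's note, not part of the patch: the xpc_sn2.c change collapses the long run of per-function global assignments in xpc_init_sn2() into one statically initialized struct xpc_arch_operations that is copied into the shared xpc_arch_ops in a single statement. A hedged, self-contained sketch of that ops-table pattern; the struct and function names below are invented for illustration:

    #include <stdio.h>

    /* Sketch of the "ops table" pattern used by xpc_arch_ops_sn2/_uv. */
    struct arch_ops {
            void (*heartbeat_init)(void);
            void (*increment_heartbeat)(void);
    };

    static void sn2_heartbeat_init(void)      { puts("sn2: init heartbeat"); }
    static void sn2_increment_heartbeat(void) { puts("sn2: heartbeat++"); }

    /* One designated-initializer table per backend... */
    static const struct arch_ops arch_ops_sn2 = {
            .heartbeat_init      = sn2_heartbeat_init,
            .increment_heartbeat = sn2_increment_heartbeat,
    };

    /* ...copied into the single instance the common code calls through. */
    static struct arch_ops arch_ops;

    int main(void)
    {
            arch_ops = arch_ops_sn2;        /* analogous to xpc_init_sn2() */
            arch_ops.heartbeat_init();
            arch_ops.increment_heartbeat();
            return 0;
    }

Compared with dozens of individual pointer assignments, the table keeps all backend hooks visible in one place and makes a missing member show up as a NULL dereference rather than a silently stale global.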
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c index f7fff4727edb..9172fcdee4e2 100644 --- a/drivers/misc/sgi-xp/xpc_uv.c +++ b/drivers/misc/sgi-xp/xpc_uv.c | |||
@@ -46,8 +46,7 @@ struct uv_IO_APIC_route_entry { | |||
46 | }; | 46 | }; |
47 | #endif | 47 | #endif |
48 | 48 | ||
49 | static atomic64_t xpc_heartbeat_uv; | 49 | static struct xpc_heartbeat_uv *xpc_heartbeat_uv; |
50 | static DECLARE_BITMAP(xpc_heartbeating_to_mask_uv, XP_MAX_NPARTITIONS_UV); | ||
51 | 50 | ||
52 | #define XPC_ACTIVATE_MSG_SIZE_UV (1 * GRU_CACHE_LINE_BYTES) | 51 | #define XPC_ACTIVATE_MSG_SIZE_UV (1 * GRU_CACHE_LINE_BYTES) |
53 | #define XPC_ACTIVATE_MQ_SIZE_UV (4 * XP_MAX_NPARTITIONS_UV * \ | 52 | #define XPC_ACTIVATE_MQ_SIZE_UV (4 * XP_MAX_NPARTITIONS_UV * \ |
@@ -63,7 +62,7 @@ static struct xpc_gru_mq_uv *xpc_activate_mq_uv; | |||
63 | static struct xpc_gru_mq_uv *xpc_notify_mq_uv; | 62 | static struct xpc_gru_mq_uv *xpc_notify_mq_uv; |
64 | 63 | ||
65 | static int | 64 | static int |
66 | xpc_setup_partitions_sn_uv(void) | 65 | xpc_setup_partitions_uv(void) |
67 | { | 66 | { |
68 | short partid; | 67 | short partid; |
69 | struct xpc_partition_uv *part_uv; | 68 | struct xpc_partition_uv *part_uv; |
@@ -79,7 +78,7 @@ xpc_setup_partitions_sn_uv(void) | |||
79 | } | 78 | } |
80 | 79 | ||
81 | static void | 80 | static void |
82 | xpc_teardown_partitions_sn_uv(void) | 81 | xpc_teardown_partitions_uv(void) |
83 | { | 82 | { |
84 | short partid; | 83 | short partid; |
85 | struct xpc_partition_uv *part_uv; | 84 | struct xpc_partition_uv *part_uv; |
@@ -423,41 +422,6 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part, | |||
423 | /* syncing of remote_act_state was just done above */ | 422 | /* syncing of remote_act_state was just done above */ |
424 | break; | 423 | break; |
425 | 424 | ||
426 | case XPC_ACTIVATE_MQ_MSG_INC_HEARTBEAT_UV: { | ||
427 | struct xpc_activate_mq_msg_heartbeat_req_uv *msg; | ||
428 | |||
429 | msg = container_of(msg_hdr, | ||
430 | struct xpc_activate_mq_msg_heartbeat_req_uv, | ||
431 | hdr); | ||
432 | part_uv->heartbeat = msg->heartbeat; | ||
433 | break; | ||
434 | } | ||
435 | case XPC_ACTIVATE_MQ_MSG_OFFLINE_HEARTBEAT_UV: { | ||
436 | struct xpc_activate_mq_msg_heartbeat_req_uv *msg; | ||
437 | |||
438 | msg = container_of(msg_hdr, | ||
439 | struct xpc_activate_mq_msg_heartbeat_req_uv, | ||
440 | hdr); | ||
441 | part_uv->heartbeat = msg->heartbeat; | ||
442 | |||
443 | spin_lock_irqsave(&part_uv->flags_lock, irq_flags); | ||
444 | part_uv->flags |= XPC_P_HEARTBEAT_OFFLINE_UV; | ||
445 | spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags); | ||
446 | break; | ||
447 | } | ||
448 | case XPC_ACTIVATE_MQ_MSG_ONLINE_HEARTBEAT_UV: { | ||
449 | struct xpc_activate_mq_msg_heartbeat_req_uv *msg; | ||
450 | |||
451 | msg = container_of(msg_hdr, | ||
452 | struct xpc_activate_mq_msg_heartbeat_req_uv, | ||
453 | hdr); | ||
454 | part_uv->heartbeat = msg->heartbeat; | ||
455 | |||
456 | spin_lock_irqsave(&part_uv->flags_lock, irq_flags); | ||
457 | part_uv->flags &= ~XPC_P_HEARTBEAT_OFFLINE_UV; | ||
458 | spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags); | ||
459 | break; | ||
460 | } | ||
461 | case XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV: { | 425 | case XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV: { |
462 | struct xpc_activate_mq_msg_activate_req_uv *msg; | 426 | struct xpc_activate_mq_msg_activate_req_uv *msg; |
463 | 427 | ||
@@ -475,6 +439,7 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part, | |||
475 | part_uv->act_state_req = XPC_P_ASR_ACTIVATE_UV; | 439 | part_uv->act_state_req = XPC_P_ASR_ACTIVATE_UV; |
476 | part->remote_rp_pa = msg->rp_gpa; /* !!! _pa is _gpa */ | 440 | part->remote_rp_pa = msg->rp_gpa; /* !!! _pa is _gpa */ |
477 | part->remote_rp_ts_jiffies = msg_hdr->rp_ts_jiffies; | 441 | part->remote_rp_ts_jiffies = msg_hdr->rp_ts_jiffies; |
442 | part_uv->heartbeat_gpa = msg->heartbeat_gpa; | ||
478 | 443 | ||
479 | if (msg->activate_gru_mq_desc_gpa != | 444 | if (msg->activate_gru_mq_desc_gpa != |
480 | part_uv->activate_gru_mq_desc_gpa) { | 445 | part_uv->activate_gru_mq_desc_gpa) { |
@@ -569,6 +534,17 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part, | |||
569 | xpc_wakeup_channel_mgr(part); | 534 | xpc_wakeup_channel_mgr(part); |
570 | break; | 535 | break; |
571 | } | 536 | } |
537 | case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV: { | ||
538 | struct xpc_activate_mq_msg_chctl_opencomplete_uv *msg; | ||
539 | |||
540 | msg = container_of(msg_hdr, struct | ||
541 | xpc_activate_mq_msg_chctl_opencomplete_uv, hdr); | ||
542 | spin_lock_irqsave(&part->chctl_lock, irq_flags); | ||
543 | part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENCOMPLETE; | ||
544 | spin_unlock_irqrestore(&part->chctl_lock, irq_flags); | ||
545 | |||
546 | xpc_wakeup_channel_mgr(part); | ||
547 | } | ||
572 | case XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV: | 548 | case XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV: |
573 | spin_lock_irqsave(&part_uv->flags_lock, irq_flags); | 549 | spin_lock_irqsave(&part_uv->flags_lock, irq_flags); |
574 | part_uv->flags |= XPC_P_ENGAGED_UV; | 550 | part_uv->flags |= XPC_P_ENGAGED_UV; |
@@ -759,7 +735,7 @@ xpc_send_local_activate_IRQ_uv(struct xpc_partition *part, int act_state_req) | |||
759 | 735 | ||
760 | /* | 736 | /* |
761 | * !!! Make our side think that the remote partition sent an activate | 737 | * !!! Make our side think that the remote partition sent an activate |
762 | * !!! message our way by doing what the activate IRQ handler would | 738 | * !!! mq message our way by doing what the activate IRQ handler would |
763 | * !!! do had one really been sent. | 739 | * !!! do had one really been sent. |
764 | */ | 740 | */ |
765 | 741 | ||
@@ -806,90 +782,82 @@ xpc_get_partition_rsvd_page_pa_uv(void *buf, u64 *cookie, unsigned long *rp_pa, | |||
806 | } | 782 | } |
807 | 783 | ||
808 | static int | 784 | static int |
809 | xpc_setup_rsvd_page_sn_uv(struct xpc_rsvd_page *rp) | 785 | xpc_setup_rsvd_page_uv(struct xpc_rsvd_page *rp) |
810 | { | 786 | { |
811 | rp->sn.activate_gru_mq_desc_gpa = | 787 | xpc_heartbeat_uv = |
788 | &xpc_partitions[sn_partition_id].sn.uv.cached_heartbeat; | ||
789 | rp->sn.uv.heartbeat_gpa = uv_gpa(xpc_heartbeat_uv); | ||
790 | rp->sn.uv.activate_gru_mq_desc_gpa = | ||
812 | uv_gpa(xpc_activate_mq_uv->gru_mq_desc); | 791 | uv_gpa(xpc_activate_mq_uv->gru_mq_desc); |
813 | return 0; | 792 | return 0; |
814 | } | 793 | } |
815 | 794 | ||
816 | static void | 795 | static void |
817 | xpc_send_heartbeat_uv(int msg_type) | 796 | xpc_allow_hb_uv(short partid) |
818 | { | 797 | { |
819 | short partid; | 798 | } |
820 | struct xpc_partition *part; | ||
821 | struct xpc_activate_mq_msg_heartbeat_req_uv msg; | ||
822 | |||
823 | /* | ||
824 | * !!! On uv we're broadcasting a heartbeat message every 5 seconds. | ||
825 | * !!! Whereas on sn2 we're bte_copy'ng the heartbeat info every 20 | ||
826 | * !!! seconds. This is an increase in numalink traffic. | ||
827 | * ??? Is this good? | ||
828 | */ | ||
829 | |||
830 | msg.heartbeat = atomic64_inc_return(&xpc_heartbeat_uv); | ||
831 | |||
832 | partid = find_first_bit(xpc_heartbeating_to_mask_uv, | ||
833 | XP_MAX_NPARTITIONS_UV); | ||
834 | |||
835 | while (partid < XP_MAX_NPARTITIONS_UV) { | ||
836 | part = &xpc_partitions[partid]; | ||
837 | 799 | ||
838 | xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg), | 800 | static void |
839 | msg_type); | 801 | xpc_disallow_hb_uv(short partid) |
802 | { | ||
803 | } | ||
840 | 804 | ||
841 | partid = find_next_bit(xpc_heartbeating_to_mask_uv, | 805 | static void |
842 | XP_MAX_NPARTITIONS_UV, partid + 1); | 806 | xpc_disallow_all_hbs_uv(void) |
843 | } | 807 | { |
844 | } | 808 | } |
845 | 809 | ||
846 | static void | 810 | static void |
847 | xpc_increment_heartbeat_uv(void) | 811 | xpc_increment_heartbeat_uv(void) |
848 | { | 812 | { |
849 | xpc_send_heartbeat_uv(XPC_ACTIVATE_MQ_MSG_INC_HEARTBEAT_UV); | 813 | xpc_heartbeat_uv->value++; |
850 | } | 814 | } |
851 | 815 | ||
852 | static void | 816 | static void |
853 | xpc_offline_heartbeat_uv(void) | 817 | xpc_offline_heartbeat_uv(void) |
854 | { | 818 | { |
855 | xpc_send_heartbeat_uv(XPC_ACTIVATE_MQ_MSG_OFFLINE_HEARTBEAT_UV); | 819 | xpc_increment_heartbeat_uv(); |
820 | xpc_heartbeat_uv->offline = 1; | ||
856 | } | 821 | } |
857 | 822 | ||
858 | static void | 823 | static void |
859 | xpc_online_heartbeat_uv(void) | 824 | xpc_online_heartbeat_uv(void) |
860 | { | 825 | { |
861 | xpc_send_heartbeat_uv(XPC_ACTIVATE_MQ_MSG_ONLINE_HEARTBEAT_UV); | 826 | xpc_increment_heartbeat_uv(); |
827 | xpc_heartbeat_uv->offline = 0; | ||
862 | } | 828 | } |
863 | 829 | ||
864 | static void | 830 | static void |
865 | xpc_heartbeat_init_uv(void) | 831 | xpc_heartbeat_init_uv(void) |
866 | { | 832 | { |
867 | atomic64_set(&xpc_heartbeat_uv, 0); | 833 | xpc_heartbeat_uv->value = 1; |
868 | bitmap_zero(xpc_heartbeating_to_mask_uv, XP_MAX_NPARTITIONS_UV); | 834 | xpc_heartbeat_uv->offline = 0; |
869 | xpc_heartbeating_to_mask = &xpc_heartbeating_to_mask_uv[0]; | ||
870 | } | 835 | } |
871 | 836 | ||
872 | static void | 837 | static void |
873 | xpc_heartbeat_exit_uv(void) | 838 | xpc_heartbeat_exit_uv(void) |
874 | { | 839 | { |
875 | xpc_send_heartbeat_uv(XPC_ACTIVATE_MQ_MSG_OFFLINE_HEARTBEAT_UV); | 840 | xpc_offline_heartbeat_uv(); |
876 | } | 841 | } |
877 | 842 | ||
878 | static enum xp_retval | 843 | static enum xp_retval |
879 | xpc_get_remote_heartbeat_uv(struct xpc_partition *part) | 844 | xpc_get_remote_heartbeat_uv(struct xpc_partition *part) |
880 | { | 845 | { |
881 | struct xpc_partition_uv *part_uv = &part->sn.uv; | 846 | struct xpc_partition_uv *part_uv = &part->sn.uv; |
882 | enum xp_retval ret = xpNoHeartbeat; | 847 | enum xp_retval ret; |
883 | 848 | ||
884 | if (part_uv->remote_act_state != XPC_P_AS_INACTIVE && | 849 | ret = xp_remote_memcpy(uv_gpa(&part_uv->cached_heartbeat), |
885 | part_uv->remote_act_state != XPC_P_AS_DEACTIVATING) { | 850 | part_uv->heartbeat_gpa, |
851 | sizeof(struct xpc_heartbeat_uv)); | ||
852 | if (ret != xpSuccess) | ||
853 | return ret; | ||
886 | 854 | ||
887 | if (part_uv->heartbeat != part->last_heartbeat || | 855 | if (part_uv->cached_heartbeat.value == part->last_heartbeat && |
888 | (part_uv->flags & XPC_P_HEARTBEAT_OFFLINE_UV)) { | 856 | !part_uv->cached_heartbeat.offline) { |
889 | 857 | ||
890 | part->last_heartbeat = part_uv->heartbeat; | 858 | ret = xpNoHeartbeat; |
891 | ret = xpSuccess; | 859 | } else { |
892 | } | 860 | part->last_heartbeat = part_uv->cached_heartbeat.value; |
893 | } | 861 | } |
894 | return ret; | 862 | return ret; |
895 | } | 863 | } |
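Editor's note, not part of the patch: on UV the heartbeat is no longer broadcast as an activate-mq message; each side publishes a small {value, offline} record, and the peer pulls it with xp_remote_memcpy() into a cached copy before comparing against the last value seen. A minimal sketch of that pull-and-compare check; the struct layout and remote_memcpy() helper below are stand-ins, not the driver's definitions:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* Illustrative stand-in for struct xpc_heartbeat_uv. */
    struct heartbeat {
            unsigned long value;
            unsigned long offline;  /* nonzero while the peer is offline */
    };

    /* Stand-in for xp_remote_memcpy(): here just a local copy. */
    static int remote_memcpy(void *dst, const void *src, size_t len)
    {
            memcpy(dst, src, len);
            return 0;
    }

    /* Returns true if the peer still appears alive. */
    static bool peer_alive(const struct heartbeat *remote,
                           struct heartbeat *cached, unsigned long *last_seen)
    {
            if (remote_memcpy(cached, remote, sizeof(*cached)) != 0)
                    return false;
            if (cached->value == *last_seen && !cached->offline)
                    return false;           /* no progress: assume dead */
            *last_seen = cached->value;
            return true;
    }

    int main(void)
    {
            struct heartbeat peer = { .value = 1 }, cache;
            unsigned long last = 0;

            printf("alive=%d\n", peer_alive(&peer, &cache, &last));
            printf("alive=%d\n", peer_alive(&peer, &cache, &last)); /* stalled */
            return 0;
    }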
@@ -904,8 +872,9 @@ xpc_request_partition_activation_uv(struct xpc_rsvd_page *remote_rp, | |||
904 | 872 | ||
905 | part->remote_rp_pa = remote_rp_gpa; /* !!! _pa here is really _gpa */ | 873 | part->remote_rp_pa = remote_rp_gpa; /* !!! _pa here is really _gpa */ |
906 | part->remote_rp_ts_jiffies = remote_rp->ts_jiffies; | 874 | part->remote_rp_ts_jiffies = remote_rp->ts_jiffies; |
875 | part->sn.uv.heartbeat_gpa = remote_rp->sn.uv.heartbeat_gpa; | ||
907 | part->sn.uv.activate_gru_mq_desc_gpa = | 876 | part->sn.uv.activate_gru_mq_desc_gpa = |
908 | remote_rp->sn.activate_gru_mq_desc_gpa; | 877 | remote_rp->sn.uv.activate_gru_mq_desc_gpa; |
909 | 878 | ||
910 | /* | 879 | /* |
911 | * ??? Is it a good idea to make this conditional on what is | 880 | * ??? Is it a good idea to make this conditional on what is |
@@ -913,8 +882,9 @@ xpc_request_partition_activation_uv(struct xpc_rsvd_page *remote_rp, | |||
913 | */ | 882 | */ |
914 | if (part->sn.uv.remote_act_state == XPC_P_AS_INACTIVE) { | 883 | if (part->sn.uv.remote_act_state == XPC_P_AS_INACTIVE) { |
915 | msg.rp_gpa = uv_gpa(xpc_rsvd_page); | 884 | msg.rp_gpa = uv_gpa(xpc_rsvd_page); |
885 | msg.heartbeat_gpa = xpc_rsvd_page->sn.uv.heartbeat_gpa; | ||
916 | msg.activate_gru_mq_desc_gpa = | 886 | msg.activate_gru_mq_desc_gpa = |
917 | xpc_rsvd_page->sn.activate_gru_mq_desc_gpa; | 887 | xpc_rsvd_page->sn.uv.activate_gru_mq_desc_gpa; |
918 | xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg), | 888 | xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg), |
919 | XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV); | 889 | XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV); |
920 | } | 890 | } |
@@ -1010,7 +980,7 @@ xpc_n_of_fifo_entries_uv(struct xpc_fifo_head_uv *head) | |||
1010 | * Setup the channel structures that are uv specific. | 980 | * Setup the channel structures that are uv specific. |
1011 | */ | 981 | */ |
1012 | static enum xp_retval | 982 | static enum xp_retval |
1013 | xpc_setup_ch_structures_sn_uv(struct xpc_partition *part) | 983 | xpc_setup_ch_structures_uv(struct xpc_partition *part) |
1014 | { | 984 | { |
1015 | struct xpc_channel_uv *ch_uv; | 985 | struct xpc_channel_uv *ch_uv; |
1016 | int ch_number; | 986 | int ch_number; |
@@ -1029,7 +999,7 @@ xpc_setup_ch_structures_sn_uv(struct xpc_partition *part) | |||
1029 | * Teardown the channel structures that are uv specific. | 999 | * Teardown the channel structures that are uv specific. |
1030 | */ | 1000 | */ |
1031 | static void | 1001 | static void |
1032 | xpc_teardown_ch_structures_sn_uv(struct xpc_partition *part) | 1002 | xpc_teardown_ch_structures_uv(struct xpc_partition *part) |
1033 | { | 1003 | { |
1034 | /* nothing needs to be done */ | 1004 | /* nothing needs to be done */ |
1035 | return; | 1005 | return; |
@@ -1243,6 +1213,16 @@ xpc_send_chctl_openreply_uv(struct xpc_channel *ch, unsigned long *irq_flags) | |||
1243 | } | 1213 | } |
1244 | 1214 | ||
1245 | static void | 1215 | static void |
1216 | xpc_send_chctl_opencomplete_uv(struct xpc_channel *ch, unsigned long *irq_flags) | ||
1217 | { | ||
1218 | struct xpc_activate_mq_msg_chctl_opencomplete_uv msg; | ||
1219 | |||
1220 | msg.ch_number = ch->number; | ||
1221 | xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg), | ||
1222 | XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV); | ||
1223 | } | ||
1224 | |||
1225 | static void | ||
1246 | xpc_send_chctl_local_msgrequest_uv(struct xpc_partition *part, int ch_number) | 1226 | xpc_send_chctl_local_msgrequest_uv(struct xpc_partition *part, int ch_number) |
1247 | { | 1227 | { |
1248 | unsigned long irq_flags; | 1228 | unsigned long irq_flags; |
@@ -1669,58 +1649,67 @@ xpc_received_payload_uv(struct xpc_channel *ch, void *payload) | |||
1669 | msg->hdr.msg_slot_number += ch->remote_nentries; | 1649 | msg->hdr.msg_slot_number += ch->remote_nentries; |
1670 | } | 1650 | } |
1671 | 1651 | ||
1652 | static struct xpc_arch_operations xpc_arch_ops_uv = { | ||
1653 | .setup_partitions = xpc_setup_partitions_uv, | ||
1654 | .teardown_partitions = xpc_teardown_partitions_uv, | ||
1655 | .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv, | ||
1656 | .get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_uv, | ||
1657 | .setup_rsvd_page = xpc_setup_rsvd_page_uv, | ||
1658 | |||
1659 | .allow_hb = xpc_allow_hb_uv, | ||
1660 | .disallow_hb = xpc_disallow_hb_uv, | ||
1661 | .disallow_all_hbs = xpc_disallow_all_hbs_uv, | ||
1662 | .increment_heartbeat = xpc_increment_heartbeat_uv, | ||
1663 | .offline_heartbeat = xpc_offline_heartbeat_uv, | ||
1664 | .online_heartbeat = xpc_online_heartbeat_uv, | ||
1665 | .heartbeat_init = xpc_heartbeat_init_uv, | ||
1666 | .heartbeat_exit = xpc_heartbeat_exit_uv, | ||
1667 | .get_remote_heartbeat = xpc_get_remote_heartbeat_uv, | ||
1668 | |||
1669 | .request_partition_activation = | ||
1670 | xpc_request_partition_activation_uv, | ||
1671 | .request_partition_reactivation = | ||
1672 | xpc_request_partition_reactivation_uv, | ||
1673 | .request_partition_deactivation = | ||
1674 | xpc_request_partition_deactivation_uv, | ||
1675 | .cancel_partition_deactivation_request = | ||
1676 | xpc_cancel_partition_deactivation_request_uv, | ||
1677 | |||
1678 | .setup_ch_structures = xpc_setup_ch_structures_uv, | ||
1679 | .teardown_ch_structures = xpc_teardown_ch_structures_uv, | ||
1680 | |||
1681 | .make_first_contact = xpc_make_first_contact_uv, | ||
1682 | |||
1683 | .get_chctl_all_flags = xpc_get_chctl_all_flags_uv, | ||
1684 | .send_chctl_closerequest = xpc_send_chctl_closerequest_uv, | ||
1685 | .send_chctl_closereply = xpc_send_chctl_closereply_uv, | ||
1686 | .send_chctl_openrequest = xpc_send_chctl_openrequest_uv, | ||
1687 | .send_chctl_openreply = xpc_send_chctl_openreply_uv, | ||
1688 | .send_chctl_opencomplete = xpc_send_chctl_opencomplete_uv, | ||
1689 | .process_msg_chctl_flags = xpc_process_msg_chctl_flags_uv, | ||
1690 | |||
1691 | .save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_uv, | ||
1692 | |||
1693 | .setup_msg_structures = xpc_setup_msg_structures_uv, | ||
1694 | .teardown_msg_structures = xpc_teardown_msg_structures_uv, | ||
1695 | |||
1696 | .indicate_partition_engaged = xpc_indicate_partition_engaged_uv, | ||
1697 | .indicate_partition_disengaged = xpc_indicate_partition_disengaged_uv, | ||
1698 | .assume_partition_disengaged = xpc_assume_partition_disengaged_uv, | ||
1699 | .partition_engaged = xpc_partition_engaged_uv, | ||
1700 | .any_partition_engaged = xpc_any_partition_engaged_uv, | ||
1701 | |||
1702 | .n_of_deliverable_payloads = xpc_n_of_deliverable_payloads_uv, | ||
1703 | .send_payload = xpc_send_payload_uv, | ||
1704 | .get_deliverable_payload = xpc_get_deliverable_payload_uv, | ||
1705 | .received_payload = xpc_received_payload_uv, | ||
1706 | .notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_uv, | ||
1707 | }; | ||
1708 | |||
1672 | int | 1709 | int |
1673 | xpc_init_uv(void) | 1710 | xpc_init_uv(void) |
1674 | { | 1711 | { |
1675 | xpc_setup_partitions_sn = xpc_setup_partitions_sn_uv; | 1712 | xpc_arch_ops = xpc_arch_ops_uv; |
1676 | xpc_teardown_partitions_sn = xpc_teardown_partitions_sn_uv; | ||
1677 | xpc_process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv; | ||
1678 | xpc_get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_uv; | ||
1679 | xpc_setup_rsvd_page_sn = xpc_setup_rsvd_page_sn_uv; | ||
1680 | xpc_increment_heartbeat = xpc_increment_heartbeat_uv; | ||
1681 | xpc_offline_heartbeat = xpc_offline_heartbeat_uv; | ||
1682 | xpc_online_heartbeat = xpc_online_heartbeat_uv; | ||
1683 | xpc_heartbeat_init = xpc_heartbeat_init_uv; | ||
1684 | xpc_heartbeat_exit = xpc_heartbeat_exit_uv; | ||
1685 | xpc_get_remote_heartbeat = xpc_get_remote_heartbeat_uv; | ||
1686 | |||
1687 | xpc_request_partition_activation = xpc_request_partition_activation_uv; | ||
1688 | xpc_request_partition_reactivation = | ||
1689 | xpc_request_partition_reactivation_uv; | ||
1690 | xpc_request_partition_deactivation = | ||
1691 | xpc_request_partition_deactivation_uv; | ||
1692 | xpc_cancel_partition_deactivation_request = | ||
1693 | xpc_cancel_partition_deactivation_request_uv; | ||
1694 | |||
1695 | xpc_setup_ch_structures_sn = xpc_setup_ch_structures_sn_uv; | ||
1696 | xpc_teardown_ch_structures_sn = xpc_teardown_ch_structures_sn_uv; | ||
1697 | |||
1698 | xpc_make_first_contact = xpc_make_first_contact_uv; | ||
1699 | |||
1700 | xpc_get_chctl_all_flags = xpc_get_chctl_all_flags_uv; | ||
1701 | xpc_send_chctl_closerequest = xpc_send_chctl_closerequest_uv; | ||
1702 | xpc_send_chctl_closereply = xpc_send_chctl_closereply_uv; | ||
1703 | xpc_send_chctl_openrequest = xpc_send_chctl_openrequest_uv; | ||
1704 | xpc_send_chctl_openreply = xpc_send_chctl_openreply_uv; | ||
1705 | |||
1706 | xpc_save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_uv; | ||
1707 | |||
1708 | xpc_setup_msg_structures = xpc_setup_msg_structures_uv; | ||
1709 | xpc_teardown_msg_structures = xpc_teardown_msg_structures_uv; | ||
1710 | |||
1711 | xpc_indicate_partition_engaged = xpc_indicate_partition_engaged_uv; | ||
1712 | xpc_indicate_partition_disengaged = | ||
1713 | xpc_indicate_partition_disengaged_uv; | ||
1714 | xpc_assume_partition_disengaged = xpc_assume_partition_disengaged_uv; | ||
1715 | xpc_partition_engaged = xpc_partition_engaged_uv; | ||
1716 | xpc_any_partition_engaged = xpc_any_partition_engaged_uv; | ||
1717 | |||
1718 | xpc_n_of_deliverable_payloads = xpc_n_of_deliverable_payloads_uv; | ||
1719 | xpc_process_msg_chctl_flags = xpc_process_msg_chctl_flags_uv; | ||
1720 | xpc_send_payload = xpc_send_payload_uv; | ||
1721 | xpc_notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_uv; | ||
1722 | xpc_get_deliverable_payload = xpc_get_deliverable_payload_uv; | ||
1723 | xpc_received_payload = xpc_received_payload_uv; | ||
1724 | 1713 | ||
1725 | if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) { | 1714 | if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) { |
1726 | dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n", | 1715 | dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n", |
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index c232d11a7ed4..06084dbf1277 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c | |||
@@ -208,7 +208,7 @@ static int mmc_read_ext_csd(struct mmc_card *card) | |||
208 | } | 208 | } |
209 | 209 | ||
210 | ext_csd_struct = ext_csd[EXT_CSD_REV]; | 210 | ext_csd_struct = ext_csd[EXT_CSD_REV]; |
211 | if (ext_csd_struct > 2) { | 211 | if (ext_csd_struct > 3) { |
212 | printk(KERN_ERR "%s: unrecognised EXT_CSD structure " | 212 | printk(KERN_ERR "%s: unrecognised EXT_CSD structure " |
213 | "version %d\n", mmc_hostname(card->host), | 213 | "version %d\n", mmc_hostname(card->host), |
214 | ext_csd_struct); | 214 | ext_csd_struct); |
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index 26fc098d77cd..cd81c395e164 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c | |||
@@ -363,15 +363,6 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr, | |||
363 | goto err; | 363 | goto err; |
364 | 364 | ||
365 | /* | 365 | /* |
366 | * For SPI, enable CRC as appropriate. | ||
367 | */ | ||
368 | if (mmc_host_is_spi(host)) { | ||
369 | err = mmc_spi_set_crc(host, use_spi_crc); | ||
370 | if (err) | ||
371 | goto err; | ||
372 | } | ||
373 | |||
374 | /* | ||
375 | * Fetch CID from card. | 366 | * Fetch CID from card. |
376 | */ | 367 | */ |
377 | if (mmc_host_is_spi(host)) | 368 | if (mmc_host_is_spi(host)) |
@@ -458,6 +449,18 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr, | |||
458 | } | 449 | } |
459 | 450 | ||
460 | /* | 451 | /* |
452 | * For SPI, enable CRC as appropriate. | ||
453 | * This CRC enable is located AFTER the reading of the | ||
454 | * card registers because some SDHC cards are not able | ||
455 | * to provide valid CRCs for non-512-byte blocks. | ||
456 | */ | ||
457 | if (mmc_host_is_spi(host)) { | ||
458 | err = mmc_spi_set_crc(host, use_spi_crc); | ||
459 | if (err) | ||
460 | goto free_card; | ||
461 | } | ||
462 | |||
463 | /* | ||
461 | * Attempt to change to high-speed (if supported) | 464 | * Attempt to change to high-speed (if supported) |
462 | */ | 465 | */ |
463 | err = mmc_switch_hs(card); | 466 | err = mmc_switch_hs(card); |
diff --git a/drivers/mmc/host/imxmmc.c b/drivers/mmc/host/imxmmc.c index eb29b1d933ac..e0be21a4a696 100644 --- a/drivers/mmc/host/imxmmc.c +++ b/drivers/mmc/host/imxmmc.c | |||
@@ -307,13 +307,6 @@ static void imxmci_setup_data(struct imxmci_host *host, struct mmc_data *data) | |||
307 | 307 | ||
308 | wmb(); | 308 | wmb(); |
309 | 309 | ||
310 | if (host->actual_bus_width == MMC_BUS_WIDTH_4) | ||
311 | BLR(host->dma) = 0; /* burst 64 byte read / 64 bytes write */ | ||
312 | else | ||
313 | BLR(host->dma) = 16; /* burst 16 byte read / 16 bytes write */ | ||
314 | |||
315 | RSSR(host->dma) = DMA_REQ_SDHC; | ||
316 | |||
317 | set_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events); | 310 | set_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events); |
318 | clear_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events); | 311 | clear_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events); |
319 | 312 | ||
@@ -818,9 +811,11 @@ static void imxmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
818 | if (ios->bus_width == MMC_BUS_WIDTH_4) { | 811 | if (ios->bus_width == MMC_BUS_WIDTH_4) { |
819 | host->actual_bus_width = MMC_BUS_WIDTH_4; | 812 | host->actual_bus_width = MMC_BUS_WIDTH_4; |
820 | imx_gpio_mode(PB11_PF_SD_DAT3); | 813 | imx_gpio_mode(PB11_PF_SD_DAT3); |
814 | BLR(host->dma) = 0; /* burst 64 byte read/write */ | ||
821 | } else { | 815 | } else { |
822 | host->actual_bus_width = MMC_BUS_WIDTH_1; | 816 | host->actual_bus_width = MMC_BUS_WIDTH_1; |
823 | imx_gpio_mode(GPIO_PORTB | GPIO_IN | GPIO_PUEN | 11); | 817 | imx_gpio_mode(GPIO_PORTB | GPIO_IN | GPIO_PUEN | 11); |
818 | BLR(host->dma) = 16; /* burst 16 byte read/write */ | ||
824 | } | 819 | } |
825 | 820 | ||
826 | if (host->power_mode != ios->power_mode) { | 821 | if (host->power_mode != ios->power_mode) { |
@@ -938,7 +933,7 @@ static void imxmci_check_status(unsigned long data) | |||
938 | mod_timer(&host->timer, jiffies + (HZ>>1)); | 933 | mod_timer(&host->timer, jiffies + (HZ>>1)); |
939 | } | 934 | } |
940 | 935 | ||
941 | static int imxmci_probe(struct platform_device *pdev) | 936 | static int __init imxmci_probe(struct platform_device *pdev) |
942 | { | 937 | { |
943 | struct mmc_host *mmc; | 938 | struct mmc_host *mmc; |
944 | struct imxmci_host *host = NULL; | 939 | struct imxmci_host *host = NULL; |
@@ -1034,6 +1029,7 @@ static int imxmci_probe(struct platform_device *pdev) | |||
1034 | } | 1029 | } |
1035 | host->dma_allocated = 1; | 1030 | host->dma_allocated = 1; |
1036 | imx_dma_setup_handlers(host->dma, imxmci_dma_irq, NULL, host); | 1031 | imx_dma_setup_handlers(host->dma, imxmci_dma_irq, NULL, host); |
1032 | RSSR(host->dma) = DMA_REQ_SDHC; | ||
1037 | 1033 | ||
1038 | tasklet_init(&host->tasklet, imxmci_tasklet_fnc, (unsigned long)host); | 1034 | tasklet_init(&host->tasklet, imxmci_tasklet_fnc, (unsigned long)host); |
1039 | host->status_reg=0; | 1035 | host->status_reg=0; |
@@ -1079,7 +1075,7 @@ out: | |||
1079 | return ret; | 1075 | return ret; |
1080 | } | 1076 | } |
1081 | 1077 | ||
1082 | static int imxmci_remove(struct platform_device *pdev) | 1078 | static int __exit imxmci_remove(struct platform_device *pdev) |
1083 | { | 1079 | { |
1084 | struct mmc_host *mmc = platform_get_drvdata(pdev); | 1080 | struct mmc_host *mmc = platform_get_drvdata(pdev); |
1085 | 1081 | ||
@@ -1145,8 +1141,7 @@ static int imxmci_resume(struct platform_device *dev) | |||
1145 | #endif /* CONFIG_PM */ | 1141 | #endif /* CONFIG_PM */ |
1146 | 1142 | ||
1147 | static struct platform_driver imxmci_driver = { | 1143 | static struct platform_driver imxmci_driver = { |
1148 | .probe = imxmci_probe, | 1144 | .remove = __exit_p(imxmci_remove), |
1149 | .remove = imxmci_remove, | ||
1150 | .suspend = imxmci_suspend, | 1145 | .suspend = imxmci_suspend, |
1151 | .resume = imxmci_resume, | 1146 | .resume = imxmci_resume, |
1152 | .driver = { | 1147 | .driver = { |
@@ -1157,7 +1152,7 @@ static struct platform_driver imxmci_driver = { | |||
1157 | 1152 | ||
1158 | static int __init imxmci_init(void) | 1153 | static int __init imxmci_init(void) |
1159 | { | 1154 | { |
1160 | return platform_driver_register(&imxmci_driver); | 1155 | return platform_driver_probe(&imxmci_driver, imxmci_probe); |
1161 | } | 1156 | } |
1162 | 1157 | ||
1163 | static void __exit imxmci_exit(void) | 1158 | static void __exit imxmci_exit(void) |
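Editor's note, not part of the patch: besides moving the BLR/RSSR DMA setup, the imxmmc change converts the driver to platform_driver_probe(), which lets the probe routine be marked __init and discarded after boot because it can never be called for late hotplug. A hedged sketch of that registration pattern; the driver name and callbacks below are placeholders, not the imxmmc code:

    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/platform_device.h>

    static int __init demo_probe(struct platform_device *pdev)
    {
            /* One-shot probe: memory may be reclaimed after init. */
            return 0;
    }

    static int __exit demo_remove(struct platform_device *pdev)
    {
            return 0;
    }

    static struct platform_driver demo_driver = {
            /* No .probe here: platform_driver_probe() supplies it. */
            .remove = __exit_p(demo_remove),
            .driver = {
                    .name = "demo-mmc",
            },
    };

    static int __init demo_init(void)
    {
            /* Binds only already-registered devices, so probe can be __init. */
            return platform_driver_probe(&demo_driver, demo_probe);
    }

    static void __exit demo_exit(void)
    {
            platform_driver_unregister(&demo_driver);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");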
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c index 72f8bde4877a..f48349d18c92 100644 --- a/drivers/mmc/host/mmc_spi.c +++ b/drivers/mmc/host/mmc_spi.c | |||
@@ -24,7 +24,7 @@ | |||
24 | * along with this program; if not, write to the Free Software | 24 | * along with this program; if not, write to the Free Software |
25 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | 25 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
26 | */ | 26 | */ |
27 | #include <linux/hrtimer.h> | 27 | #include <linux/sched.h> |
28 | #include <linux/delay.h> | 28 | #include <linux/delay.h> |
29 | #include <linux/bio.h> | 29 | #include <linux/bio.h> |
30 | #include <linux/dma-mapping.h> | 30 | #include <linux/dma-mapping.h> |
@@ -95,7 +95,7 @@ | |||
95 | * reads which takes nowhere near that long. Older cards may be able to use | 95 | * reads which takes nowhere near that long. Older cards may be able to use |
96 | * shorter timeouts ... but why bother? | 96 | * shorter timeouts ... but why bother? |
97 | */ | 97 | */ |
98 | #define r1b_timeout ktime_set(3, 0) | 98 | #define r1b_timeout (HZ * 3) |
99 | 99 | ||
100 | 100 | ||
101 | /****************************************************************************/ | 101 | /****************************************************************************/ |
@@ -183,12 +183,11 @@ mmc_spi_readbytes(struct mmc_spi_host *host, unsigned len) | |||
183 | return status; | 183 | return status; |
184 | } | 184 | } |
185 | 185 | ||
186 | static int | 186 | static int mmc_spi_skip(struct mmc_spi_host *host, unsigned long timeout, |
187 | mmc_spi_skip(struct mmc_spi_host *host, ktime_t timeout, unsigned n, u8 byte) | 187 | unsigned n, u8 byte) |
188 | { | 188 | { |
189 | u8 *cp = host->data->status; | 189 | u8 *cp = host->data->status; |
190 | 190 | unsigned long start = jiffies; | |
191 | timeout = ktime_add(timeout, ktime_get()); | ||
192 | 191 | ||
193 | while (1) { | 192 | while (1) { |
194 | int status; | 193 | int status; |
@@ -203,22 +202,26 @@ mmc_spi_skip(struct mmc_spi_host *host, ktime_t timeout, unsigned n, u8 byte) | |||
203 | return cp[i]; | 202 | return cp[i]; |
204 | } | 203 | } |
205 | 204 | ||
206 | /* REVISIT investigate msleep() to avoid busy-wait I/O | 205 | if (time_is_before_jiffies(start + timeout)) |
207 | * in at least some cases. | ||
208 | */ | ||
209 | if (ktime_to_ns(ktime_sub(ktime_get(), timeout)) > 0) | ||
210 | break; | 206 | break; |
207 | |||
208 | /* If we need long timeouts, we may release the CPU. | ||
209 | * We use jiffies here because we want to have a relation | ||
210 | * between elapsed time and the blocking of the scheduler. | ||
211 | */ | ||
212 | if (time_is_before_jiffies(start+1)) | ||
213 | schedule(); | ||
211 | } | 214 | } |
212 | return -ETIMEDOUT; | 215 | return -ETIMEDOUT; |
213 | } | 216 | } |
214 | 217 | ||
215 | static inline int | 218 | static inline int |
216 | mmc_spi_wait_unbusy(struct mmc_spi_host *host, ktime_t timeout) | 219 | mmc_spi_wait_unbusy(struct mmc_spi_host *host, unsigned long timeout) |
217 | { | 220 | { |
218 | return mmc_spi_skip(host, timeout, sizeof(host->data->status), 0); | 221 | return mmc_spi_skip(host, timeout, sizeof(host->data->status), 0); |
219 | } | 222 | } |
220 | 223 | ||
221 | static int mmc_spi_readtoken(struct mmc_spi_host *host, ktime_t timeout) | 224 | static int mmc_spi_readtoken(struct mmc_spi_host *host, unsigned long timeout) |
222 | { | 225 | { |
223 | return mmc_spi_skip(host, timeout, 1, 0xff); | 226 | return mmc_spi_skip(host, timeout, 1, 0xff); |
224 | } | 227 | } |
@@ -251,6 +254,10 @@ static int mmc_spi_response_get(struct mmc_spi_host *host, | |||
251 | u8 *cp = host->data->status; | 254 | u8 *cp = host->data->status; |
252 | u8 *end = cp + host->t.len; | 255 | u8 *end = cp + host->t.len; |
253 | int value = 0; | 256 | int value = 0; |
257 | int bitshift; | ||
258 | u8 leftover = 0; | ||
259 | unsigned short rotator; | ||
260 | int i; | ||
254 | char tag[32]; | 261 | char tag[32]; |
255 | 262 | ||
256 | snprintf(tag, sizeof(tag), " ... CMD%d response SPI_%s", | 263 | snprintf(tag, sizeof(tag), " ... CMD%d response SPI_%s", |
@@ -268,9 +275,8 @@ static int mmc_spi_response_get(struct mmc_spi_host *host, | |||
268 | 275 | ||
269 | /* Data block reads (R1 response types) may need more data... */ | 276 | /* Data block reads (R1 response types) may need more data... */ |
270 | if (cp == end) { | 277 | if (cp == end) { |
271 | unsigned i; | ||
272 | |||
273 | cp = host->data->status; | 278 | cp = host->data->status; |
279 | end = cp+1; | ||
274 | 280 | ||
275 | /* Card sends N(CR) (== 1..8) bytes of all-ones then one | 281 | /* Card sends N(CR) (== 1..8) bytes of all-ones then one |
276 | * status byte ... and we already scanned 2 bytes. | 282 | * status byte ... and we already scanned 2 bytes. |
@@ -295,20 +301,34 @@ static int mmc_spi_response_get(struct mmc_spi_host *host, | |||
295 | } | 301 | } |
296 | 302 | ||
297 | checkstatus: | 303 | checkstatus: |
298 | if (*cp & 0x80) { | 304 | bitshift = 0; |
299 | dev_dbg(&host->spi->dev, "%s: INVALID RESPONSE, %02x\n", | 305 | if (*cp & 0x80) { |
300 | tag, *cp); | 306 | /* Houston, we have an ugly card with a bit-shifted response */ |
301 | value = -EBADR; | 307 | rotator = *cp++ << 8; |
302 | goto done; | 308 | /* read the next byte */ |
309 | if (cp == end) { | ||
310 | value = mmc_spi_readbytes(host, 1); | ||
311 | if (value < 0) | ||
312 | goto done; | ||
313 | cp = host->data->status; | ||
314 | end = cp+1; | ||
315 | } | ||
316 | rotator |= *cp++; | ||
317 | while (rotator & 0x8000) { | ||
318 | bitshift++; | ||
319 | rotator <<= 1; | ||
320 | } | ||
321 | cmd->resp[0] = rotator >> 8; | ||
322 | leftover = rotator; | ||
323 | } else { | ||
324 | cmd->resp[0] = *cp++; | ||
303 | } | 325 | } |
304 | |||
305 | cmd->resp[0] = *cp++; | ||
306 | cmd->error = 0; | 326 | cmd->error = 0; |
307 | 327 | ||
308 | /* Status byte: the entire seven-bit R1 response. */ | 328 | /* Status byte: the entire seven-bit R1 response. */ |
309 | if (cmd->resp[0] != 0) { | 329 | if (cmd->resp[0] != 0) { |
310 | if ((R1_SPI_PARAMETER | R1_SPI_ADDRESS | 330 | if ((R1_SPI_PARAMETER | R1_SPI_ADDRESS |
311 | | R1_SPI_ILLEGAL_COMMAND) | 331 | | R1_SPI_ILLEGAL_COMMAND) |
312 | & cmd->resp[0]) | 332 | & cmd->resp[0]) |
313 | value = -EINVAL; | 333 | value = -EINVAL; |
314 | else if (R1_SPI_COM_CRC & cmd->resp[0]) | 334 | else if (R1_SPI_COM_CRC & cmd->resp[0]) |
@@ -336,12 +356,45 @@ checkstatus: | |||
336 | * SPI R5 == R1 + data byte; IO_RW_DIRECT | 356 | * SPI R5 == R1 + data byte; IO_RW_DIRECT |
337 | */ | 357 | */ |
338 | case MMC_RSP_SPI_R2: | 358 | case MMC_RSP_SPI_R2: |
339 | cmd->resp[0] |= *cp << 8; | 359 | /* read the next byte */ |
360 | if (cp == end) { | ||
361 | value = mmc_spi_readbytes(host, 1); | ||
362 | if (value < 0) | ||
363 | goto done; | ||
364 | cp = host->data->status; | ||
365 | end = cp+1; | ||
366 | } | ||
367 | if (bitshift) { | ||
368 | rotator = leftover << 8; | ||
369 | rotator |= *cp << bitshift; | ||
370 | cmd->resp[0] |= (rotator & 0xFF00); | ||
371 | } else { | ||
372 | cmd->resp[0] |= *cp << 8; | ||
373 | } | ||
340 | break; | 374 | break; |
341 | 375 | ||
342 | /* SPI R3, R4, or R7 == R1 + 4 bytes */ | 376 | /* SPI R3, R4, or R7 == R1 + 4 bytes */ |
343 | case MMC_RSP_SPI_R3: | 377 | case MMC_RSP_SPI_R3: |
344 | cmd->resp[1] = get_unaligned_be32(cp); | 378 | rotator = leftover << 8; |
379 | cmd->resp[1] = 0; | ||
380 | for (i = 0; i < 4; i++) { | ||
381 | cmd->resp[1] <<= 8; | ||
382 | /* read the next byte */ | ||
383 | if (cp == end) { | ||
384 | value = mmc_spi_readbytes(host, 1); | ||
385 | if (value < 0) | ||
386 | goto done; | ||
387 | cp = host->data->status; | ||
388 | end = cp+1; | ||
389 | } | ||
390 | if (bitshift) { | ||
391 | rotator |= *cp++ << bitshift; | ||
392 | cmd->resp[1] |= (rotator >> 8); | ||
393 | rotator <<= 8; | ||
394 | } else { | ||
395 | cmd->resp[1] |= *cp++; | ||
396 | } | ||
397 | } | ||
345 | break; | 398 | break; |
346 | 399 | ||
347 | /* SPI R1 == just one status byte */ | 400 | /* SPI R1 == just one status byte */ |
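Editor's note, not part of the patch: the response parser above copes with cards whose R1/R2/R3 bytes arrive shifted by a few bits. It loads two bytes into a 16-bit "rotator", shifts until the response's leading 0 bit reaches the top, then peels off a byte-aligned value while carrying the leftover bits forward. A small standalone illustration of that first step; the helper name and test data are invented, not the driver's state machine:

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Recover the first response byte from a stream that may start with
     * all-ones filler and a non-byte-aligned response (bit 7 of a valid
     * SPI R1 byte is always 0). Mirrors the rotator idea in
     * mmc_spi_response_get().
     */
    static uint8_t realign_first_byte(const uint8_t *stream, int *bitshift)
    {
            uint16_t rotator;

            *bitshift = 0;
            if (!(stream[0] & 0x80))        /* already byte aligned */
                    return stream[0];

            rotator = (uint16_t)(stream[0] << 8) | stream[1];
            while (rotator & 0x8000) {      /* skip the leading 1 bits */
                    rotator <<= 1;
                    (*bitshift)++;
            }
            return rotator >> 8;            /* byte-aligned response */
    }

    int main(void)
    {
            const uint8_t shifted[] = { 0xE0, 0xBF }; /* 0x05 shifted by 3 bits */
            int bitshift;
            uint8_t resp = realign_first_byte(shifted, &bitshift);

            printf("resp=0x%02x bitshift=%d\n", resp, bitshift); /* 0x05, 3 */
            return 0;
    }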
@@ -607,7 +660,7 @@ mmc_spi_setup_data_message( | |||
607 | */ | 660 | */ |
608 | static int | 661 | static int |
609 | mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t, | 662 | mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t, |
610 | ktime_t timeout) | 663 | unsigned long timeout) |
611 | { | 664 | { |
612 | struct spi_device *spi = host->spi; | 665 | struct spi_device *spi = host->spi; |
613 | int status, i; | 666 | int status, i; |
@@ -717,11 +770,13 @@ mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t, | |||
717 | */ | 770 | */ |
718 | static int | 771 | static int |
719 | mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t, | 772 | mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t, |
720 | ktime_t timeout) | 773 | unsigned long timeout) |
721 | { | 774 | { |
722 | struct spi_device *spi = host->spi; | 775 | struct spi_device *spi = host->spi; |
723 | int status; | 776 | int status; |
724 | struct scratch *scratch = host->data; | 777 | struct scratch *scratch = host->data; |
778 | unsigned int bitshift; | ||
779 | u8 leftover; | ||
725 | 780 | ||
726 | /* At least one SD card sends an all-zeroes byte when N(CX) | 781 | /* At least one SD card sends an all-zeroes byte when N(CX) |
727 | * applies, before the all-ones bytes ... just cope with that. | 782 | * applies, before the all-ones bytes ... just cope with that. |
@@ -733,38 +788,60 @@ mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t, | |||
733 | if (status == 0xff || status == 0) | 788 | if (status == 0xff || status == 0) |
734 | status = mmc_spi_readtoken(host, timeout); | 789 | status = mmc_spi_readtoken(host, timeout); |
735 | 790 | ||
736 | if (status == SPI_TOKEN_SINGLE) { | 791 | if (status < 0) { |
737 | if (host->dma_dev) { | 792 | dev_dbg(&spi->dev, "read error %02x (%d)\n", status, status); |
738 | dma_sync_single_for_device(host->dma_dev, | 793 | return status; |
739 | host->data_dma, sizeof(*scratch), | 794 | } |
740 | DMA_BIDIRECTIONAL); | ||
741 | dma_sync_single_for_device(host->dma_dev, | ||
742 | t->rx_dma, t->len, | ||
743 | DMA_FROM_DEVICE); | ||
744 | } | ||
745 | 795 | ||
746 | status = spi_sync(spi, &host->m); | 796 | /* The token may be bit-shifted... |
797 | * the first 0-bit precedes the data stream. | ||
798 | */ | ||
799 | bitshift = 7; | ||
800 | while (status & 0x80) { | ||
801 | status <<= 1; | ||
802 | bitshift--; | ||
803 | } | ||
804 | leftover = status << 1; | ||
747 | 805 | ||
748 | if (host->dma_dev) { | 806 | if (host->dma_dev) { |
749 | dma_sync_single_for_cpu(host->dma_dev, | 807 | dma_sync_single_for_device(host->dma_dev, |
750 | host->data_dma, sizeof(*scratch), | 808 | host->data_dma, sizeof(*scratch), |
751 | DMA_BIDIRECTIONAL); | 809 | DMA_BIDIRECTIONAL); |
752 | dma_sync_single_for_cpu(host->dma_dev, | 810 | dma_sync_single_for_device(host->dma_dev, |
753 | t->rx_dma, t->len, | 811 | t->rx_dma, t->len, |
754 | DMA_FROM_DEVICE); | 812 | DMA_FROM_DEVICE); |
755 | } | 813 | } |
756 | 814 | ||
757 | } else { | 815 | status = spi_sync(spi, &host->m); |
758 | dev_dbg(&spi->dev, "read error %02x (%d)\n", status, status); | ||
759 | 816 | ||
760 | /* we've read extra garbage, timed out, etc */ | 817 | if (host->dma_dev) { |
761 | if (status < 0) | 818 | dma_sync_single_for_cpu(host->dma_dev, |
762 | return status; | 819 | host->data_dma, sizeof(*scratch), |
820 | DMA_BIDIRECTIONAL); | ||
821 | dma_sync_single_for_cpu(host->dma_dev, | ||
822 | t->rx_dma, t->len, | ||
823 | DMA_FROM_DEVICE); | ||
824 | } | ||
763 | 825 | ||
764 | /* low four bits are an R2 subset, fifth seems to be | 826 | if (bitshift) { |
765 | * vendor specific ... map them all to generic error.. | 827 | /* Walk through the data and the crc and do |
828 | * all the magic to get byte-aligned data. | ||
766 | */ | 829 | */ |
767 | return -EIO; | 830 | u8 *cp = t->rx_buf; |
831 | unsigned int len; | ||
832 | unsigned int bitright = 8 - bitshift; | ||
833 | u8 temp; | ||
834 | for (len = t->len; len; len--) { | ||
835 | temp = *cp; | ||
836 | *cp++ = leftover | (temp >> bitshift); | ||
837 | leftover = temp << bitright; | ||
838 | } | ||
839 | cp = (u8 *) &scratch->crc_val; | ||
840 | temp = *cp; | ||
841 | *cp++ = leftover | (temp >> bitshift); | ||
842 | leftover = temp << bitright; | ||
843 | temp = *cp; | ||
844 | *cp = leftover | (temp >> bitshift); | ||
768 | } | 845 | } |
769 | 846 | ||
770 | if (host->mmc->use_spi_crc) { | 847 | if (host->mmc->use_spi_crc) { |
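Editor's note, not part of the patch: the read path above applies the same realignment to a whole data block. Each stored byte becomes the leftover bits of the previous byte OR'd with the current byte shifted down, and the leftover then carries into the CRC bytes. A short sketch of that per-byte walk over the buffer only (the driver additionally fixes up the two CRC bytes the same way); the function name and test data are illustrative:

    #include <stdint.h>
    #include <stdio.h>

    /* Shift a received block left by 'bitshift' bits, in place, seeding
     * the walk with 'leftover' (the bits already consumed from the data
     * token). Mirrors the fix-up loop in mmc_spi_readblock(). */
    static void realign_block(uint8_t *buf, size_t len,
                              unsigned int bitshift, uint8_t leftover)
    {
            unsigned int bitright = 8 - bitshift;
            size_t i;

            for (i = 0; i < len; i++) {
                    uint8_t temp = buf[i];

                    buf[i] = leftover | (temp >> bitshift);
                    leftover = temp << bitright;
            }
    }

    int main(void)
    {
            /* Bytes 0xAB 0xCD received shifted by 4 bits. */
            uint8_t raw[] = { 0xBC, 0xD0 };

            realign_block(raw, sizeof(raw), 4, 0xA0);
            printf("%02X %02X\n", raw[0], raw[1]);  /* prints: AB CD */
            return 0;
    }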
@@ -803,7 +880,7 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd, | |||
803 | unsigned n_sg; | 880 | unsigned n_sg; |
804 | int multiple = (data->blocks > 1); | 881 | int multiple = (data->blocks > 1); |
805 | u32 clock_rate; | 882 | u32 clock_rate; |
806 | ktime_t timeout; | 883 | unsigned long timeout; |
807 | 884 | ||
808 | if (data->flags & MMC_DATA_READ) | 885 | if (data->flags & MMC_DATA_READ) |
809 | direction = DMA_FROM_DEVICE; | 886 | direction = DMA_FROM_DEVICE; |
@@ -817,8 +894,9 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd, | |||
817 | else | 894 | else |
818 | clock_rate = spi->max_speed_hz; | 895 | clock_rate = spi->max_speed_hz; |
819 | 896 | ||
820 | timeout = ktime_add_ns(ktime_set(0, 0), data->timeout_ns + | 897 | timeout = data->timeout_ns + |
821 | data->timeout_clks * 1000000 / clock_rate); | 898 | data->timeout_clks * 1000000 / clock_rate; |
899 | timeout = usecs_to_jiffies((unsigned int)(timeout / 1000)) + 1; | ||
822 | 900 | ||
823 | /* Handle scatterlist segments one at a time, with synch for | 901 | /* Handle scatterlist segments one at a time, with synch for |
824 | * each 512-byte block | 902 | * each 512-byte block |
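Editor's note, not part of the patch: with the hrtimer/ktime dependency removed, the data timeout is held in jiffies. The expression keeps the same shape as before (nanosecond part plus the clock-count contribution over the SPI clock rate), is converted via usecs_to_jiffies(), and one extra jiffy is added so truncation can never make the wait shorter than the card requested. A worked sketch of that arithmetic; HZ, the simplified conversion helper, and the sample card values below are made up for illustration:

    #include <stdio.h>

    #define HZ 250  /* illustrative kernel tick rate */

    /* Simplified usecs_to_jiffies(): round up to whole ticks. */
    static unsigned long usecs_to_jiffies(unsigned int us)
    {
            return (us * (unsigned long)HZ + 999999UL) / 1000000UL;
    }

    int main(void)
    {
            unsigned long timeout_ns   = 100UL * 1000 * 1000; /* 100 ms from CSD */
            unsigned long timeout_clks = 100000;              /* card clock count */
            unsigned long clock_rate   = 25UL * 1000 * 1000;  /* 25 MHz SPI clock */
            unsigned long timeout;

            /* Same expression shape as mmc_spi_data_do(). */
            timeout = timeout_ns + timeout_clks * 1000000 / clock_rate;
            timeout = usecs_to_jiffies((unsigned int)(timeout / 1000)) + 1;

            printf("timeout = %lu jiffies (~%lu ms)\n",
                   timeout, timeout * 1000 / HZ);
            return 0;
    }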
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index d183be6f2a5f..e62a22a7f00c 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c | |||
@@ -298,7 +298,6 @@ mmc_omap_xfer_done(struct mmc_omap_host *host, struct mmc_data *data) | |||
298 | struct mmc_request *mrq = host->mrq; | 298 | struct mmc_request *mrq = host->mrq; |
299 | 299 | ||
300 | host->mrq = NULL; | 300 | host->mrq = NULL; |
301 | mmc_omap_fclk_lazy_disable(host); | ||
302 | mmc_request_done(host->mmc, mrq); | 301 | mmc_request_done(host->mmc, mrq); |
303 | return; | 302 | return; |
304 | } | 303 | } |
@@ -434,6 +433,8 @@ static irqreturn_t mmc_omap_irq(int irq, void *dev_id) | |||
434 | if (host->mrq == NULL) { | 433 | if (host->mrq == NULL) { |
435 | OMAP_HSMMC_WRITE(host->base, STAT, | 434 | OMAP_HSMMC_WRITE(host->base, STAT, |
436 | OMAP_HSMMC_READ(host->base, STAT)); | 435 | OMAP_HSMMC_READ(host->base, STAT)); |
436 | /* Flush posted write */ | ||
437 | OMAP_HSMMC_READ(host->base, STAT); | ||
437 | return IRQ_HANDLED; | 438 | return IRQ_HANDLED; |
438 | } | 439 | } |
439 | 440 | ||
@@ -489,8 +490,10 @@ static irqreturn_t mmc_omap_irq(int irq, void *dev_id) | |||
489 | } | 490 | } |
490 | 491 | ||
491 | OMAP_HSMMC_WRITE(host->base, STAT, status); | 492 | OMAP_HSMMC_WRITE(host->base, STAT, status); |
493 | /* Flush posted write */ | ||
494 | OMAP_HSMMC_READ(host->base, STAT); | ||
492 | 495 | ||
493 | if (end_cmd || (status & CC)) | 496 | if (end_cmd || ((status & CC) && host->cmd)) |
494 | mmc_omap_cmd_done(host, host->cmd); | 497 | mmc_omap_cmd_done(host, host->cmd); |
495 | if (end_trans || (status & TC)) | 498 | if (end_trans || (status & TC)) |
496 | mmc_omap_xfer_done(host, data); | 499 | mmc_omap_xfer_done(host, data); |
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c index c5b316e22371..cd37962ec44f 100644 --- a/drivers/mmc/host/sdhci-pci.c +++ b/drivers/mmc/host/sdhci-pci.c | |||
@@ -729,6 +729,6 @@ static void __exit sdhci_drv_exit(void) | |||
729 | module_init(sdhci_drv_init); | 729 | module_init(sdhci_drv_init); |
730 | module_exit(sdhci_drv_exit); | 730 | module_exit(sdhci_drv_exit); |
731 | 731 | ||
732 | MODULE_AUTHOR("Pierre Ossman <drzeus@drzeus.cx>"); | 732 | MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>"); |
733 | MODULE_DESCRIPTION("Secure Digital Host Controller Interface PCI driver"); | 733 | MODULE_DESCRIPTION("Secure Digital Host Controller Interface PCI driver"); |
734 | MODULE_LICENSE("GPL"); | 734 | MODULE_LICENSE("GPL"); |
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 30d8e3d4e6fd..9234be2226e7 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c | |||
@@ -1935,7 +1935,7 @@ module_exit(sdhci_drv_exit); | |||
1935 | 1935 | ||
1936 | module_param(debug_quirks, uint, 0444); | 1936 | module_param(debug_quirks, uint, 0444); |
1937 | 1937 | ||
1938 | MODULE_AUTHOR("Pierre Ossman <drzeus@drzeus.cx>"); | 1938 | MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>"); |
1939 | MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver"); | 1939 | MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver"); |
1940 | MODULE_LICENSE("GPL"); | 1940 | MODULE_LICENSE("GPL"); |
1941 | 1941 | ||
diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c index adda37952032..89bf8cd25cac 100644 --- a/drivers/mmc/host/wbsd.c +++ b/drivers/mmc/host/wbsd.c | |||
@@ -2036,7 +2036,7 @@ module_param_named(irq, param_irq, uint, 0444); | |||
2036 | module_param_named(dma, param_dma, int, 0444); | 2036 | module_param_named(dma, param_dma, int, 0444); |
2037 | 2037 | ||
2038 | MODULE_LICENSE("GPL"); | 2038 | MODULE_LICENSE("GPL"); |
2039 | MODULE_AUTHOR("Pierre Ossman <drzeus@drzeus.cx>"); | 2039 | MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>"); |
2040 | MODULE_DESCRIPTION("Winbond W83L51xD SD/MMC card interface driver"); | 2040 | MODULE_DESCRIPTION("Winbond W83L51xD SD/MMC card interface driver"); |
2041 | 2041 | ||
2042 | #ifdef CONFIG_PNP | 2042 | #ifdef CONFIG_PNP |
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 9e7baec45720..9e921544ba20 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig | |||
@@ -977,6 +977,8 @@ config ETHOC | |||
977 | depends on NET_ETHERNET && HAS_IOMEM | 977 | depends on NET_ETHERNET && HAS_IOMEM |
978 | select MII | 978 | select MII |
979 | select PHYLIB | 979 | select PHYLIB |
980 | select CRC32 | ||
981 | select BITREVERSE | ||
980 | help | 982 | help |
981 | Say Y here if you want to use the OpenCores 10/100 Mbps Ethernet MAC. | 983 | Say Y here if you want to use the OpenCores 10/100 Mbps Ethernet MAC. |
982 | 984 | ||
@@ -2056,6 +2058,27 @@ config IGB_DCA | |||
2056 | driver. DCA is a method for warming the CPU cache before data | 2058 | driver. DCA is a method for warming the CPU cache before data |
2057 | is used, with the intent of lessening the impact of cache misses. | 2059 | is used, with the intent of lessening the impact of cache misses. |
2058 | 2060 | ||
2061 | config IGBVF | ||
2062 | tristate "Intel(R) 82576 Virtual Function Ethernet support" | ||
2063 | depends on PCI | ||
2064 | ---help--- | ||
2065 | This driver supports Intel(R) 82576 virtual functions. For more | ||
2066 | information on how to identify your adapter, go to the Adapter & | ||
2067 | Driver ID Guide at: | ||
2068 | |||
2069 | <http://support.intel.com/support/network/adapter/pro100/21397.htm> | ||
2070 | |||
2071 | For general information and support, go to the Intel support | ||
2072 | website at: | ||
2073 | |||
2074 | <http://support.intel.com> | ||
2075 | |||
2076 | More specific information on configuring the driver is in | ||
2077 | <file:Documentation/networking/e1000.txt>. | ||
2078 | |||
2079 | To compile this driver as a module, choose M here. The module | ||
2080 | will be called igbvf. | ||
2081 | |||
2059 | source "drivers/net/ixp2000/Kconfig" | 2082 | source "drivers/net/ixp2000/Kconfig" |
2060 | 2083 | ||
2061 | config MYRI_SBUS | 2084 | config MYRI_SBUS |
diff --git a/drivers/net/Makefile b/drivers/net/Makefile index edc9a0d6171d..1fc4602a6ff2 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile | |||
@@ -6,6 +6,7 @@ obj-$(CONFIG_E1000) += e1000/ | |||
6 | obj-$(CONFIG_E1000E) += e1000e/ | 6 | obj-$(CONFIG_E1000E) += e1000e/ |
7 | obj-$(CONFIG_IBM_NEW_EMAC) += ibm_newemac/ | 7 | obj-$(CONFIG_IBM_NEW_EMAC) += ibm_newemac/ |
8 | obj-$(CONFIG_IGB) += igb/ | 8 | obj-$(CONFIG_IGB) += igb/ |
9 | obj-$(CONFIG_IGBVF) += igbvf/ | ||
9 | obj-$(CONFIG_IXGBE) += ixgbe/ | 10 | obj-$(CONFIG_IXGBE) += ixgbe/ |
10 | obj-$(CONFIG_IXGB) += ixgb/ | 11 | obj-$(CONFIG_IXGB) += ixgb/ |
11 | obj-$(CONFIG_IP1000) += ipg.o | 12 | obj-$(CONFIG_IP1000) += ipg.o |
diff --git a/drivers/net/a2065.c b/drivers/net/a2065.c index d0d0c2fee054..02f64d578641 100644 --- a/drivers/net/a2065.c +++ b/drivers/net/a2065.c | |||
@@ -692,6 +692,17 @@ static struct zorro_driver a2065_driver = { | |||
692 | .remove = __devexit_p(a2065_remove_one), | 692 | .remove = __devexit_p(a2065_remove_one), |
693 | }; | 693 | }; |
694 | 694 | ||
695 | static const struct net_device_ops lance_netdev_ops = { | ||
696 | .ndo_open = lance_open, | ||
697 | .ndo_stop = lance_close, | ||
698 | .ndo_start_xmit = lance_start_xmit, | ||
699 | .ndo_tx_timeout = lance_tx_timeout, | ||
700 | .ndo_set_multicast_list = lance_set_multicast, | ||
701 | .ndo_validate_addr = eth_validate_addr, | ||
702 | .ndo_change_mtu = eth_change_mtu, | ||
703 | .ndo_set_mac_address = eth_mac_addr, | ||
704 | }; | ||
705 | |||
695 | static int __devinit a2065_init_one(struct zorro_dev *z, | 706 | static int __devinit a2065_init_one(struct zorro_dev *z, |
696 | const struct zorro_device_id *ent) | 707 | const struct zorro_device_id *ent) |
697 | { | 708 | { |
@@ -753,12 +764,8 @@ static int __devinit a2065_init_one(struct zorro_dev *z, | |||
753 | priv->rx_ring_mod_mask = RX_RING_MOD_MASK; | 764 | priv->rx_ring_mod_mask = RX_RING_MOD_MASK; |
754 | priv->tx_ring_mod_mask = TX_RING_MOD_MASK; | 765 | priv->tx_ring_mod_mask = TX_RING_MOD_MASK; |
755 | 766 | ||
756 | dev->open = &lance_open; | 767 | dev->netdev_ops = &lance_netdev_ops; |
757 | dev->stop = &lance_close; | ||
758 | dev->hard_start_xmit = &lance_start_xmit; | ||
759 | dev->tx_timeout = &lance_tx_timeout; | ||
760 | dev->watchdog_timeo = 5*HZ; | 768 | dev->watchdog_timeo = 5*HZ; |
761 | dev->set_multicast_list = &lance_set_multicast; | ||
762 | dev->dma = 0; | 769 | dev->dma = 0; |
763 | 770 | ||
764 | init_timer(&priv->multicast_timer); | 771 | init_timer(&priv->multicast_timer); |
diff --git a/drivers/net/ariadne.c b/drivers/net/ariadne.c index e1d72e06f3e1..58e8d522e5bc 100644 --- a/drivers/net/ariadne.c +++ b/drivers/net/ariadne.c | |||
@@ -155,6 +155,18 @@ static struct zorro_driver ariadne_driver = { | |||
155 | .remove = __devexit_p(ariadne_remove_one), | 155 | .remove = __devexit_p(ariadne_remove_one), |
156 | }; | 156 | }; |
157 | 157 | ||
158 | static const struct net_device_ops ariadne_netdev_ops = { | ||
159 | .ndo_open = ariadne_open, | ||
160 | .ndo_stop = ariadne_close, | ||
161 | .ndo_start_xmit = ariadne_start_xmit, | ||
162 | .ndo_tx_timeout = ariadne_tx_timeout, | ||
163 | .ndo_get_stats = ariadne_get_stats, | ||
164 | .ndo_set_multicast_list = set_multicast_list, | ||
165 | .ndo_validate_addr = eth_validate_addr, | ||
166 | .ndo_change_mtu = eth_change_mtu, | ||
167 | .ndo_set_mac_address = eth_mac_addr, | ||
168 | }; | ||
169 | |||
158 | static int __devinit ariadne_init_one(struct zorro_dev *z, | 170 | static int __devinit ariadne_init_one(struct zorro_dev *z, |
159 | const struct zorro_device_id *ent) | 171 | const struct zorro_device_id *ent) |
160 | { | 172 | { |
@@ -197,13 +209,8 @@ static int __devinit ariadne_init_one(struct zorro_dev *z, | |||
197 | dev->mem_start = ZTWO_VADDR(mem_start); | 209 | dev->mem_start = ZTWO_VADDR(mem_start); |
198 | dev->mem_end = dev->mem_start+ARIADNE_RAM_SIZE; | 210 | dev->mem_end = dev->mem_start+ARIADNE_RAM_SIZE; |
199 | 211 | ||
200 | dev->open = &ariadne_open; | 212 | dev->netdev_ops = &ariadne_netdev_ops; |
201 | dev->stop = &ariadne_close; | ||
202 | dev->hard_start_xmit = &ariadne_start_xmit; | ||
203 | dev->tx_timeout = &ariadne_tx_timeout; | ||
204 | dev->watchdog_timeo = 5*HZ; | 213 | dev->watchdog_timeo = 5*HZ; |
205 | dev->get_stats = &ariadne_get_stats; | ||
206 | dev->set_multicast_list = &set_multicast_list; | ||
207 | 214 | ||
208 | err = register_netdev(dev); | 215 | err = register_netdev(dev); |
209 | if (err) { | 216 | if (err) { |
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c index 4bc6901b3819..627bc75da17d 100644 --- a/drivers/net/arm/am79c961a.c +++ b/drivers/net/arm/am79c961a.c | |||
@@ -665,6 +665,20 @@ static void __init am79c961_banner(void) | |||
665 | if (net_debug && version_printed++ == 0) | 665 | if (net_debug && version_printed++ == 0) |
666 | printk(KERN_INFO "%s", version); | 666 | printk(KERN_INFO "%s", version); |
667 | } | 667 | } |
668 | static const struct net_device_ops am79c961_netdev_ops = { | ||
669 | .ndo_open = am79c961_open, | ||
670 | .ndo_stop = am79c961_close, | ||
671 | .ndo_start_xmit = am79c961_sendpacket, | ||
672 | .ndo_get_stats = am79c961_getstats, | ||
673 | .ndo_set_multicast_list = am79c961_setmulticastlist, | ||
674 | .ndo_tx_timeout = am79c961_timeout, | ||
675 | .ndo_validate_addr = eth_validate_addr, | ||
676 | .ndo_change_mtu = eth_change_mtu, | ||
677 | .ndo_set_mac_address = eth_mac_addr, | ||
678 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
679 | .ndo_poll_controller = am79c961_poll_controller, | ||
680 | #endif | ||
681 | }; | ||
668 | 682 | ||
669 | static int __init am79c961_probe(struct platform_device *pdev) | 683 | static int __init am79c961_probe(struct platform_device *pdev) |
670 | { | 684 | { |
@@ -732,15 +746,7 @@ static int __init am79c961_probe(struct platform_device *pdev) | |||
732 | if (am79c961_hw_init(dev)) | 746 | if (am79c961_hw_init(dev)) |
733 | goto release; | 747 | goto release; |
734 | 748 | ||
735 | dev->open = am79c961_open; | 749 | dev->netdev_ops = &am79c961_netdev_ops; |
736 | dev->stop = am79c961_close; | ||
737 | dev->hard_start_xmit = am79c961_sendpacket; | ||
738 | dev->get_stats = am79c961_getstats; | ||
739 | dev->set_multicast_list = am79c961_setmulticastlist; | ||
740 | dev->tx_timeout = am79c961_timeout; | ||
741 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
742 | dev->poll_controller = am79c961_poll_controller; | ||
743 | #endif | ||
744 | 750 | ||
745 | ret = register_netdev(dev); | 751 | ret = register_netdev(dev); |
746 | if (ret == 0) { | 752 | if (ret == 0) { |
diff --git a/drivers/net/arm/at91_ether.c b/drivers/net/arm/at91_ether.c index 442938d50380..7f4bc8ae5462 100644 --- a/drivers/net/arm/at91_ether.c +++ b/drivers/net/arm/at91_ether.c | |||
@@ -577,7 +577,7 @@ static void at91ether_sethashtable(struct net_device *dev) | |||
577 | /* | 577 | /* |
578 | * Enable/Disable promiscuous and multicast modes. | 578 | * Enable/Disable promiscuous and multicast modes. |
579 | */ | 579 | */ |
580 | static void at91ether_set_rx_mode(struct net_device *dev) | 580 | static void at91ether_set_multicast_list(struct net_device *dev) |
581 | { | 581 | { |
582 | unsigned long cfg; | 582 | unsigned long cfg; |
583 | 583 | ||
@@ -808,7 +808,7 @@ static int at91ether_close(struct net_device *dev) | |||
808 | /* | 808 | /* |
809 | * Transmit packet. | 809 | * Transmit packet. |
810 | */ | 810 | */ |
811 | static int at91ether_tx(struct sk_buff *skb, struct net_device *dev) | 811 | static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev) |
812 | { | 812 | { |
813 | struct at91_private *lp = netdev_priv(dev); | 813 | struct at91_private *lp = netdev_priv(dev); |
814 | 814 | ||
@@ -828,7 +828,7 @@ static int at91ether_tx(struct sk_buff *skb, struct net_device *dev) | |||
828 | 828 | ||
829 | dev->trans_start = jiffies; | 829 | dev->trans_start = jiffies; |
830 | } else { | 830 | } else { |
831 | printk(KERN_ERR "at91_ether.c: at91ether_tx() called, but device is busy!\n"); | 831 | printk(KERN_ERR "at91_ether.c: at91ether_start_xmit() called, but device is busy!\n"); |
832 | return 1; /* if we return anything but zero, dev.c:1055 calls kfree_skb(skb) | 832 | return 1; /* if we return anything but zero, dev.c:1055 calls kfree_skb(skb) |
833 | on this skb, he also reports -ENETDOWN and printk's, so either | 833 | on this skb, he also reports -ENETDOWN and printk's, so either |
834 | we free and return(0) or don't free and return 1 */ | 834 | we free and return(0) or don't free and return 1 */ |
@@ -965,6 +965,21 @@ static void at91ether_poll_controller(struct net_device *dev) | |||
965 | } | 965 | } |
966 | #endif | 966 | #endif |
967 | 967 | ||
968 | static const struct net_device_ops at91ether_netdev_ops = { | ||
969 | .ndo_open = at91ether_open, | ||
970 | .ndo_stop = at91ether_close, | ||
971 | .ndo_start_xmit = at91ether_start_xmit, | ||
972 | .ndo_get_stats = at91ether_stats, | ||
973 | .ndo_set_multicast_list = at91ether_set_multicast_list, | ||
974 | .ndo_set_mac_address = set_mac_address, | ||
975 | .ndo_do_ioctl = at91ether_ioctl, | ||
976 | .ndo_validate_addr = eth_validate_addr, | ||
977 | .ndo_change_mtu = eth_change_mtu, | ||
978 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
979 | .ndo_poll_controller = at91ether_poll_controller, | ||
980 | #endif | ||
981 | }; | ||
982 | |||
968 | /* | 983 | /* |
969 | * Initialize the ethernet interface | 984 | * Initialize the ethernet interface |
970 | */ | 985 | */ |
@@ -1005,17 +1020,8 @@ static int __init at91ether_setup(unsigned long phy_type, unsigned short phy_add | |||
1005 | spin_lock_init(&lp->lock); | 1020 | spin_lock_init(&lp->lock); |
1006 | 1021 | ||
1007 | ether_setup(dev); | 1022 | ether_setup(dev); |
1008 | dev->open = at91ether_open; | 1023 | dev->netdev_ops = &at91ether_netdev_ops; |
1009 | dev->stop = at91ether_close; | ||
1010 | dev->hard_start_xmit = at91ether_tx; | ||
1011 | dev->get_stats = at91ether_stats; | ||
1012 | dev->set_multicast_list = at91ether_set_rx_mode; | ||
1013 | dev->set_mac_address = set_mac_address; | ||
1014 | dev->ethtool_ops = &at91ether_ethtool_ops; | 1024 | dev->ethtool_ops = &at91ether_ethtool_ops; |
1015 | dev->do_ioctl = at91ether_ioctl; | ||
1016 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
1017 | dev->poll_controller = at91ether_poll_controller; | ||
1018 | #endif | ||
1019 | 1025 | ||
1020 | SET_NETDEV_DEV(dev, &pdev->dev); | 1026 | SET_NETDEV_DEV(dev, &pdev->dev); |
1021 | 1027 | ||
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c index cc7708775da0..b72b3d639f6e 100644 --- a/drivers/net/arm/ep93xx_eth.c +++ b/drivers/net/arm/ep93xx_eth.c | |||
@@ -153,7 +153,7 @@ struct ep93xx_descs | |||
153 | struct ep93xx_priv | 153 | struct ep93xx_priv |
154 | { | 154 | { |
155 | struct resource *res; | 155 | struct resource *res; |
156 | void *base_addr; | 156 | void __iomem *base_addr; |
157 | int irq; | 157 | int irq; |
158 | 158 | ||
159 | struct ep93xx_descs *descs; | 159 | struct ep93xx_descs *descs; |
@@ -770,7 +770,18 @@ static struct ethtool_ops ep93xx_ethtool_ops = { | |||
770 | .get_link = ep93xx_get_link, | 770 | .get_link = ep93xx_get_link, |
771 | }; | 771 | }; |
772 | 772 | ||
773 | struct net_device *ep93xx_dev_alloc(struct ep93xx_eth_data *data) | 773 | static const struct net_device_ops ep93xx_netdev_ops = { |
774 | .ndo_open = ep93xx_open, | ||
775 | .ndo_stop = ep93xx_close, | ||
776 | .ndo_start_xmit = ep93xx_xmit, | ||
777 | .ndo_get_stats = ep93xx_get_stats, | ||
778 | .ndo_do_ioctl = ep93xx_ioctl, | ||
779 | .ndo_validate_addr = eth_validate_addr, | ||
780 | .ndo_change_mtu = eth_change_mtu, | ||
781 | .ndo_set_mac_address = eth_mac_addr, | ||
782 | }; | ||
783 | |||
784 | static struct net_device *ep93xx_dev_alloc(struct ep93xx_eth_data *data) | ||
774 | { | 785 | { |
775 | struct net_device *dev; | 786 | struct net_device *dev; |
776 | 787 | ||
@@ -780,12 +791,8 @@ struct net_device *ep93xx_dev_alloc(struct ep93xx_eth_data *data) | |||
780 | 791 | ||
781 | memcpy(dev->dev_addr, data->dev_addr, ETH_ALEN); | 792 | memcpy(dev->dev_addr, data->dev_addr, ETH_ALEN); |
782 | 793 | ||
783 | dev->get_stats = ep93xx_get_stats; | ||
784 | dev->ethtool_ops = &ep93xx_ethtool_ops; | 794 | dev->ethtool_ops = &ep93xx_ethtool_ops; |
785 | dev->hard_start_xmit = ep93xx_xmit; | 795 | dev->netdev_ops = &ep93xx_netdev_ops; |
786 | dev->open = ep93xx_open; | ||
787 | dev->stop = ep93xx_close; | ||
788 | dev->do_ioctl = ep93xx_ioctl; | ||
789 | 796 | ||
790 | dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM; | 797 | dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM; |
791 | 798 | ||
diff --git a/drivers/net/arm/ether1.c b/drivers/net/arm/ether1.c index e380de454463..edf770f639fa 100644 --- a/drivers/net/arm/ether1.c +++ b/drivers/net/arm/ether1.c | |||
@@ -991,6 +991,18 @@ static void __devinit ether1_banner(void) | |||
991 | printk(KERN_INFO "%s", version); | 991 | printk(KERN_INFO "%s", version); |
992 | } | 992 | } |
993 | 993 | ||
994 | static const struct net_device_ops ether1_netdev_ops = { | ||
995 | .ndo_open = ether1_open, | ||
996 | .ndo_stop = ether1_close, | ||
997 | .ndo_start_xmit = ether1_sendpacket, | ||
998 | .ndo_get_stats = ether1_getstats, | ||
999 | .ndo_set_multicast_list = ether1_setmulticastlist, | ||
1000 | .ndo_tx_timeout = ether1_timeout, | ||
1001 | .ndo_validate_addr = eth_validate_addr, | ||
1002 | .ndo_change_mtu = eth_change_mtu, | ||
1003 | .ndo_set_mac_address = eth_mac_addr, | ||
1004 | }; | ||
1005 | |||
994 | static int __devinit | 1006 | static int __devinit |
995 | ether1_probe(struct expansion_card *ec, const struct ecard_id *id) | 1007 | ether1_probe(struct expansion_card *ec, const struct ecard_id *id) |
996 | { | 1008 | { |
@@ -1031,12 +1043,7 @@ ether1_probe(struct expansion_card *ec, const struct ecard_id *id) | |||
1031 | goto free; | 1043 | goto free; |
1032 | } | 1044 | } |
1033 | 1045 | ||
1034 | dev->open = ether1_open; | 1046 | dev->netdev_ops = &ether1_netdev_ops; |
1035 | dev->stop = ether1_close; | ||
1036 | dev->hard_start_xmit = ether1_sendpacket; | ||
1037 | dev->get_stats = ether1_getstats; | ||
1038 | dev->set_multicast_list = ether1_setmulticastlist; | ||
1039 | dev->tx_timeout = ether1_timeout; | ||
1040 | dev->watchdog_timeo = 5 * HZ / 100; | 1047 | dev->watchdog_timeo = 5 * HZ / 100; |
1041 | 1048 | ||
1042 | ret = register_netdev(dev); | 1049 | ret = register_netdev(dev); |
diff --git a/drivers/net/arm/ether3.c b/drivers/net/arm/ether3.c index 21a7bef12d3b..ec8a1ae1e887 100644 --- a/drivers/net/arm/ether3.c +++ b/drivers/net/arm/ether3.c | |||
@@ -770,6 +770,18 @@ static void __devinit ether3_banner(void) | |||
770 | printk(KERN_INFO "%s", version); | 770 | printk(KERN_INFO "%s", version); |
771 | } | 771 | } |
772 | 772 | ||
773 | static const struct net_device_ops ether3_netdev_ops = { | ||
774 | .ndo_open = ether3_open, | ||
775 | .ndo_stop = ether3_close, | ||
776 | .ndo_start_xmit = ether3_sendpacket, | ||
777 | .ndo_get_stats = ether3_getstats, | ||
778 | .ndo_set_multicast_list = ether3_setmulticastlist, | ||
779 | .ndo_tx_timeout = ether3_timeout, | ||
780 | .ndo_validate_addr = eth_validate_addr, | ||
781 | .ndo_change_mtu = eth_change_mtu, | ||
782 | .ndo_set_mac_address = eth_mac_addr, | ||
783 | }; | ||
784 | |||
773 | static int __devinit | 785 | static int __devinit |
774 | ether3_probe(struct expansion_card *ec, const struct ecard_id *id) | 786 | ether3_probe(struct expansion_card *ec, const struct ecard_id *id) |
775 | { | 787 | { |
@@ -846,12 +858,7 @@ ether3_probe(struct expansion_card *ec, const struct ecard_id *id) | |||
846 | goto free; | 858 | goto free; |
847 | } | 859 | } |
848 | 860 | ||
849 | dev->open = ether3_open; | 861 | dev->netdev_ops = &ether3_netdev_ops; |
850 | dev->stop = ether3_close; | ||
851 | dev->hard_start_xmit = ether3_sendpacket; | ||
852 | dev->get_stats = ether3_getstats; | ||
853 | dev->set_multicast_list = ether3_setmulticastlist; | ||
854 | dev->tx_timeout = ether3_timeout; | ||
855 | dev->watchdog_timeo = 5 * HZ / 100; | 862 | dev->watchdog_timeo = 5 * HZ / 100; |
856 | 863 | ||
857 | ret = register_netdev(dev); | 864 | ret = register_netdev(dev); |
diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c index 2d81f6afcb58..5425ab0c38c0 100644 --- a/drivers/net/atarilance.c +++ b/drivers/net/atarilance.c | |||
@@ -453,6 +453,16 @@ static noinline int __init addr_accessible(volatile void *regp, int wordflag, | |||
453 | return( ret ); | 453 | return( ret ); |
454 | } | 454 | } |
455 | 455 | ||
456 | static const struct net_device_ops lance_netdev_ops = { | ||
457 | .ndo_open = lance_open, | ||
458 | .ndo_stop = lance_close, | ||
459 | .ndo_start_xmit = lance_start_xmit, | ||
460 | .ndo_set_multicast_list = set_multicast_list, | ||
461 | .ndo_set_mac_address = lance_set_mac_address, | ||
462 | .ndo_tx_timeout = lance_tx_timeout, | ||
463 | .ndo_validate_addr = eth_validate_addr, | ||
464 | .ndo_change_mtu = eth_change_mtu, | ||
465 | }; | ||
456 | 466 | ||
457 | static unsigned long __init lance_probe1( struct net_device *dev, | 467 | static unsigned long __init lance_probe1( struct net_device *dev, |
458 | struct lance_addr *init_rec ) | 468 | struct lance_addr *init_rec ) |
@@ -623,15 +633,9 @@ static unsigned long __init lance_probe1( struct net_device *dev, | |||
623 | if (did_version++ == 0) | 633 | if (did_version++ == 0) |
624 | DPRINTK( 1, ( version )); | 634 | DPRINTK( 1, ( version )); |
625 | 635 | ||
626 | /* The LANCE-specific entries in the device structure. */ | 636 | dev->netdev_ops = &lance_netdev_ops; |
627 | dev->open = &lance_open; | ||
628 | dev->hard_start_xmit = &lance_start_xmit; | ||
629 | dev->stop = &lance_close; | ||
630 | dev->set_multicast_list = &set_multicast_list; | ||
631 | dev->set_mac_address = &lance_set_mac_address; | ||
632 | 637 | ||
633 | /* XXX MSch */ | 638 | /* XXX MSch */ |
634 | dev->tx_timeout = lance_tx_timeout; | ||
635 | dev->watchdog_timeo = TX_TIMEOUT; | 639 | dev->watchdog_timeo = TX_TIMEOUT; |
636 | 640 | ||
637 | return( 1 ); | 641 | return( 1 ); |
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c index deb7b53167ee..83a12125b94e 100644 --- a/drivers/net/atl1c/atl1c_main.c +++ b/drivers/net/atl1c/atl1c_main.c | |||
@@ -2532,8 +2532,8 @@ static int __devinit atl1c_probe(struct pci_dev *pdev, | |||
2532 | * various kernel subsystems to support the mechanics required by a | 2532 | * various kernel subsystems to support the mechanics required by a |
2533 | * fixed-high-32-bit system. | 2533 | * fixed-high-32-bit system. |
2534 | */ | 2534 | */ |
2535 | if ((pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) || | 2535 | if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) || |
2536 | (pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK) != 0)) { | 2536 | (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) { |
2537 | dev_err(&pdev->dev, "No usable DMA configuration,aborting\n"); | 2537 | dev_err(&pdev->dev, "No usable DMA configuration,aborting\n"); |
2538 | goto err_dma; | 2538 | goto err_dma; |
2539 | } | 2539 | } |
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c index 4274e4ac963b..d58c105fc779 100644 --- a/drivers/net/au1000_eth.c +++ b/drivers/net/au1000_eth.c | |||
@@ -1004,12 +1004,12 @@ static void au1000_tx_timeout(struct net_device *dev) | |||
1004 | netif_wake_queue(dev); | 1004 | netif_wake_queue(dev); |
1005 | } | 1005 | } |
1006 | 1006 | ||
1007 | static void set_rx_mode(struct net_device *dev) | 1007 | static void au1000_multicast_list(struct net_device *dev) |
1008 | { | 1008 | { |
1009 | struct au1000_private *aup = netdev_priv(dev); | 1009 | struct au1000_private *aup = netdev_priv(dev); |
1010 | 1010 | ||
1011 | if (au1000_debug > 4) | 1011 | if (au1000_debug > 4) |
1012 | printk("%s: set_rx_mode: flags=%x\n", dev->name, dev->flags); | 1012 | printk("%s: au1000_multicast_list: flags=%x\n", dev->name, dev->flags); |
1013 | 1013 | ||
1014 | if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ | 1014 | if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ |
1015 | aup->mac->control |= MAC_PROMISCUOUS; | 1015 | aup->mac->control |= MAC_PROMISCUOUS; |
@@ -1047,6 +1047,18 @@ static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |||
1047 | return phy_mii_ioctl(aup->phy_dev, if_mii(rq), cmd); | 1047 | return phy_mii_ioctl(aup->phy_dev, if_mii(rq), cmd); |
1048 | } | 1048 | } |
1049 | 1049 | ||
1050 | static const struct net_device_ops au1000_netdev_ops = { | ||
1051 | .ndo_open = au1000_open, | ||
1052 | .ndo_stop = au1000_close, | ||
1053 | .ndo_start_xmit = au1000_tx, | ||
1054 | .ndo_set_multicast_list = au1000_multicast_list, | ||
1055 | .ndo_do_ioctl = au1000_ioctl, | ||
1056 | .ndo_tx_timeout = au1000_tx_timeout, | ||
1057 | .ndo_set_mac_address = eth_mac_addr, | ||
1058 | .ndo_validate_addr = eth_validate_addr, | ||
1059 | .ndo_change_mtu = eth_change_mtu, | ||
1060 | }; | ||
1061 | |||
1050 | static struct net_device * au1000_probe(int port_num) | 1062 | static struct net_device * au1000_probe(int port_num) |
1051 | { | 1063 | { |
1052 | static unsigned version_printed = 0; | 1064 | static unsigned version_printed = 0; |
@@ -1197,13 +1209,8 @@ static struct net_device * au1000_probe(int port_num) | |||
1197 | 1209 | ||
1198 | dev->base_addr = base; | 1210 | dev->base_addr = base; |
1199 | dev->irq = irq; | 1211 | dev->irq = irq; |
1200 | dev->open = au1000_open; | 1212 | dev->netdev_ops = &au1000_netdev_ops; |
1201 | dev->hard_start_xmit = au1000_tx; | ||
1202 | dev->stop = au1000_close; | ||
1203 | dev->set_multicast_list = &set_rx_mode; | ||
1204 | dev->do_ioctl = &au1000_ioctl; | ||
1205 | SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops); | 1213 | SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops); |
1206 | dev->tx_timeout = au1000_tx_timeout; | ||
1207 | dev->watchdog_timeo = ETH_TX_TIMEOUT; | 1214 | dev->watchdog_timeo = ETH_TX_TIMEOUT; |
1208 | 1215 | ||
1209 | /* | 1216 | /* |
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c index 04f4b73fa8d8..9592f22e4c8c 100644 --- a/drivers/net/benet/be_ethtool.c +++ b/drivers/net/benet/be_ethtool.c | |||
@@ -319,7 +319,7 @@ be_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd) | |||
319 | 319 | ||
320 | be_cmd_get_flow_control(&adapter->ctrl, &ecmd->tx_pause, | 320 | be_cmd_get_flow_control(&adapter->ctrl, &ecmd->tx_pause, |
321 | &ecmd->rx_pause); | 321 | &ecmd->rx_pause); |
322 | ecmd->autoneg = AUTONEG_ENABLE; | 322 | ecmd->autoneg = 0; |
323 | } | 323 | } |
324 | 324 | ||
325 | static int | 325 | static int |
@@ -328,7 +328,7 @@ be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd) | |||
328 | struct be_adapter *adapter = netdev_priv(netdev); | 328 | struct be_adapter *adapter = netdev_priv(netdev); |
329 | int status; | 329 | int status; |
330 | 330 | ||
331 | if (ecmd->autoneg != AUTONEG_ENABLE) | 331 | if (ecmd->autoneg != 0) |
332 | return -EINVAL; | 332 | return -EINVAL; |
333 | 333 | ||
334 | status = be_cmd_set_flow_control(&adapter->ctrl, ecmd->tx_pause, | 334 | status = be_cmd_set_flow_control(&adapter->ctrl, ecmd->tx_pause, |
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c index 9b75aa630062..30d0c81c989e 100644 --- a/drivers/net/benet/be_main.c +++ b/drivers/net/benet/be_main.c | |||
@@ -1821,11 +1821,11 @@ static int __devinit be_probe(struct pci_dev *pdev, | |||
1821 | 1821 | ||
1822 | be_msix_enable(adapter); | 1822 | be_msix_enable(adapter); |
1823 | 1823 | ||
1824 | status = pci_set_dma_mask(pdev, DMA_64BIT_MASK); | 1824 | status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); |
1825 | if (!status) { | 1825 | if (!status) { |
1826 | netdev->features |= NETIF_F_HIGHDMA; | 1826 | netdev->features |= NETIF_F_HIGHDMA; |
1827 | } else { | 1827 | } else { |
1828 | status = pci_set_dma_mask(pdev, DMA_32BIT_MASK); | 1828 | status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); |
1829 | if (status) { | 1829 | if (status) { |
1830 | dev_err(&pdev->dev, "Could not set PCI DMA Mask\n"); | 1830 | dev_err(&pdev->dev, "Could not set PCI DMA Mask\n"); |
1831 | goto free_netdev; | 1831 | goto free_netdev; |
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c index 9afe8092dfc4..9f971ed6b58d 100644 --- a/drivers/net/bfin_mac.c +++ b/drivers/net/bfin_mac.c | |||
@@ -979,6 +979,20 @@ static int bfin_mac_open(struct net_device *dev) | |||
979 | return 0; | 979 | return 0; |
980 | } | 980 | } |
981 | 981 | ||
982 | static const struct net_device_ops bfin_mac_netdev_ops = { | ||
983 | .ndo_open = bfin_mac_open, | ||
984 | .ndo_stop = bfin_mac_close, | ||
985 | .ndo_start_xmit = bfin_mac_hard_start_xmit, | ||
986 | .ndo_set_mac_address = bfin_mac_set_mac_address, | ||
987 | .ndo_tx_timeout = bfin_mac_timeout, | ||
988 | .ndo_set_multicast_list = bfin_mac_set_multicast_list, | ||
989 | .ndo_validate_addr = eth_validate_addr, | ||
990 | .ndo_change_mtu = eth_change_mtu, | ||
991 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
992 | .ndo_poll_controller = bfin_mac_poll, | ||
993 | #endif | ||
994 | }; | ||
995 | |||
982 | /* | 996 | /* |
983 | * | 997 | * |
984 | * this makes the board clean up everything that it can | 998 | * this makes the board clean up everything that it can |
@@ -1086,15 +1100,7 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev) | |||
1086 | /* Fill in the fields of the device structure with ethernet values. */ | 1100 | /* Fill in the fields of the device structure with ethernet values. */ |
1087 | ether_setup(ndev); | 1101 | ether_setup(ndev); |
1088 | 1102 | ||
1089 | ndev->open = bfin_mac_open; | 1103 | ndev->netdev_ops = &bfin_mac_netdev_ops; |
1090 | ndev->stop = bfin_mac_close; | ||
1091 | ndev->hard_start_xmit = bfin_mac_hard_start_xmit; | ||
1092 | ndev->set_mac_address = bfin_mac_set_mac_address; | ||
1093 | ndev->tx_timeout = bfin_mac_timeout; | ||
1094 | ndev->set_multicast_list = bfin_mac_set_multicast_list; | ||
1095 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
1096 | ndev->poll_controller = bfin_mac_poll; | ||
1097 | #endif | ||
1098 | ndev->ethtool_ops = &bfin_mac_ethtool_ops; | 1104 | ndev->ethtool_ops = &bfin_mac_ethtool_ops; |
1099 | 1105 | ||
1100 | spin_lock_init(&lp->lock); | 1106 | spin_lock_init(&lp->lock); |
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index 9d268be0b670..d47839184a06 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c | |||
@@ -3427,8 +3427,8 @@ static int __devinit | |||
3427 | bnx2_request_firmware(struct bnx2 *bp) | 3427 | bnx2_request_firmware(struct bnx2 *bp) |
3428 | { | 3428 | { |
3429 | const char *mips_fw_file, *rv2p_fw_file; | 3429 | const char *mips_fw_file, *rv2p_fw_file; |
3430 | const struct bnx2_mips_fw_file *mips; | 3430 | const struct bnx2_mips_fw_file *mips_fw; |
3431 | const struct bnx2_rv2p_fw_file *rv2p; | 3431 | const struct bnx2_rv2p_fw_file *rv2p_fw; |
3432 | int rc; | 3432 | int rc; |
3433 | 3433 | ||
3434 | if (CHIP_NUM(bp) == CHIP_NUM_5709) { | 3434 | if (CHIP_NUM(bp) == CHIP_NUM_5709) { |
@@ -3452,21 +3452,21 @@ bnx2_request_firmware(struct bnx2 *bp) | |||
3452 | rv2p_fw_file); | 3452 | rv2p_fw_file); |
3453 | return rc; | 3453 | return rc; |
3454 | } | 3454 | } |
3455 | mips = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data; | 3455 | mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data; |
3456 | rv2p = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data; | 3456 | rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data; |
3457 | if (bp->mips_firmware->size < sizeof(*mips) || | 3457 | if (bp->mips_firmware->size < sizeof(*mips_fw) || |
3458 | check_mips_fw_entry(bp->mips_firmware, &mips->com) || | 3458 | check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) || |
3459 | check_mips_fw_entry(bp->mips_firmware, &mips->cp) || | 3459 | check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) || |
3460 | check_mips_fw_entry(bp->mips_firmware, &mips->rxp) || | 3460 | check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) || |
3461 | check_mips_fw_entry(bp->mips_firmware, &mips->tpat) || | 3461 | check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) || |
3462 | check_mips_fw_entry(bp->mips_firmware, &mips->txp)) { | 3462 | check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) { |
3463 | printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n", | 3463 | printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n", |
3464 | mips_fw_file); | 3464 | mips_fw_file); |
3465 | return -EINVAL; | 3465 | return -EINVAL; |
3466 | } | 3466 | } |
3467 | if (bp->rv2p_firmware->size < sizeof(*rv2p) || | 3467 | if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) || |
3468 | check_fw_section(bp->rv2p_firmware, &rv2p->proc1.rv2p, 8, true) || | 3468 | check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) || |
3469 | check_fw_section(bp->rv2p_firmware, &rv2p->proc2.rv2p, 8, true)) { | 3469 | check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) { |
3470 | printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n", | 3470 | printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n", |
3471 | rv2p_fw_file); | 3471 | rv2p_fw_file); |
3472 | return -EINVAL; | 3472 | return -EINVAL; |
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 8dc6fbb9a41e..553a89919778 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c | |||
@@ -370,8 +370,6 @@ static int rlb_arp_recv(struct sk_buff *skb, struct net_device *bond_dev, struct | |||
370 | 370 | ||
371 | if (arp->op_code == htons(ARPOP_REPLY)) { | 371 | if (arp->op_code == htons(ARPOP_REPLY)) { |
372 | /* update rx hash table for this ARP */ | 372 | /* update rx hash table for this ARP */ |
373 | printk("rar: update orig %s bond_dev %s\n", orig_dev->name, | ||
374 | bond_dev->name); | ||
375 | bond = netdev_priv(bond_dev); | 373 | bond = netdev_priv(bond_dev); |
376 | rlb_update_entry_from_arp(bond, arp); | 374 | rlb_update_entry_from_arp(bond, arp); |
377 | pr_debug("Server received an ARP Reply from client\n"); | 375 | pr_debug("Server received an ARP Reply from client\n"); |
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 99610f358c40..63369b6b14d4 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -2570,7 +2570,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave) | |||
2570 | 2570 | ||
2571 | for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) { | 2571 | for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) { |
2572 | if (!targets[i]) | 2572 | if (!targets[i]) |
2573 | continue; | 2573 | break; |
2574 | pr_debug("basa: target %x\n", targets[i]); | 2574 | pr_debug("basa: target %x\n", targets[i]); |
2575 | if (list_empty(&bond->vlan_list)) { | 2575 | if (list_empty(&bond->vlan_list)) { |
2576 | pr_debug("basa: empty vlan: arp_send\n"); | 2576 | pr_debug("basa: empty vlan: arp_send\n"); |
@@ -2677,7 +2677,6 @@ static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32 | |||
2677 | int i; | 2677 | int i; |
2678 | __be32 *targets = bond->params.arp_targets; | 2678 | __be32 *targets = bond->params.arp_targets; |
2679 | 2679 | ||
2680 | targets = bond->params.arp_targets; | ||
2681 | for (i = 0; (i < BOND_MAX_ARP_TARGETS) && targets[i]; i++) { | 2680 | for (i = 0; (i < BOND_MAX_ARP_TARGETS) && targets[i]; i++) { |
2682 | pr_debug("bva: sip %pI4 tip %pI4 t[%d] %pI4 bhti(tip) %d\n", | 2681 | pr_debug("bva: sip %pI4 tip %pI4 t[%d] %pI4 bhti(tip) %d\n", |
2683 | &sip, &tip, i, &targets[i], bond_has_this_ip(bond, tip)); | 2682 | &sip, &tip, i, &targets[i], bond_has_this_ip(bond, tip)); |
@@ -3303,7 +3302,7 @@ static void bond_info_show_master(struct seq_file *seq) | |||
3303 | 3302 | ||
3304 | for(i = 0; (i < BOND_MAX_ARP_TARGETS) ;i++) { | 3303 | for(i = 0; (i < BOND_MAX_ARP_TARGETS) ;i++) { |
3305 | if (!bond->params.arp_targets[i]) | 3304 | if (!bond->params.arp_targets[i]) |
3306 | continue; | 3305 | break; |
3307 | if (printed) | 3306 | if (printed) |
3308 | seq_printf(seq, ","); | 3307 | seq_printf(seq, ","); |
3309 | seq_printf(seq, " %pI4", &bond->params.arp_targets[i]); | 3308 | seq_printf(seq, " %pI4", &bond->params.arp_targets[i]); |
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index 18cf4787874c..d28731535226 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c | |||
@@ -684,17 +684,15 @@ static ssize_t bonding_store_arp_targets(struct device *d, | |||
684 | goto out; | 684 | goto out; |
685 | } | 685 | } |
686 | /* look for an empty slot to put the target in, and check for dupes */ | 686 | /* look for an empty slot to put the target in, and check for dupes */ |
687 | for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) { | 687 | for (i = 0; (i < BOND_MAX_ARP_TARGETS) && !done; i++) { |
688 | if (targets[i] == newtarget) { /* duplicate */ | 688 | if (targets[i] == newtarget) { /* duplicate */ |
689 | printk(KERN_ERR DRV_NAME | 689 | printk(KERN_ERR DRV_NAME |
690 | ": %s: ARP target %pI4 is already present\n", | 690 | ": %s: ARP target %pI4 is already present\n", |
691 | bond->dev->name, &newtarget); | 691 | bond->dev->name, &newtarget); |
692 | if (done) | ||
693 | targets[i] = 0; | ||
694 | ret = -EINVAL; | 692 | ret = -EINVAL; |
695 | goto out; | 693 | goto out; |
696 | } | 694 | } |
697 | if (targets[i] == 0 && !done) { | 695 | if (targets[i] == 0) { |
698 | printk(KERN_INFO DRV_NAME | 696 | printk(KERN_INFO DRV_NAME |
699 | ": %s: adding ARP target %pI4.\n", | 697 | ": %s: adding ARP target %pI4.\n", |
700 | bond->dev->name, &newtarget); | 698 | bond->dev->name, &newtarget); |
@@ -720,12 +718,16 @@ static ssize_t bonding_store_arp_targets(struct device *d, | |||
720 | goto out; | 718 | goto out; |
721 | } | 719 | } |
722 | 720 | ||
723 | for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) { | 721 | for (i = 0; (i < BOND_MAX_ARP_TARGETS) && !done; i++) { |
724 | if (targets[i] == newtarget) { | 722 | if (targets[i] == newtarget) { |
723 | int j; | ||
725 | printk(KERN_INFO DRV_NAME | 724 | printk(KERN_INFO DRV_NAME |
726 | ": %s: removing ARP target %pI4.\n", | 725 | ": %s: removing ARP target %pI4.\n", |
727 | bond->dev->name, &newtarget); | 726 | bond->dev->name, &newtarget); |
728 | targets[i] = 0; | 727 | for (j = i; (j < (BOND_MAX_ARP_TARGETS-1)) && targets[j+1]; j++) |
728 | targets[j] = targets[j+1]; | ||
729 | |||
730 | targets[j] = 0; | ||
729 | done = 1; | 731 | done = 1; |
730 | } | 732 | } |
731 | } | 733 | } |
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c index c9806c58b2fd..7a18dc7e5c7f 100644 --- a/drivers/net/cris/eth_v10.c +++ b/drivers/net/cris/eth_v10.c | |||
@@ -257,6 +257,23 @@ struct transceiver_ops transceivers[] = | |||
257 | 257 | ||
258 | struct transceiver_ops* transceiver = &transceivers[0]; | 258 | struct transceiver_ops* transceiver = &transceivers[0]; |
259 | 259 | ||
260 | static const struct net_device_ops e100_netdev_ops = { | ||
261 | .ndo_open = e100_open, | ||
262 | .ndo_stop = e100_close, | ||
263 | .ndo_start_xmit = e100_send_packet, | ||
264 | .ndo_tx_timeout = e100_tx_timeout, | ||
265 | .ndo_get_stats = e100_get_stats, | ||
266 | .ndo_set_multicast_list = set_multicast_list, | ||
267 | .ndo_do_ioctl = e100_ioctl, | ||
268 | .ndo_set_mac_address = e100_set_mac_address, | ||
269 | .ndo_validate_addr = eth_validate_addr, | ||
270 | .ndo_change_mtu = eth_change_mtu, | ||
271 | .ndo_set_config = e100_set_config, | ||
272 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
273 | .ndo_poll_controller = e100_netpoll, | ||
274 | #endif | ||
275 | }; | ||
276 | |||
260 | #define tx_done(dev) (*R_DMA_CH0_CMD == 0) | 277 | #define tx_done(dev) (*R_DMA_CH0_CMD == 0) |
261 | 278 | ||
262 | /* | 279 | /* |
@@ -300,19 +317,8 @@ etrax_ethernet_init(void) | |||
300 | 317 | ||
301 | /* fill in our handlers so the network layer can talk to us in the future */ | 318 | /* fill in our handlers so the network layer can talk to us in the future */ |
302 | 319 | ||
303 | dev->open = e100_open; | ||
304 | dev->hard_start_xmit = e100_send_packet; | ||
305 | dev->stop = e100_close; | ||
306 | dev->get_stats = e100_get_stats; | ||
307 | dev->set_multicast_list = set_multicast_list; | ||
308 | dev->set_mac_address = e100_set_mac_address; | ||
309 | dev->ethtool_ops = &e100_ethtool_ops; | 320 | dev->ethtool_ops = &e100_ethtool_ops; |
310 | dev->do_ioctl = e100_ioctl; | 321 | dev->netdev_ops = &e100_netdev_ops; |
311 | dev->set_config = e100_set_config; | ||
312 | dev->tx_timeout = e100_tx_timeout; | ||
313 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
314 | dev->poll_controller = e100_netpoll; | ||
315 | #endif | ||
316 | 322 | ||
317 | spin_lock_init(&np->lock); | 323 | spin_lock_init(&np->lock); |
318 | spin_lock_init(&np->led_lock); | 324 | spin_lock_init(&np->led_lock); |
diff --git a/drivers/net/declance.c b/drivers/net/declance.c index 861c867fca87..b62405a69180 100644 --- a/drivers/net/declance.c +++ b/drivers/net/declance.c | |||
@@ -1010,6 +1010,17 @@ static void lance_set_multicast_retry(unsigned long _opaque) | |||
1010 | lance_set_multicast(dev); | 1010 | lance_set_multicast(dev); |
1011 | } | 1011 | } |
1012 | 1012 | ||
1013 | static const struct net_device_ops lance_netdev_ops = { | ||
1014 | .ndo_open = lance_open, | ||
1015 | .ndo_stop = lance_close, | ||
1016 | .ndo_start_xmit = lance_start_xmit, | ||
1017 | .ndo_tx_timeout = lance_tx_timeout, | ||
1018 | .ndo_set_multicast_list = lance_set_multicast, | ||
1019 | .ndo_change_mtu = eth_change_mtu, | ||
1020 | .ndo_validate_addr = eth_validate_addr, | ||
1021 | .ndo_set_mac_address = eth_mac_addr, | ||
1022 | }; | ||
1023 | |||
1013 | static int __init dec_lance_probe(struct device *bdev, const int type) | 1024 | static int __init dec_lance_probe(struct device *bdev, const int type) |
1014 | { | 1025 | { |
1015 | static unsigned version_printed; | 1026 | static unsigned version_printed; |
@@ -1223,12 +1234,8 @@ static int __init dec_lance_probe(struct device *bdev, const int type) | |||
1223 | 1234 | ||
1224 | printk(", addr = %pM, irq = %d\n", dev->dev_addr, dev->irq); | 1235 | printk(", addr = %pM, irq = %d\n", dev->dev_addr, dev->irq); |
1225 | 1236 | ||
1226 | dev->open = &lance_open; | 1237 | dev->netdev_ops = &lance_netdev_ops; |
1227 | dev->stop = &lance_close; | ||
1228 | dev->hard_start_xmit = &lance_start_xmit; | ||
1229 | dev->tx_timeout = &lance_tx_timeout; | ||
1230 | dev->watchdog_timeo = 5*HZ; | 1238 | dev->watchdog_timeo = 5*HZ; |
1231 | dev->set_multicast_list = &lance_set_multicast; | ||
1232 | 1239 | ||
1233 | /* lp->ll is the location of the registers for lance card */ | 1240 | /* lp->ll is the location of the registers for lance card */ |
1234 | lp->ll = ll; | 1241 | lp->ll = ll; |
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index ddc5c533e89c..ef12931d302a 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c | |||
@@ -156,8 +156,8 @@ static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid); | |||
156 | static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid); | 156 | static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid); |
157 | static void e1000_restore_vlan(struct e1000_adapter *adapter); | 157 | static void e1000_restore_vlan(struct e1000_adapter *adapter); |
158 | 158 | ||
159 | static int e1000_suspend(struct pci_dev *pdev, pm_message_t state); | ||
160 | #ifdef CONFIG_PM | 159 | #ifdef CONFIG_PM |
160 | static int e1000_suspend(struct pci_dev *pdev, pm_message_t state); | ||
161 | static int e1000_resume(struct pci_dev *pdev); | 161 | static int e1000_resume(struct pci_dev *pdev); |
162 | #endif | 162 | #endif |
163 | static void e1000_shutdown(struct pci_dev *pdev); | 163 | static void e1000_shutdown(struct pci_dev *pdev); |
@@ -3834,7 +3834,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, | |||
3834 | struct e1000_buffer *buffer_info; | 3834 | struct e1000_buffer *buffer_info; |
3835 | unsigned int i, eop; | 3835 | unsigned int i, eop; |
3836 | unsigned int count = 0; | 3836 | unsigned int count = 0; |
3837 | bool cleaned; | 3837 | bool cleaned = false; |
3838 | unsigned int total_tx_bytes=0, total_tx_packets=0; | 3838 | unsigned int total_tx_bytes=0, total_tx_packets=0; |
3839 | 3839 | ||
3840 | i = tx_ring->next_to_clean; | 3840 | i = tx_ring->next_to_clean; |
@@ -4601,7 +4601,7 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx) | |||
4601 | return 0; | 4601 | return 0; |
4602 | } | 4602 | } |
4603 | 4603 | ||
4604 | static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) | 4604 | static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake) |
4605 | { | 4605 | { |
4606 | struct net_device *netdev = pci_get_drvdata(pdev); | 4606 | struct net_device *netdev = pci_get_drvdata(pdev); |
4607 | struct e1000_adapter *adapter = netdev_priv(netdev); | 4607 | struct e1000_adapter *adapter = netdev_priv(netdev); |
@@ -4664,22 +4664,18 @@ static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) | |||
4664 | 4664 | ||
4665 | ew32(WUC, E1000_WUC_PME_EN); | 4665 | ew32(WUC, E1000_WUC_PME_EN); |
4666 | ew32(WUFC, wufc); | 4666 | ew32(WUFC, wufc); |
4667 | pci_enable_wake(pdev, PCI_D3hot, 1); | ||
4668 | pci_enable_wake(pdev, PCI_D3cold, 1); | ||
4669 | } else { | 4667 | } else { |
4670 | ew32(WUC, 0); | 4668 | ew32(WUC, 0); |
4671 | ew32(WUFC, 0); | 4669 | ew32(WUFC, 0); |
4672 | pci_enable_wake(pdev, PCI_D3hot, 0); | ||
4673 | pci_enable_wake(pdev, PCI_D3cold, 0); | ||
4674 | } | 4670 | } |
4675 | 4671 | ||
4676 | e1000_release_manageability(adapter); | 4672 | e1000_release_manageability(adapter); |
4677 | 4673 | ||
4674 | *enable_wake = !!wufc; | ||
4675 | |||
4678 | /* make sure adapter isn't asleep if manageability is enabled */ | 4676 | /* make sure adapter isn't asleep if manageability is enabled */ |
4679 | if (adapter->en_mng_pt) { | 4677 | if (adapter->en_mng_pt) |
4680 | pci_enable_wake(pdev, PCI_D3hot, 1); | 4678 | *enable_wake = true; |
4681 | pci_enable_wake(pdev, PCI_D3cold, 1); | ||
4682 | } | ||
4683 | 4679 | ||
4684 | if (hw->phy_type == e1000_phy_igp_3) | 4680 | if (hw->phy_type == e1000_phy_igp_3) |
4685 | e1000_phy_powerdown_workaround(hw); | 4681 | e1000_phy_powerdown_workaround(hw); |
@@ -4693,12 +4689,29 @@ static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) | |||
4693 | 4689 | ||
4694 | pci_disable_device(pdev); | 4690 | pci_disable_device(pdev); |
4695 | 4691 | ||
4696 | pci_set_power_state(pdev, pci_choose_state(pdev, state)); | ||
4697 | |||
4698 | return 0; | 4692 | return 0; |
4699 | } | 4693 | } |
4700 | 4694 | ||
4701 | #ifdef CONFIG_PM | 4695 | #ifdef CONFIG_PM |
4696 | static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) | ||
4697 | { | ||
4698 | int retval; | ||
4699 | bool wake; | ||
4700 | |||
4701 | retval = __e1000_shutdown(pdev, &wake); | ||
4702 | if (retval) | ||
4703 | return retval; | ||
4704 | |||
4705 | if (wake) { | ||
4706 | pci_prepare_to_sleep(pdev); | ||
4707 | } else { | ||
4708 | pci_wake_from_d3(pdev, false); | ||
4709 | pci_set_power_state(pdev, PCI_D3hot); | ||
4710 | } | ||
4711 | |||
4712 | return 0; | ||
4713 | } | ||
4714 | |||
4702 | static int e1000_resume(struct pci_dev *pdev) | 4715 | static int e1000_resume(struct pci_dev *pdev) |
4703 | { | 4716 | { |
4704 | struct net_device *netdev = pci_get_drvdata(pdev); | 4717 | struct net_device *netdev = pci_get_drvdata(pdev); |
@@ -4753,7 +4766,14 @@ static int e1000_resume(struct pci_dev *pdev) | |||
4753 | 4766 | ||
4754 | static void e1000_shutdown(struct pci_dev *pdev) | 4767 | static void e1000_shutdown(struct pci_dev *pdev) |
4755 | { | 4768 | { |
4756 | e1000_suspend(pdev, PMSG_SUSPEND); | 4769 | bool wake; |
4770 | |||
4771 | __e1000_shutdown(pdev, &wake); | ||
4772 | |||
4773 | if (system_state == SYSTEM_POWER_OFF) { | ||
4774 | pci_wake_from_d3(pdev, wake); | ||
4775 | pci_set_power_state(pdev, PCI_D3hot); | ||
4776 | } | ||
4757 | } | 4777 | } |
4758 | 4778 | ||
4759 | #ifdef CONFIG_NET_POLL_CONTROLLER | 4779 | #ifdef CONFIG_NET_POLL_CONTROLLER |
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index 409b58cad0e5..1693ed116b16 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c | |||
@@ -621,7 +621,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter) | |||
621 | struct e1000_buffer *buffer_info; | 621 | struct e1000_buffer *buffer_info; |
622 | unsigned int i, eop; | 622 | unsigned int i, eop; |
623 | unsigned int count = 0; | 623 | unsigned int count = 0; |
624 | bool cleaned; | 624 | bool cleaned = false; |
625 | unsigned int total_tx_bytes = 0, total_tx_packets = 0; | 625 | unsigned int total_tx_bytes = 0, total_tx_packets = 0; |
626 | 626 | ||
627 | i = tx_ring->next_to_clean; | 627 | i = tx_ring->next_to_clean; |
@@ -4346,7 +4346,7 @@ static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | |||
4346 | } | 4346 | } |
4347 | } | 4347 | } |
4348 | 4348 | ||
4349 | static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) | 4349 | static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake) |
4350 | { | 4350 | { |
4351 | struct net_device *netdev = pci_get_drvdata(pdev); | 4351 | struct net_device *netdev = pci_get_drvdata(pdev); |
4352 | struct e1000_adapter *adapter = netdev_priv(netdev); | 4352 | struct e1000_adapter *adapter = netdev_priv(netdev); |
@@ -4409,20 +4409,16 @@ static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) | |||
4409 | 4409 | ||
4410 | ew32(WUC, E1000_WUC_PME_EN); | 4410 | ew32(WUC, E1000_WUC_PME_EN); |
4411 | ew32(WUFC, wufc); | 4411 | ew32(WUFC, wufc); |
4412 | pci_enable_wake(pdev, PCI_D3hot, 1); | ||
4413 | pci_enable_wake(pdev, PCI_D3cold, 1); | ||
4414 | } else { | 4412 | } else { |
4415 | ew32(WUC, 0); | 4413 | ew32(WUC, 0); |
4416 | ew32(WUFC, 0); | 4414 | ew32(WUFC, 0); |
4417 | pci_enable_wake(pdev, PCI_D3hot, 0); | ||
4418 | pci_enable_wake(pdev, PCI_D3cold, 0); | ||
4419 | } | 4415 | } |
4420 | 4416 | ||
4417 | *enable_wake = !!wufc; | ||
4418 | |||
4421 | /* make sure adapter isn't asleep if manageability is enabled */ | 4419 | /* make sure adapter isn't asleep if manageability is enabled */ |
4422 | if (adapter->flags & FLAG_MNG_PT_ENABLED) { | 4420 | if (adapter->flags & FLAG_MNG_PT_ENABLED) |
4423 | pci_enable_wake(pdev, PCI_D3hot, 1); | 4421 | *enable_wake = true; |
4424 | pci_enable_wake(pdev, PCI_D3cold, 1); | ||
4425 | } | ||
4426 | 4422 | ||
4427 | if (adapter->hw.phy.type == e1000_phy_igp_3) | 4423 | if (adapter->hw.phy.type == e1000_phy_igp_3) |
4428 | e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); | 4424 | e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); |
@@ -4435,6 +4431,26 @@ static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) | |||
4435 | 4431 | ||
4436 | pci_disable_device(pdev); | 4432 | pci_disable_device(pdev); |
4437 | 4433 | ||
4434 | return 0; | ||
4435 | } | ||
4436 | |||
4437 | static void e1000_power_off(struct pci_dev *pdev, bool sleep, bool wake) | ||
4438 | { | ||
4439 | if (sleep && wake) { | ||
4440 | pci_prepare_to_sleep(pdev); | ||
4441 | return; | ||
4442 | } | ||
4443 | |||
4444 | pci_wake_from_d3(pdev, wake); | ||
4445 | pci_set_power_state(pdev, PCI_D3hot); | ||
4446 | } | ||
4447 | |||
4448 | static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep, | ||
4449 | bool wake) | ||
4450 | { | ||
4451 | struct net_device *netdev = pci_get_drvdata(pdev); | ||
4452 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
4453 | |||
4438 | /* | 4454 | /* |
4439 | * The pci-e switch on some quad port adapters will report a | 4455 | * The pci-e switch on some quad port adapters will report a |
4440 | * correctable error when the MAC transitions from D0 to D3. To | 4456 | * correctable error when the MAC transitions from D0 to D3. To |
@@ -4450,14 +4466,12 @@ static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) | |||
4450 | pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, | 4466 | pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, |
4451 | (devctl & ~PCI_EXP_DEVCTL_CERE)); | 4467 | (devctl & ~PCI_EXP_DEVCTL_CERE)); |
4452 | 4468 | ||
4453 | pci_set_power_state(pdev, pci_choose_state(pdev, state)); | 4469 | e1000_power_off(pdev, sleep, wake); |
4454 | 4470 | ||
4455 | pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, devctl); | 4471 | pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, devctl); |
4456 | } else { | 4472 | } else { |
4457 | pci_set_power_state(pdev, pci_choose_state(pdev, state)); | 4473 | e1000_power_off(pdev, sleep, wake); |
4458 | } | 4474 | } |
4459 | |||
4460 | return 0; | ||
4461 | } | 4475 | } |
4462 | 4476 | ||
4463 | static void e1000e_disable_l1aspm(struct pci_dev *pdev) | 4477 | static void e1000e_disable_l1aspm(struct pci_dev *pdev) |
@@ -4486,6 +4500,18 @@ static void e1000e_disable_l1aspm(struct pci_dev *pdev) | |||
4486 | } | 4500 | } |
4487 | 4501 | ||
4488 | #ifdef CONFIG_PM | 4502 | #ifdef CONFIG_PM |
4503 | static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) | ||
4504 | { | ||
4505 | int retval; | ||
4506 | bool wake; | ||
4507 | |||
4508 | retval = __e1000_shutdown(pdev, &wake); | ||
4509 | if (!retval) | ||
4510 | e1000_complete_shutdown(pdev, true, wake); | ||
4511 | |||
4512 | return retval; | ||
4513 | } | ||
4514 | |||
4489 | static int e1000_resume(struct pci_dev *pdev) | 4515 | static int e1000_resume(struct pci_dev *pdev) |
4490 | { | 4516 | { |
4491 | struct net_device *netdev = pci_get_drvdata(pdev); | 4517 | struct net_device *netdev = pci_get_drvdata(pdev); |
@@ -4549,7 +4575,12 @@ static int e1000_resume(struct pci_dev *pdev) | |||
4549 | 4575 | ||
4550 | static void e1000_shutdown(struct pci_dev *pdev) | 4576 | static void e1000_shutdown(struct pci_dev *pdev) |
4551 | { | 4577 | { |
4552 | e1000_suspend(pdev, PMSG_SUSPEND); | 4578 | bool wake = false; |
4579 | |||
4580 | __e1000_shutdown(pdev, &wake); | ||
4581 | |||
4582 | if (system_state == SYSTEM_POWER_OFF) | ||
4583 | e1000_complete_shutdown(pdev, false, wake); | ||
4553 | } | 4584 | } |
4554 | 4585 | ||
4555 | #ifdef CONFIG_NET_POLL_CONTROLLER | 4586 | #ifdef CONFIG_NET_POLL_CONTROLLER |
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index ac0c5b438e0a..604c844d0769 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c | |||
@@ -3080,7 +3080,8 @@ static const struct net_device_ops ehea_netdev_ops = { | |||
3080 | .ndo_change_mtu = ehea_change_mtu, | 3080 | .ndo_change_mtu = ehea_change_mtu, |
3081 | .ndo_vlan_rx_register = ehea_vlan_rx_register, | 3081 | .ndo_vlan_rx_register = ehea_vlan_rx_register, |
3082 | .ndo_vlan_rx_add_vid = ehea_vlan_rx_add_vid, | 3082 | .ndo_vlan_rx_add_vid = ehea_vlan_rx_add_vid, |
3083 | .ndo_vlan_rx_kill_vid = ehea_vlan_rx_kill_vid | 3083 | .ndo_vlan_rx_kill_vid = ehea_vlan_rx_kill_vid, |
3084 | .ndo_tx_timeout = ehea_tx_watchdog, | ||
3084 | }; | 3085 | }; |
3085 | 3086 | ||
3086 | struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter, | 3087 | struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter, |
@@ -3142,7 +3143,6 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter, | |||
3142 | | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX | 3143 | | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX |
3143 | | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER | 3144 | | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER |
3144 | | NETIF_F_LLTX; | 3145 | | NETIF_F_LLTX; |
3145 | dev->tx_timeout = &ehea_tx_watchdog; | ||
3146 | dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT; | 3146 | dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT; |
3147 | 3147 | ||
3148 | INIT_WORK(&port->reset_task, ehea_reset_port); | 3148 | INIT_WORK(&port->reset_task, ehea_reset_port); |
diff --git a/drivers/net/eql.c b/drivers/net/eql.c index 51ead7941f83..5210bb1027cc 100644 --- a/drivers/net/eql.c +++ b/drivers/net/eql.c | |||
@@ -542,6 +542,8 @@ static int eql_s_slave_cfg(struct net_device *dev, slave_config_t __user *scp) | |||
542 | } | 542 | } |
543 | spin_unlock_bh(&eql->queue.lock); | 543 | spin_unlock_bh(&eql->queue.lock); |
544 | 544 | ||
545 | dev_put(slave_dev); | ||
546 | |||
545 | return ret; | 547 | return ret; |
546 | } | 548 | } |
547 | 549 | ||
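The eql.c change adds the dev_put() that balances the reference eql_s_slave_cfg() takes on the slave device earlier in the function; without it the slave's refcount leaked on every call. A condensed sketch of the get/put pairing (the dev_get_by_name() lookup is assumed from the surrounding eql.c code and is not part of this hunk):

#include <linux/netdevice.h>
#include <net/net_namespace.h>

static int example_slave_cfg(const char *slave_name)
{
	struct net_device *slave_dev;
	int ret = -EINVAL;

	slave_dev = dev_get_by_name(&init_net, slave_name);	/* takes a reference */
	if (!slave_dev)
		return -ENODEV;

	/* ... find the slave in the queue and update its configuration ... */

	dev_put(slave_dev);	/* the reference the fix now drops */
	return ret;
}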
diff --git a/drivers/net/fec.c b/drivers/net/fec.c index a515acccc61f..682e7f0b5581 100644 --- a/drivers/net/fec.c +++ b/drivers/net/fec.c | |||
@@ -1240,6 +1240,7 @@ static void __inline__ fec_phy_ack_intr(void) | |||
1240 | icrp = (volatile unsigned long *) (MCF_MBAR + MCFSIM_ICR1); | 1240 | icrp = (volatile unsigned long *) (MCF_MBAR + MCFSIM_ICR1); |
1241 | *icrp = 0x0d000000; | 1241 | *icrp = 0x0d000000; |
1242 | } | 1242 | } |
1243 | #endif | ||
1243 | 1244 | ||
1244 | #ifdef CONFIG_M5272 | 1245 | #ifdef CONFIG_M5272 |
1245 | static void __inline__ fec_get_mac(struct net_device *dev) | 1246 | static void __inline__ fec_get_mac(struct net_device *dev) |
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index d37465020bcc..11d5db16ed9c 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c | |||
@@ -3745,14 +3745,14 @@ static int nv_napi_poll(struct napi_struct *napi, int budget) | |||
3745 | mod_timer(&np->nic_poll, jiffies + POLL_WAIT); | 3745 | mod_timer(&np->nic_poll, jiffies + POLL_WAIT); |
3746 | } | 3746 | } |
3747 | spin_unlock_irqrestore(&np->lock, flags); | 3747 | spin_unlock_irqrestore(&np->lock, flags); |
3748 | __napi_complete(napi); | 3748 | napi_complete(napi); |
3749 | return rx_work; | 3749 | return rx_work; |
3750 | } | 3750 | } |
3751 | 3751 | ||
3752 | if (rx_work < budget) { | 3752 | if (rx_work < budget) { |
3753 | /* re-enable interrupts | 3753 | /* re-enable interrupts |
3754 | (msix not enabled in napi) */ | 3754 | (msix not enabled in napi) */ |
3755 | __napi_complete(napi); | 3755 | napi_complete(napi); |
3756 | 3756 | ||
3757 | writel(np->irqmask, base + NvRegIrqMask); | 3757 | writel(np->irqmask, base + NvRegIrqMask); |
3758 | } | 3758 | } |
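forcedeth moves from __napi_complete() to napi_complete() because the double-underscore variant is the bare helper: it only unlinks the NAPI instance and clears NAPI_STATE_SCHED, and it expects the caller to provide the interrupt-safe context (and GRO flush) itself, which the poll routine here does not since the lock is released with interrupts restored just before the call. A simplified sketch of what napi_complete() adds on top (condensed from the networking core of that era, not a verbatim quote):

#include <linux/netdevice.h>

static void example_napi_complete(struct napi_struct *n)
{
	unsigned long flags;

	napi_gro_flush(n);		/* flush packets held for GRO */
	local_irq_save(flags);
	__napi_complete(n);		/* list_del() + clear NAPI_STATE_SCHED */
	local_irq_restore(flags);
}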
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c index b037ce9857bf..a9cbc3191a2a 100644 --- a/drivers/net/fs_enet/fs_enet-main.c +++ b/drivers/net/fs_enet/fs_enet-main.c | |||
@@ -1019,6 +1019,22 @@ out_put_phy: | |||
1019 | #define IS_FEC(match) 0 | 1019 | #define IS_FEC(match) 0 |
1020 | #endif | 1020 | #endif |
1021 | 1021 | ||
1022 | static const struct net_device_ops fs_enet_netdev_ops = { | ||
1023 | .ndo_open = fs_enet_open, | ||
1024 | .ndo_stop = fs_enet_close, | ||
1025 | .ndo_get_stats = fs_enet_get_stats, | ||
1026 | .ndo_start_xmit = fs_enet_start_xmit, | ||
1027 | .ndo_tx_timeout = fs_timeout, | ||
1028 | .ndo_set_multicast_list = fs_set_multicast_list, | ||
1029 | .ndo_do_ioctl = fs_ioctl, | ||
1030 | .ndo_validate_addr = eth_validate_addr, | ||
1031 | .ndo_set_mac_address = eth_mac_addr, | ||
1032 | .ndo_change_mtu = eth_change_mtu, | ||
1033 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
1034 | .ndo_poll_controller = fs_enet_netpoll, | ||
1035 | #endif | ||
1036 | }; | ||
1037 | |||
1022 | static int __devinit fs_enet_probe(struct of_device *ofdev, | 1038 | static int __devinit fs_enet_probe(struct of_device *ofdev, |
1023 | const struct of_device_id *match) | 1039 | const struct of_device_id *match) |
1024 | { | 1040 | { |
@@ -1093,22 +1109,13 @@ static int __devinit fs_enet_probe(struct of_device *ofdev, | |||
1093 | fep->tx_ring = fpi->tx_ring; | 1109 | fep->tx_ring = fpi->tx_ring; |
1094 | fep->rx_ring = fpi->rx_ring; | 1110 | fep->rx_ring = fpi->rx_ring; |
1095 | 1111 | ||
1096 | ndev->open = fs_enet_open; | 1112 | ndev->netdev_ops = &fs_enet_netdev_ops; |
1097 | ndev->hard_start_xmit = fs_enet_start_xmit; | ||
1098 | ndev->tx_timeout = fs_timeout; | ||
1099 | ndev->watchdog_timeo = 2 * HZ; | 1113 | ndev->watchdog_timeo = 2 * HZ; |
1100 | ndev->stop = fs_enet_close; | ||
1101 | ndev->get_stats = fs_enet_get_stats; | ||
1102 | ndev->set_multicast_list = fs_set_multicast_list; | ||
1103 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
1104 | ndev->poll_controller = fs_enet_netpoll; | ||
1105 | #endif | ||
1106 | if (fpi->use_napi) | 1114 | if (fpi->use_napi) |
1107 | netif_napi_add(ndev, &fep->napi, fs_enet_rx_napi, | 1115 | netif_napi_add(ndev, &fep->napi, fs_enet_rx_napi, |
1108 | fpi->napi_weight); | 1116 | fpi->napi_weight); |
1109 | 1117 | ||
1110 | ndev->ethtool_ops = &fs_ethtool_ops; | 1118 | ndev->ethtool_ops = &fs_ethtool_ops; |
1111 | ndev->do_ioctl = fs_ioctl; | ||
1112 | 1119 | ||
1113 | init_timer(&fep->phy_timer_list); | 1120 | init_timer(&fep->phy_timer_list); |
1114 | 1121 | ||
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index 65f55877be95..b2c49679bba7 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c | |||
@@ -1583,8 +1583,10 @@ static void gfar_reset_task(struct work_struct *work) | |||
1583 | struct net_device *dev = priv->ndev; | 1583 | struct net_device *dev = priv->ndev; |
1584 | 1584 | ||
1585 | if (dev->flags & IFF_UP) { | 1585 | if (dev->flags & IFF_UP) { |
1586 | netif_stop_queue(dev); | ||
1586 | stop_gfar(dev); | 1587 | stop_gfar(dev); |
1587 | startup_gfar(dev); | 1588 | startup_gfar(dev); |
1589 | netif_start_queue(dev); | ||
1588 | } | 1590 | } |
1589 | 1591 | ||
1590 | netif_tx_schedule_all(dev); | 1592 | netif_tx_schedule_all(dev); |
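The gianfar fix brackets the reset with netif_stop_queue()/netif_start_queue() so the transmit path cannot run while stop_gfar()/startup_gfar() tear down and rebuild the rings. The general shape of the pattern, sketched with placeholder re-init helpers:

#include <linux/netdevice.h>

static void example_reset_task(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		netif_stop_queue(dev);	/* block further transmits during the rebuild */
		/* driver_stop_hw(dev); driver_start_hw(dev); -- hypothetical re-init */
		netif_start_queue(dev);
	}
}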
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c index 77e4b5b52fc8..806533c831c7 100644 --- a/drivers/net/ibm_newemac/core.c +++ b/drivers/net/ibm_newemac/core.c | |||
@@ -2686,6 +2686,32 @@ static int __devinit emac_init_config(struct emac_instance *dev) | |||
2686 | return 0; | 2686 | return 0; |
2687 | } | 2687 | } |
2688 | 2688 | ||
2689 | static const struct net_device_ops emac_netdev_ops = { | ||
2690 | .ndo_open = emac_open, | ||
2691 | .ndo_stop = emac_close, | ||
2692 | .ndo_get_stats = emac_stats, | ||
2693 | .ndo_set_multicast_list = emac_set_multicast_list, | ||
2694 | .ndo_do_ioctl = emac_ioctl, | ||
2695 | .ndo_tx_timeout = emac_tx_timeout, | ||
2696 | .ndo_validate_addr = eth_validate_addr, | ||
2697 | .ndo_set_mac_address = eth_mac_addr, | ||
2698 | .ndo_start_xmit = emac_start_xmit, | ||
2699 | .ndo_change_mtu = eth_change_mtu, | ||
2700 | }; | ||
2701 | |||
2702 | static const struct net_device_ops emac_gige_netdev_ops = { | ||
2703 | .ndo_open = emac_open, | ||
2704 | .ndo_stop = emac_close, | ||
2705 | .ndo_get_stats = emac_stats, | ||
2706 | .ndo_set_multicast_list = emac_set_multicast_list, | ||
2707 | .ndo_do_ioctl = emac_ioctl, | ||
2708 | .ndo_tx_timeout = emac_tx_timeout, | ||
2709 | .ndo_validate_addr = eth_validate_addr, | ||
2710 | .ndo_set_mac_address = eth_mac_addr, | ||
2711 | .ndo_start_xmit = emac_start_xmit_sg, | ||
2712 | .ndo_change_mtu = emac_change_mtu, | ||
2713 | }; | ||
2714 | |||
2689 | static int __devinit emac_probe(struct of_device *ofdev, | 2715 | static int __devinit emac_probe(struct of_device *ofdev, |
2690 | const struct of_device_id *match) | 2716 | const struct of_device_id *match) |
2691 | { | 2717 | { |
@@ -2827,23 +2853,14 @@ static int __devinit emac_probe(struct of_device *ofdev, | |||
2827 | if (err != 0) | 2853 | if (err != 0) |
2828 | goto err_detach_tah; | 2854 | goto err_detach_tah; |
2829 | 2855 | ||
2830 | /* Fill in the driver function table */ | ||
2831 | ndev->open = &emac_open; | ||
2832 | if (dev->tah_dev) | 2856 | if (dev->tah_dev) |
2833 | ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; | 2857 | ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; |
2834 | ndev->tx_timeout = &emac_tx_timeout; | ||
2835 | ndev->watchdog_timeo = 5 * HZ; | 2858 | ndev->watchdog_timeo = 5 * HZ; |
2836 | ndev->stop = &emac_close; | ||
2837 | ndev->get_stats = &emac_stats; | ||
2838 | ndev->set_multicast_list = &emac_set_multicast_list; | ||
2839 | ndev->do_ioctl = &emac_ioctl; | ||
2840 | if (emac_phy_supports_gige(dev->phy_mode)) { | 2859 | if (emac_phy_supports_gige(dev->phy_mode)) { |
2841 | ndev->hard_start_xmit = &emac_start_xmit_sg; | 2860 | ndev->netdev_ops = &emac_gige_netdev_ops; |
2842 | ndev->change_mtu = &emac_change_mtu; | ||
2843 | dev->commac.ops = &emac_commac_sg_ops; | 2861 | dev->commac.ops = &emac_commac_sg_ops; |
2844 | } else { | 2862 | } else |
2845 | ndev->hard_start_xmit = &emac_start_xmit; | 2863 | ndev->netdev_ops = &emac_netdev_ops; |
2846 | } | ||
2847 | SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops); | 2864 | SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops); |
2848 | 2865 | ||
2849 | netif_carrier_off(ndev); | 2866 | netif_carrier_off(ndev); |
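The ehea, fs_enet and ibm_newemac changes above are all the same conversion: the old per-netdev function pointers (ndev->open, ndev->stop, ndev->hard_start_xmit, ndev->tx_timeout, ...) move into a single shared const struct net_device_ops, and the probe code only assigns ndev->netdev_ops. The core then dispatches through that table; a simplified sketch of the resulting call path (illustrative, condensed rather than quoted from net/core):

#include <linux/netdevice.h>

static int example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	/* previously dev->hard_start_xmit(skb, dev) */
	return ops->ndo_start_xmit(skb, dev);
}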
diff --git a/drivers/net/igb/e1000_mac.c b/drivers/net/igb/e1000_mac.c index f4c315b5a900..472f3f124840 100644 --- a/drivers/net/igb/e1000_mac.c +++ b/drivers/net/igb/e1000_mac.c | |||
@@ -111,7 +111,7 @@ void igb_clear_vfta(struct e1000_hw *hw) | |||
111 | * Writes value at the given offset in the register array which stores | 111 | * Writes value at the given offset in the register array which stores |
112 | * the VLAN filter table. | 112 | * the VLAN filter table. |
113 | **/ | 113 | **/ |
114 | void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) | 114 | static void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) |
115 | { | 115 | { |
116 | array_wr32(E1000_VFTA, offset, value); | 116 | array_wr32(E1000_VFTA, offset, value); |
117 | wrfl(); | 117 | wrfl(); |
diff --git a/drivers/net/igb/e1000_mac.h b/drivers/net/igb/e1000_mac.h index a34de5269637..1d690b4c9ae4 100644 --- a/drivers/net/igb/e1000_mac.h +++ b/drivers/net/igb/e1000_mac.h | |||
@@ -66,7 +66,6 @@ void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); | |||
66 | s32 igb_check_alt_mac_addr(struct e1000_hw *hw); | 66 | s32 igb_check_alt_mac_addr(struct e1000_hw *hw); |
67 | void igb_reset_adaptive(struct e1000_hw *hw); | 67 | void igb_reset_adaptive(struct e1000_hw *hw); |
68 | void igb_update_adaptive(struct e1000_hw *hw); | 68 | void igb_update_adaptive(struct e1000_hw *hw); |
69 | void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value); | ||
70 | 69 | ||
71 | bool igb_enable_mng_pass_thru(struct e1000_hw *hw); | 70 | bool igb_enable_mng_pass_thru(struct e1000_hw *hw); |
72 | 71 | ||
diff --git a/drivers/net/igb/e1000_mbx.c b/drivers/net/igb/e1000_mbx.c index fe71c7ddaa05..840782fb5736 100644 --- a/drivers/net/igb/e1000_mbx.c +++ b/drivers/net/igb/e1000_mbx.c | |||
@@ -188,7 +188,7 @@ out: | |||
188 | * returns SUCCESS if it successfully received a message notification and | 188 | * returns SUCCESS if it successfully received a message notification and |
189 | * copied it into the receive buffer. | 189 | * copied it into the receive buffer. |
190 | **/ | 190 | **/ |
191 | s32 igb_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) | 191 | static s32 igb_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) |
192 | { | 192 | { |
193 | struct e1000_mbx_info *mbx = &hw->mbx; | 193 | struct e1000_mbx_info *mbx = &hw->mbx; |
194 | s32 ret_val = -E1000_ERR_MBX; | 194 | s32 ret_val = -E1000_ERR_MBX; |
@@ -214,7 +214,7 @@ out: | |||
214 | * returns SUCCESS if it successfully copied message into the buffer and | 214 | * returns SUCCESS if it successfully copied message into the buffer and |
215 | * received an ack to that message within delay * timeout period | 215 | * received an ack to that message within delay * timeout period |
216 | **/ | 216 | **/ |
217 | s32 igb_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) | 217 | static s32 igb_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) |
218 | { | 218 | { |
219 | struct e1000_mbx_info *mbx = &hw->mbx; | 219 | struct e1000_mbx_info *mbx = &hw->mbx; |
220 | s32 ret_val = 0; | 220 | s32 ret_val = 0; |
@@ -232,19 +232,6 @@ out: | |||
232 | return ret_val; | 232 | return ret_val; |
233 | } | 233 | } |
234 | 234 | ||
235 | /** | ||
236 | * e1000_init_mbx_ops_generic - Initialize NVM function pointers | ||
237 | * @hw: pointer to the HW structure | ||
238 | * | ||
239 | * Setups up the function pointers to no-op functions | ||
240 | **/ | ||
241 | void e1000_init_mbx_ops_generic(struct e1000_hw *hw) | ||
242 | { | ||
243 | struct e1000_mbx_info *mbx = &hw->mbx; | ||
244 | mbx->ops.read_posted = igb_read_posted_mbx; | ||
245 | mbx->ops.write_posted = igb_write_posted_mbx; | ||
246 | } | ||
247 | |||
248 | static s32 igb_check_for_bit_pf(struct e1000_hw *hw, u32 mask) | 235 | static s32 igb_check_for_bit_pf(struct e1000_hw *hw, u32 mask) |
249 | { | 236 | { |
250 | u32 mbvficr = rd32(E1000_MBVFICR); | 237 | u32 mbvficr = rd32(E1000_MBVFICR); |
diff --git a/drivers/net/igb/e1000_mbx.h b/drivers/net/igb/e1000_mbx.h index 6ec9890a8f7a..ebc02ea3f198 100644 --- a/drivers/net/igb/e1000_mbx.h +++ b/drivers/net/igb/e1000_mbx.h | |||
@@ -67,8 +67,6 @@ | |||
67 | 67 | ||
68 | s32 igb_read_mbx(struct e1000_hw *, u32 *, u16, u16); | 68 | s32 igb_read_mbx(struct e1000_hw *, u32 *, u16, u16); |
69 | s32 igb_write_mbx(struct e1000_hw *, u32 *, u16, u16); | 69 | s32 igb_write_mbx(struct e1000_hw *, u32 *, u16, u16); |
70 | s32 igb_read_posted_mbx(struct e1000_hw *, u32 *, u16, u16); | ||
71 | s32 igb_write_posted_mbx(struct e1000_hw *, u32 *, u16, u16); | ||
72 | s32 igb_check_for_msg(struct e1000_hw *, u16); | 70 | s32 igb_check_for_msg(struct e1000_hw *, u16); |
73 | s32 igb_check_for_ack(struct e1000_hw *, u16); | 71 | s32 igb_check_for_ack(struct e1000_hw *, u16); |
74 | s32 igb_check_for_rst(struct e1000_hw *, u16); | 72 | s32 igb_check_for_rst(struct e1000_hw *, u16); |
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index 6b0697c565b9..08c801490c72 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c | |||
@@ -152,14 +152,13 @@ static struct notifier_block dca_notifier = { | |||
152 | /* for netdump / net console */ | 152 | /* for netdump / net console */ |
153 | static void igb_netpoll(struct net_device *); | 153 | static void igb_netpoll(struct net_device *); |
154 | #endif | 154 | #endif |
155 | |||
156 | #ifdef CONFIG_PCI_IOV | 155 | #ifdef CONFIG_PCI_IOV |
157 | static ssize_t igb_set_num_vfs(struct device *, struct device_attribute *, | 156 | static unsigned int max_vfs = 0; |
158 | const char *, size_t); | 157 | module_param(max_vfs, uint, 0); |
159 | static ssize_t igb_show_num_vfs(struct device *, struct device_attribute *, | 158 | MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate " |
160 | char *); | 159 | "per physical function"); |
161 | DEVICE_ATTR(num_vfs, S_IRUGO | S_IWUSR, igb_show_num_vfs, igb_set_num_vfs); | 160 | #endif /* CONFIG_PCI_IOV */ |
162 | #endif | 161 | |
163 | static pci_ers_result_t igb_io_error_detected(struct pci_dev *, | 162 | static pci_ers_result_t igb_io_error_detected(struct pci_dev *, |
164 | pci_channel_state_t); | 163 | pci_channel_state_t); |
165 | static pci_ers_result_t igb_io_slot_reset(struct pci_dev *); | 164 | static pci_ers_result_t igb_io_slot_reset(struct pci_dev *); |
@@ -671,6 +670,21 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter) | |||
671 | 670 | ||
672 | /* If we can't do MSI-X, try MSI */ | 671 | /* If we can't do MSI-X, try MSI */ |
673 | msi_only: | 672 | msi_only: |
673 | #ifdef CONFIG_PCI_IOV | ||
674 | /* disable SR-IOV for non MSI-X configurations */ | ||
675 | if (adapter->vf_data) { | ||
676 | struct e1000_hw *hw = &adapter->hw; | ||
677 | /* disable iov and allow time for transactions to clear */ | ||
678 | pci_disable_sriov(adapter->pdev); | ||
679 | msleep(500); | ||
680 | |||
681 | kfree(adapter->vf_data); | ||
682 | adapter->vf_data = NULL; | ||
683 | wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ); | ||
684 | msleep(100); | ||
685 | dev_info(&adapter->pdev->dev, "IOV Disabled\n"); | ||
686 | } | ||
687 | #endif | ||
674 | adapter->num_rx_queues = 1; | 688 | adapter->num_rx_queues = 1; |
675 | adapter->num_tx_queues = 1; | 689 | adapter->num_tx_queues = 1; |
676 | if (!pci_enable_msi(adapter->pdev)) | 690 | if (!pci_enable_msi(adapter->pdev)) |
@@ -1238,6 +1252,46 @@ static int __devinit igb_probe(struct pci_dev *pdev, | |||
1238 | if (err) | 1252 | if (err) |
1239 | goto err_sw_init; | 1253 | goto err_sw_init; |
1240 | 1254 | ||
1255 | #ifdef CONFIG_PCI_IOV | ||
1256 | /* since iov functionality isn't critical to base device function we | ||
1257 | * can accept failure. If it fails we don't allow iov to be enabled */ | ||
1258 | if (hw->mac.type == e1000_82576) { | ||
1259 | /* 82576 supports a maximum of 7 VFs in addition to the PF */ | ||
1260 | unsigned int num_vfs = (max_vfs > 7) ? 7 : max_vfs; | ||
1261 | int i; | ||
1262 | unsigned char mac_addr[ETH_ALEN]; | ||
1263 | |||
1264 | if (num_vfs) { | ||
1265 | adapter->vf_data = kcalloc(num_vfs, | ||
1266 | sizeof(struct vf_data_storage), | ||
1267 | GFP_KERNEL); | ||
1268 | if (!adapter->vf_data) { | ||
1269 | dev_err(&pdev->dev, | ||
1270 | "Could not allocate VF private data - " | ||
1271 | "IOV enable failed\n"); | ||
1272 | } else { | ||
1273 | err = pci_enable_sriov(pdev, num_vfs); | ||
1274 | if (!err) { | ||
1275 | adapter->vfs_allocated_count = num_vfs; | ||
1276 | dev_info(&pdev->dev, | ||
1277 | "%d vfs allocated\n", | ||
1278 | num_vfs); | ||
1279 | for (i = 0; | ||
1280 | i < adapter->vfs_allocated_count; | ||
1281 | i++) { | ||
1282 | random_ether_addr(mac_addr); | ||
1283 | igb_set_vf_mac(adapter, i, | ||
1284 | mac_addr); | ||
1285 | } | ||
1286 | } else { | ||
1287 | kfree(adapter->vf_data); | ||
1288 | adapter->vf_data = NULL; | ||
1289 | } | ||
1290 | } | ||
1291 | } | ||
1292 | } | ||
1293 | |||
1294 | #endif | ||
1241 | /* setup the private structure */ | 1295 | /* setup the private structure */ |
1242 | err = igb_sw_init(adapter); | 1296 | err = igb_sw_init(adapter); |
1243 | if (err) | 1297 | if (err) |
@@ -1397,19 +1451,6 @@ static int __devinit igb_probe(struct pci_dev *pdev, | |||
1397 | if (err) | 1451 | if (err) |
1398 | goto err_register; | 1452 | goto err_register; |
1399 | 1453 | ||
1400 | #ifdef CONFIG_PCI_IOV | ||
1401 | /* since iov functionality isn't critical to base device function we | ||
1402 | * can accept failure. If it fails we don't allow iov to be enabled */ | ||
1403 | if (hw->mac.type == e1000_82576) { | ||
1404 | err = pci_enable_sriov(pdev, 0); | ||
1405 | if (!err) | ||
1406 | err = device_create_file(&netdev->dev, | ||
1407 | &dev_attr_num_vfs); | ||
1408 | if (err) | ||
1409 | dev_err(&pdev->dev, "Failed to initialize IOV\n"); | ||
1410 | } | ||
1411 | |||
1412 | #endif | ||
1413 | #ifdef CONFIG_IGB_DCA | 1454 | #ifdef CONFIG_IGB_DCA |
1414 | if (dca_add_requester(&pdev->dev) == 0) { | 1455 | if (dca_add_requester(&pdev->dev) == 0) { |
1415 | adapter->flags |= IGB_FLAG_DCA_ENABLED; | 1456 | adapter->flags |= IGB_FLAG_DCA_ENABLED; |
@@ -5422,89 +5463,4 @@ static void igb_vmm_control(struct igb_adapter *adapter) | |||
5422 | igb_vmdq_set_replication_pf(hw, true); | 5463 | igb_vmdq_set_replication_pf(hw, true); |
5423 | } | 5464 | } |
5424 | 5465 | ||
5425 | #ifdef CONFIG_PCI_IOV | ||
5426 | static ssize_t igb_show_num_vfs(struct device *dev, | ||
5427 | struct device_attribute *attr, char *buf) | ||
5428 | { | ||
5429 | struct igb_adapter *adapter = netdev_priv(to_net_dev(dev)); | ||
5430 | |||
5431 | return sprintf(buf, "%d\n", adapter->vfs_allocated_count); | ||
5432 | } | ||
5433 | |||
5434 | static ssize_t igb_set_num_vfs(struct device *dev, | ||
5435 | struct device_attribute *attr, | ||
5436 | const char *buf, size_t count) | ||
5437 | { | ||
5438 | struct net_device *netdev = to_net_dev(dev); | ||
5439 | struct igb_adapter *adapter = netdev_priv(netdev); | ||
5440 | struct e1000_hw *hw = &adapter->hw; | ||
5441 | struct pci_dev *pdev = adapter->pdev; | ||
5442 | unsigned int num_vfs, i; | ||
5443 | unsigned char mac_addr[ETH_ALEN]; | ||
5444 | int err; | ||
5445 | |||
5446 | sscanf(buf, "%u", &num_vfs); | ||
5447 | |||
5448 | if (num_vfs > 7) | ||
5449 | num_vfs = 7; | ||
5450 | |||
5451 | /* value unchanged do nothing */ | ||
5452 | if (num_vfs == adapter->vfs_allocated_count) | ||
5453 | return count; | ||
5454 | |||
5455 | if (netdev->flags & IFF_UP) | ||
5456 | igb_close(netdev); | ||
5457 | |||
5458 | igb_reset_interrupt_capability(adapter); | ||
5459 | igb_free_queues(adapter); | ||
5460 | adapter->tx_ring = NULL; | ||
5461 | adapter->rx_ring = NULL; | ||
5462 | adapter->vfs_allocated_count = 0; | ||
5463 | |||
5464 | /* reclaim resources allocated to VFs since we are changing count */ | ||
5465 | if (adapter->vf_data) { | ||
5466 | /* disable iov and allow time for transactions to clear */ | ||
5467 | pci_disable_sriov(pdev); | ||
5468 | msleep(500); | ||
5469 | |||
5470 | kfree(adapter->vf_data); | ||
5471 | adapter->vf_data = NULL; | ||
5472 | wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ); | ||
5473 | msleep(100); | ||
5474 | dev_info(&pdev->dev, "IOV Disabled\n"); | ||
5475 | } | ||
5476 | |||
5477 | if (num_vfs) { | ||
5478 | adapter->vf_data = kcalloc(num_vfs, | ||
5479 | sizeof(struct vf_data_storage), | ||
5480 | GFP_KERNEL); | ||
5481 | if (!adapter->vf_data) { | ||
5482 | dev_err(&pdev->dev, "Could not allocate VF private " | ||
5483 | "data - IOV enable failed\n"); | ||
5484 | } else { | ||
5485 | err = pci_enable_sriov(pdev, num_vfs); | ||
5486 | if (!err) { | ||
5487 | adapter->vfs_allocated_count = num_vfs; | ||
5488 | dev_info(&pdev->dev, "%d vfs allocated\n", num_vfs); | ||
5489 | for (i = 0; i < adapter->vfs_allocated_count; i++) { | ||
5490 | random_ether_addr(mac_addr); | ||
5491 | igb_set_vf_mac(adapter, i, mac_addr); | ||
5492 | } | ||
5493 | } else { | ||
5494 | kfree(adapter->vf_data); | ||
5495 | adapter->vf_data = NULL; | ||
5496 | } | ||
5497 | } | ||
5498 | } | ||
5499 | |||
5500 | igb_set_interrupt_capability(adapter); | ||
5501 | igb_alloc_queues(adapter); | ||
5502 | igb_reset(adapter); | ||
5503 | |||
5504 | if (netdev->flags & IFF_UP) | ||
5505 | igb_open(netdev); | ||
5506 | |||
5507 | return count; | ||
5508 | } | ||
5509 | #endif /* CONFIG_PCI_IOV */ | ||
5510 | /* igb_main.c */ | 5466 | /* igb_main.c */ |
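The igb changes replace the runtime num_vfs sysfs attribute with a max_vfs module parameter that is read once in igb_probe(), so the VF count is now fixed at module load time (for example by loading the driver with max_vfs=2) rather than being rewritten on a live interface; igb_set_interrupt_capability() additionally unwinds SR-IOV if MSI-X cannot be obtained. A condensed sketch of the probe-time flow introduced above (names taken from the hunks, error reporting trimmed):

#include <linux/pci.h>
#include <linux/slab.h>

static void example_enable_vfs(struct igb_adapter *adapter, struct pci_dev *pdev,
			       unsigned int max_vfs)
{
	/* 82576 supports at most 7 VFs in addition to the PF */
	unsigned int num_vfs = min_t(unsigned int, max_vfs, 7);

	if (!num_vfs)
		return;

	adapter->vf_data = kcalloc(num_vfs, sizeof(struct vf_data_storage),
				   GFP_KERNEL);
	if (!adapter->vf_data)
		return;

	if (pci_enable_sriov(pdev, num_vfs)) {
		kfree(adapter->vf_data);	/* leave SR-IOV disabled on failure */
		adapter->vf_data = NULL;
		return;
	}
	adapter->vfs_allocated_count = num_vfs;
}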
diff --git a/drivers/net/igbvf/Makefile b/drivers/net/igbvf/Makefile new file mode 100644 index 000000000000..c2f150d8f2d9 --- /dev/null +++ b/drivers/net/igbvf/Makefile | |||
@@ -0,0 +1,38 @@ | |||
1 | ################################################################################ | ||
2 | # | ||
3 | # Intel(R) 82576 Virtual Function Linux driver | ||
4 | # Copyright(c) 2009 Intel Corporation. | ||
5 | # | ||
6 | # This program is free software; you can redistribute it and/or modify it | ||
7 | # under the terms and conditions of the GNU General Public License, | ||
8 | # version 2, as published by the Free Software Foundation. | ||
9 | # | ||
10 | # This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | # more details. | ||
14 | # | ||
15 | # You should have received a copy of the GNU General Public License along with | ||
16 | # this program; if not, write to the Free Software Foundation, Inc., | ||
17 | # 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | # | ||
19 | # The full GNU General Public License is included in this distribution in | ||
20 | # the file called "COPYING". | ||
21 | # | ||
22 | # Contact Information: | ||
23 | # e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | ||
24 | # Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
25 | # | ||
26 | ################################################################################ | ||
27 | |||
28 | # | ||
29 | # Makefile for the Intel(R) 82576 VF ethernet driver | ||
30 | # | ||
31 | |||
32 | obj-$(CONFIG_IGBVF) += igbvf.o | ||
33 | |||
34 | igbvf-objs := vf.o \ | ||
35 | mbx.o \ | ||
36 | ethtool.o \ | ||
37 | netdev.o | ||
38 | |||
diff --git a/drivers/net/igbvf/defines.h b/drivers/net/igbvf/defines.h new file mode 100644 index 000000000000..88a47537518a --- /dev/null +++ b/drivers/net/igbvf/defines.h | |||
@@ -0,0 +1,125 @@ | |||
1 | /******************************************************************************* | ||
2 | |||
3 | Intel(R) 82576 Virtual Function Linux driver | ||
4 | Copyright(c) 1999 - 2009 Intel Corporation. | ||
5 | |||
6 | This program is free software; you can redistribute it and/or modify it | ||
7 | under the terms and conditions of the GNU General Public License, | ||
8 | version 2, as published by the Free Software Foundation. | ||
9 | |||
10 | This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | more details. | ||
14 | |||
15 | You should have received a copy of the GNU General Public License along with | ||
16 | this program; if not, write to the Free Software Foundation, Inc., | ||
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | |||
19 | The full GNU General Public License is included in this distribution in | ||
20 | the file called "COPYING". | ||
21 | |||
22 | Contact Information: | ||
23 | e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | ||
24 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
25 | |||
26 | *******************************************************************************/ | ||
27 | |||
28 | #ifndef _E1000_DEFINES_H_ | ||
29 | #define _E1000_DEFINES_H_ | ||
30 | |||
31 | /* Number of Transmit and Receive Descriptors must be a multiple of 8 */ | ||
32 | #define REQ_TX_DESCRIPTOR_MULTIPLE 8 | ||
33 | #define REQ_RX_DESCRIPTOR_MULTIPLE 8 | ||
34 | |||
35 | /* IVAR valid bit */ | ||
36 | #define E1000_IVAR_VALID 0x80 | ||
37 | |||
38 | /* Receive Descriptor bit definitions */ | ||
39 | #define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ | ||
40 | #define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ | ||
41 | #define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ | ||
42 | #define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ | ||
43 | #define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ | ||
44 | #define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ | ||
45 | #define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ | ||
46 | #define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ | ||
47 | #define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ | ||
48 | |||
49 | #define E1000_RXDEXT_STATERR_CE 0x01000000 | ||
50 | #define E1000_RXDEXT_STATERR_SE 0x02000000 | ||
51 | #define E1000_RXDEXT_STATERR_SEQ 0x04000000 | ||
52 | #define E1000_RXDEXT_STATERR_CXE 0x10000000 | ||
53 | #define E1000_RXDEXT_STATERR_TCPE 0x20000000 | ||
54 | #define E1000_RXDEXT_STATERR_IPE 0x40000000 | ||
55 | #define E1000_RXDEXT_STATERR_RXE 0x80000000 | ||
56 | |||
57 | |||
58 | /* Same mask, but for extended and packet split descriptors */ | ||
59 | #define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ | ||
60 | E1000_RXDEXT_STATERR_CE | \ | ||
61 | E1000_RXDEXT_STATERR_SE | \ | ||
62 | E1000_RXDEXT_STATERR_SEQ | \ | ||
63 | E1000_RXDEXT_STATERR_CXE | \ | ||
64 | E1000_RXDEXT_STATERR_RXE) | ||
65 | |||
66 | /* Device Control */ | ||
67 | #define E1000_CTRL_RST 0x04000000 /* Global reset */ | ||
68 | |||
69 | /* Device Status */ | ||
70 | #define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ | ||
71 | #define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ | ||
72 | #define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ | ||
73 | #define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ | ||
74 | #define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ | ||
75 | #define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ | ||
76 | |||
77 | #define SPEED_10 10 | ||
78 | #define SPEED_100 100 | ||
79 | #define SPEED_1000 1000 | ||
80 | #define HALF_DUPLEX 1 | ||
81 | #define FULL_DUPLEX 2 | ||
82 | |||
83 | /* Transmit Descriptor bit definitions */ | ||
84 | #define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ | ||
85 | #define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ | ||
86 | #define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ | ||
87 | #define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ | ||
88 | |||
89 | #define MAX_JUMBO_FRAME_SIZE 0x3F00 | ||
90 | |||
91 | /* 802.1q VLAN Packet Size */ | ||
92 | #define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */ | ||
93 | |||
94 | /* Error Codes */ | ||
95 | #define E1000_SUCCESS 0 | ||
96 | #define E1000_ERR_CONFIG 3 | ||
97 | #define E1000_ERR_MAC_INIT 5 | ||
98 | #define E1000_ERR_MBX 15 | ||
99 | |||
100 | #ifndef ETH_ADDR_LEN | ||
101 | #define ETH_ADDR_LEN 6 | ||
102 | #endif | ||
103 | |||
104 | /* SRRCTL bit definitions */ | ||
105 | #define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ | ||
106 | #define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00 | ||
107 | #define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ | ||
108 | #define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 | ||
109 | #define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 | ||
110 | #define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000 | ||
111 | #define E1000_SRRCTL_DROP_EN 0x80000000 | ||
112 | |||
113 | #define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F | ||
114 | #define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00 | ||
115 | |||
116 | /* Additional Descriptor Control definitions */ | ||
117 | #define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Queue */ | ||
118 | #define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */ | ||
119 | |||
120 | /* Direct Cache Access (DCA) definitions */ | ||
121 | #define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ | ||
122 | |||
123 | #define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ | ||
124 | |||
125 | #endif /* _E1000_DEFINES_H_ */ | ||
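The SRRCTL definitions above describe the split receive control register: the packet buffer size field is programmed in 1 KB units (hence the "shift right" by 10), the header buffer size is shifted left into its field, and the DESCTYPE bits select the advanced one-buffer descriptor layout. A hedged sketch of how a driver typically composes a value from these constants (illustrative only; the actual programming lives in the driver's ring setup code, and example_srrctl() is a made-up name):

static u32 example_srrctl(u32 rx_buffer_len, u32 header_len)
{
	u32 srrctl = 0;

	/* packet buffer size in 1 KB units */
	srrctl |= (rx_buffer_len >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
		  E1000_SRRCTL_BSIZEPKT_MASK;
	/* header buffer size field */
	srrctl |= (header_len << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT) &
		  E1000_SRRCTL_BSIZEHDRSIZE_MASK;
	/* advanced one-buffer descriptors */
	srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;

	return srrctl;
}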
diff --git a/drivers/net/igbvf/ethtool.c b/drivers/net/igbvf/ethtool.c new file mode 100644 index 000000000000..1dcaa6905312 --- /dev/null +++ b/drivers/net/igbvf/ethtool.c | |||
@@ -0,0 +1,540 @@ | |||
1 | /******************************************************************************* | ||
2 | |||
3 | Intel(R) 82576 Virtual Function Linux driver | ||
4 | Copyright(c) 2009 Intel Corporation. | ||
5 | |||
6 | This program is free software; you can redistribute it and/or modify it | ||
7 | under the terms and conditions of the GNU General Public License, | ||
8 | version 2, as published by the Free Software Foundation. | ||
9 | |||
10 | This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | more details. | ||
14 | |||
15 | You should have received a copy of the GNU General Public License along with | ||
16 | this program; if not, write to the Free Software Foundation, Inc., | ||
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | |||
19 | The full GNU General Public License is included in this distribution in | ||
20 | the file called "COPYING". | ||
21 | |||
22 | Contact Information: | ||
23 | e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | ||
24 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
25 | |||
26 | *******************************************************************************/ | ||
27 | |||
28 | /* ethtool support for igbvf */ | ||
29 | |||
30 | #include <linux/netdevice.h> | ||
31 | #include <linux/ethtool.h> | ||
32 | #include <linux/pci.h> | ||
33 | #include <linux/vmalloc.h> | ||
34 | #include <linux/delay.h> | ||
35 | |||
36 | #include "igbvf.h" | ||
37 | #include <linux/if_vlan.h> | ||
38 | |||
39 | |||
40 | struct igbvf_stats { | ||
41 | char stat_string[ETH_GSTRING_LEN]; | ||
42 | int sizeof_stat; | ||
43 | int stat_offset; | ||
44 | int base_stat_offset; | ||
45 | }; | ||
46 | |||
47 | #define IGBVF_STAT(current, base) \ | ||
48 | sizeof(((struct igbvf_adapter *)0)->current), \ | ||
49 | offsetof(struct igbvf_adapter, current), \ | ||
50 | offsetof(struct igbvf_adapter, base) | ||
51 | |||
52 | static const struct igbvf_stats igbvf_gstrings_stats[] = { | ||
53 | { "rx_packets", IGBVF_STAT(stats.gprc, stats.base_gprc) }, | ||
54 | { "tx_packets", IGBVF_STAT(stats.gptc, stats.base_gptc) }, | ||
55 | { "rx_bytes", IGBVF_STAT(stats.gorc, stats.base_gorc) }, | ||
56 | { "tx_bytes", IGBVF_STAT(stats.gotc, stats.base_gotc) }, | ||
57 | { "multicast", IGBVF_STAT(stats.mprc, stats.base_mprc) }, | ||
58 | { "lbrx_bytes", IGBVF_STAT(stats.gorlbc, stats.base_gorlbc) }, | ||
59 | { "lbrx_packets", IGBVF_STAT(stats.gprlbc, stats.base_gprlbc) }, | ||
60 | { "tx_restart_queue", IGBVF_STAT(restart_queue, zero_base) }, | ||
61 | { "rx_long_byte_count", IGBVF_STAT(stats.gorc, stats.base_gorc) }, | ||
62 | { "rx_csum_offload_good", IGBVF_STAT(hw_csum_good, zero_base) }, | ||
63 | { "rx_csum_offload_errors", IGBVF_STAT(hw_csum_err, zero_base) }, | ||
64 | { "rx_header_split", IGBVF_STAT(rx_hdr_split, zero_base) }, | ||
65 | { "alloc_rx_buff_failed", IGBVF_STAT(alloc_rx_buff_failed, zero_base) }, | ||
66 | }; | ||
67 | |||
68 | #define IGBVF_GLOBAL_STATS_LEN ARRAY_SIZE(igbvf_gstrings_stats) | ||
69 | |||
70 | static const char igbvf_gstrings_test[][ETH_GSTRING_LEN] = { | ||
71 | "Link test (on/offline)" | ||
72 | }; | ||
73 | |||
74 | #define IGBVF_TEST_LEN ARRAY_SIZE(igbvf_gstrings_test) | ||
75 | |||
76 | static int igbvf_get_settings(struct net_device *netdev, | ||
77 | struct ethtool_cmd *ecmd) | ||
78 | { | ||
79 | struct igbvf_adapter *adapter = netdev_priv(netdev); | ||
80 | struct e1000_hw *hw = &adapter->hw; | ||
81 | u32 status; | ||
82 | |||
83 | ecmd->supported = SUPPORTED_1000baseT_Full; | ||
84 | |||
85 | ecmd->advertising = ADVERTISED_1000baseT_Full; | ||
86 | |||
87 | ecmd->port = -1; | ||
88 | ecmd->transceiver = XCVR_DUMMY1; | ||
89 | |||
90 | status = er32(STATUS); | ||
91 | if (status & E1000_STATUS_LU) { | ||
92 | if (status & E1000_STATUS_SPEED_1000) | ||
93 | ecmd->speed = 1000; | ||
94 | else if (status & E1000_STATUS_SPEED_100) | ||
95 | ecmd->speed = 100; | ||
96 | else | ||
97 | ecmd->speed = 10; | ||
98 | |||
99 | if (status & E1000_STATUS_FD) | ||
100 | ecmd->duplex = DUPLEX_FULL; | ||
101 | else | ||
102 | ecmd->duplex = DUPLEX_HALF; | ||
103 | } else { | ||
104 | ecmd->speed = -1; | ||
105 | ecmd->duplex = -1; | ||
106 | } | ||
107 | |||
108 | ecmd->autoneg = AUTONEG_DISABLE; | ||
109 | |||
110 | return 0; | ||
111 | } | ||
112 | |||
113 | static u32 igbvf_get_link(struct net_device *netdev) | ||
114 | { | ||
115 | return netif_carrier_ok(netdev); | ||
116 | } | ||
117 | |||
118 | static int igbvf_set_settings(struct net_device *netdev, | ||
119 | struct ethtool_cmd *ecmd) | ||
120 | { | ||
121 | return -EOPNOTSUPP; | ||
122 | } | ||
123 | |||
124 | static void igbvf_get_pauseparam(struct net_device *netdev, | ||
125 | struct ethtool_pauseparam *pause) | ||
126 | { | ||
127 | return; | ||
128 | } | ||
129 | |||
130 | static int igbvf_set_pauseparam(struct net_device *netdev, | ||
131 | struct ethtool_pauseparam *pause) | ||
132 | { | ||
133 | return -EOPNOTSUPP; | ||
134 | } | ||
135 | |||
136 | static u32 igbvf_get_tx_csum(struct net_device *netdev) | ||
137 | { | ||
138 | return ((netdev->features & NETIF_F_IP_CSUM) != 0); | ||
139 | } | ||
140 | |||
141 | static int igbvf_set_tx_csum(struct net_device *netdev, u32 data) | ||
142 | { | ||
143 | if (data) | ||
144 | netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); | ||
145 | else | ||
146 | netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); | ||
147 | return 0; | ||
148 | } | ||
149 | |||
150 | static int igbvf_set_tso(struct net_device *netdev, u32 data) | ||
151 | { | ||
152 | struct igbvf_adapter *adapter = netdev_priv(netdev); | ||
153 | int i; | ||
154 | struct net_device *v_netdev; | ||
155 | |||
156 | if (data) { | ||
157 | netdev->features |= NETIF_F_TSO; | ||
158 | netdev->features |= NETIF_F_TSO6; | ||
159 | } else { | ||
160 | netdev->features &= ~NETIF_F_TSO; | ||
161 | netdev->features &= ~NETIF_F_TSO6; | ||
162 | /* disable TSO on all VLANs if they're present */ | ||
163 | if (!adapter->vlgrp) | ||
164 | goto tso_out; | ||
165 | for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { | ||
166 | v_netdev = vlan_group_get_device(adapter->vlgrp, i); | ||
167 | if (!v_netdev) | ||
168 | continue; | ||
169 | |||
170 | v_netdev->features &= ~NETIF_F_TSO; | ||
171 | v_netdev->features &= ~NETIF_F_TSO6; | ||
172 | vlan_group_set_device(adapter->vlgrp, i, v_netdev); | ||
173 | } | ||
174 | } | ||
175 | |||
176 | tso_out: | ||
177 | dev_info(&adapter->pdev->dev, "TSO is %s\n", | ||
178 | data ? "Enabled" : "Disabled"); | ||
179 | adapter->flags |= FLAG_TSO_FORCE; | ||
180 | return 0; | ||
181 | } | ||
182 | |||
183 | static u32 igbvf_get_msglevel(struct net_device *netdev) | ||
184 | { | ||
185 | struct igbvf_adapter *adapter = netdev_priv(netdev); | ||
186 | return adapter->msg_enable; | ||
187 | } | ||
188 | |||
189 | static void igbvf_set_msglevel(struct net_device *netdev, u32 data) | ||
190 | { | ||
191 | struct igbvf_adapter *adapter = netdev_priv(netdev); | ||
192 | adapter->msg_enable = data; | ||
193 | } | ||
194 | |||
195 | static int igbvf_get_regs_len(struct net_device *netdev) | ||
196 | { | ||
197 | #define IGBVF_REGS_LEN 8 | ||
198 | return IGBVF_REGS_LEN * sizeof(u32); | ||
199 | } | ||
200 | |||
201 | static void igbvf_get_regs(struct net_device *netdev, | ||
202 | struct ethtool_regs *regs, void *p) | ||
203 | { | ||
204 | struct igbvf_adapter *adapter = netdev_priv(netdev); | ||
205 | struct e1000_hw *hw = &adapter->hw; | ||
206 | u32 *regs_buff = p; | ||
207 | u8 revision_id; | ||
208 | |||
209 | memset(p, 0, IGBVF_REGS_LEN * sizeof(u32)); | ||
210 | |||
211 | pci_read_config_byte(adapter->pdev, PCI_REVISION_ID, &revision_id); | ||
212 | |||
213 | regs->version = (1 << 24) | (revision_id << 16) | adapter->pdev->device; | ||
214 | |||
215 | regs_buff[0] = er32(CTRL); | ||
216 | regs_buff[1] = er32(STATUS); | ||
217 | |||
218 | regs_buff[2] = er32(RDLEN(0)); | ||
219 | regs_buff[3] = er32(RDH(0)); | ||
220 | regs_buff[4] = er32(RDT(0)); | ||
221 | |||
222 | regs_buff[5] = er32(TDLEN(0)); | ||
223 | regs_buff[6] = er32(TDH(0)); | ||
224 | regs_buff[7] = er32(TDT(0)); | ||
225 | } | ||
226 | |||
227 | static int igbvf_get_eeprom_len(struct net_device *netdev) | ||
228 | { | ||
229 | return 0; | ||
230 | } | ||
231 | |||
232 | static int igbvf_get_eeprom(struct net_device *netdev, | ||
233 | struct ethtool_eeprom *eeprom, u8 *bytes) | ||
234 | { | ||
235 | return -EOPNOTSUPP; | ||
236 | } | ||
237 | |||
238 | static int igbvf_set_eeprom(struct net_device *netdev, | ||
239 | struct ethtool_eeprom *eeprom, u8 *bytes) | ||
240 | { | ||
241 | return -EOPNOTSUPP; | ||
242 | } | ||
243 | |||
244 | static void igbvf_get_drvinfo(struct net_device *netdev, | ||
245 | struct ethtool_drvinfo *drvinfo) | ||
246 | { | ||
247 | struct igbvf_adapter *adapter = netdev_priv(netdev); | ||
248 | char firmware_version[32] = "N/A"; | ||
249 | |||
250 | strncpy(drvinfo->driver, igbvf_driver_name, 32); | ||
251 | strncpy(drvinfo->version, igbvf_driver_version, 32); | ||
252 | strncpy(drvinfo->fw_version, firmware_version, 32); | ||
253 | strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); | ||
254 | drvinfo->regdump_len = igbvf_get_regs_len(netdev); | ||
255 | drvinfo->eedump_len = igbvf_get_eeprom_len(netdev); | ||
256 | } | ||
257 | |||
258 | static void igbvf_get_ringparam(struct net_device *netdev, | ||
259 | struct ethtool_ringparam *ring) | ||
260 | { | ||
261 | struct igbvf_adapter *adapter = netdev_priv(netdev); | ||
262 | struct igbvf_ring *tx_ring = adapter->tx_ring; | ||
263 | struct igbvf_ring *rx_ring = adapter->rx_ring; | ||
264 | |||
265 | ring->rx_max_pending = IGBVF_MAX_RXD; | ||
266 | ring->tx_max_pending = IGBVF_MAX_TXD; | ||
267 | ring->rx_mini_max_pending = 0; | ||
268 | ring->rx_jumbo_max_pending = 0; | ||
269 | ring->rx_pending = rx_ring->count; | ||
270 | ring->tx_pending = tx_ring->count; | ||
271 | ring->rx_mini_pending = 0; | ||
272 | ring->rx_jumbo_pending = 0; | ||
273 | } | ||
274 | |||
275 | static int igbvf_set_ringparam(struct net_device *netdev, | ||
276 | struct ethtool_ringparam *ring) | ||
277 | { | ||
278 | struct igbvf_adapter *adapter = netdev_priv(netdev); | ||
279 | struct igbvf_ring *temp_ring; | ||
280 | int err; | ||
281 | u32 new_rx_count, new_tx_count; | ||
282 | |||
283 | if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) | ||
284 | return -EINVAL; | ||
285 | |||
286 | new_rx_count = max(ring->rx_pending, (u32)IGBVF_MIN_RXD); | ||
287 | new_rx_count = min(new_rx_count, (u32)IGBVF_MAX_RXD); | ||
288 | new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE); | ||
289 | |||
290 | new_tx_count = max(ring->tx_pending, (u32)IGBVF_MIN_TXD); | ||
291 | new_tx_count = min(new_tx_count, (u32)IGBVF_MAX_TXD); | ||
292 | new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE); | ||
293 | |||
294 | if ((new_tx_count == adapter->tx_ring->count) && | ||
295 | (new_rx_count == adapter->rx_ring->count)) { | ||
296 | /* nothing to do */ | ||
297 | return 0; | ||
298 | } | ||
299 | |||
300 | temp_ring = vmalloc(sizeof(struct igbvf_ring)); | ||
301 | if (!temp_ring) | ||
302 | return -ENOMEM; | ||
303 | |||
304 | while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) | ||
305 | msleep(1); | ||
306 | |||
307 | if (netif_running(adapter->netdev)) | ||
308 | igbvf_down(adapter); | ||
309 | |||
310 | /* | ||
311 | * We can't just free everything and then setup again, | ||
312 | * because the ISRs in MSI-X mode get passed pointers | ||
313 | * to the tx and rx ring structs. | ||
314 | */ | ||
315 | if (new_tx_count != adapter->tx_ring->count) { | ||
316 | memcpy(temp_ring, adapter->tx_ring, sizeof(struct igbvf_ring)); | ||
317 | |||
318 | temp_ring->count = new_tx_count; | ||
319 | err = igbvf_setup_tx_resources(adapter, temp_ring); | ||
320 | if (err) | ||
321 | goto err_setup; | ||
322 | |||
323 | igbvf_free_tx_resources(adapter->tx_ring); | ||
324 | |||
325 | memcpy(adapter->tx_ring, temp_ring, sizeof(struct igbvf_ring)); | ||
326 | } | ||
327 | |||
328 | if (new_rx_count != adapter->rx_ring->count) { | ||
329 | memcpy(temp_ring, adapter->rx_ring, sizeof(struct igbvf_ring)); | ||
330 | |||
331 | temp_ring->count = new_rx_count; | ||
332 | err = igbvf_setup_rx_resources(adapter, temp_ring); | ||
333 | if (err) | ||
334 | goto err_setup; | ||
335 | |||
336 | igbvf_free_rx_resources(adapter->rx_ring); | ||
337 | |||
338 | memcpy(adapter->rx_ring, temp_ring,sizeof(struct igbvf_ring)); | ||
339 | } | ||
340 | |||
341 | err = 0; | ||
342 | err_setup: | ||
343 | if (netif_running(adapter->netdev)) | ||
344 | igbvf_up(adapter); | ||
345 | |||
346 | clear_bit(__IGBVF_RESETTING, &adapter->state); | ||
347 | vfree(temp_ring); | ||
348 | return err; | ||
349 | } | ||
350 | |||
351 | static int igbvf_link_test(struct igbvf_adapter *adapter, u64 *data) | ||
352 | { | ||
353 | struct e1000_hw *hw = &adapter->hw; | ||
354 | *data = 0; | ||
355 | |||
356 | hw->mac.ops.check_for_link(hw); | ||
357 | |||
358 | if (!(er32(STATUS) & E1000_STATUS_LU)) | ||
359 | *data = 1; | ||
360 | |||
361 | return *data; | ||
362 | } | ||
363 | |||
364 | static int igbvf_get_self_test_count(struct net_device *netdev) | ||
365 | { | ||
366 | return IGBVF_TEST_LEN; | ||
367 | } | ||
368 | |||
369 | static int igbvf_get_stats_count(struct net_device *netdev) | ||
370 | { | ||
371 | return IGBVF_GLOBAL_STATS_LEN; | ||
372 | } | ||
373 | |||
374 | static void igbvf_diag_test(struct net_device *netdev, | ||
375 | struct ethtool_test *eth_test, u64 *data) | ||
376 | { | ||
377 | struct igbvf_adapter *adapter = netdev_priv(netdev); | ||
378 | |||
379 | set_bit(__IGBVF_TESTING, &adapter->state); | ||
380 | |||
381 | /* | ||
382 | * Link test performed before hardware reset so autoneg doesn't | ||
383 | * interfere with test result | ||
384 | */ | ||
385 | if (igbvf_link_test(adapter, &data[0])) | ||
386 | eth_test->flags |= ETH_TEST_FL_FAILED; | ||
387 | |||
388 | clear_bit(__IGBVF_TESTING, &adapter->state); | ||
389 | msleep_interruptible(4 * 1000); | ||
390 | } | ||
391 | |||
392 | static void igbvf_get_wol(struct net_device *netdev, | ||
393 | struct ethtool_wolinfo *wol) | ||
394 | { | ||
395 | wol->supported = 0; | ||
396 | wol->wolopts = 0; | ||
397 | |||
398 | return; | ||
399 | } | ||
400 | |||
401 | static int igbvf_set_wol(struct net_device *netdev, | ||
402 | struct ethtool_wolinfo *wol) | ||
403 | { | ||
404 | return -EOPNOTSUPP; | ||
405 | } | ||
406 | |||
407 | static int igbvf_phys_id(struct net_device *netdev, u32 data) | ||
408 | { | ||
409 | return 0; | ||
410 | } | ||
411 | |||
412 | static int igbvf_get_coalesce(struct net_device *netdev, | ||
413 | struct ethtool_coalesce *ec) | ||
414 | { | ||
415 | struct igbvf_adapter *adapter = netdev_priv(netdev); | ||
416 | |||
417 | if (adapter->itr_setting <= 3) | ||
418 | ec->rx_coalesce_usecs = adapter->itr_setting; | ||
419 | else | ||
420 | ec->rx_coalesce_usecs = adapter->itr_setting >> 2; | ||
421 | |||
422 | return 0; | ||
423 | } | ||
424 | |||
425 | static int igbvf_set_coalesce(struct net_device *netdev, | ||
426 | struct ethtool_coalesce *ec) | ||
427 | { | ||
428 | struct igbvf_adapter *adapter = netdev_priv(netdev); | ||
429 | struct e1000_hw *hw = &adapter->hw; | ||
430 | |||
431 | if ((ec->rx_coalesce_usecs > IGBVF_MAX_ITR_USECS) || | ||
432 | ((ec->rx_coalesce_usecs > 3) && | ||
433 | (ec->rx_coalesce_usecs < IGBVF_MIN_ITR_USECS)) || | ||
434 | (ec->rx_coalesce_usecs == 2)) | ||
435 | return -EINVAL; | ||
436 | |||
437 | /* convert to rate of irq's per second */ | ||
438 | if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) { | ||
439 | adapter->itr = IGBVF_START_ITR; | ||
440 | adapter->itr_setting = ec->rx_coalesce_usecs; | ||
441 | } else { | ||
442 | adapter->itr = ec->rx_coalesce_usecs << 2; | ||
443 | adapter->itr_setting = adapter->itr; | ||
444 | } | ||
445 | |||
446 | writel(adapter->itr, | ||
447 | hw->hw_addr + adapter->rx_ring[0].itr_register); | ||
448 | |||
449 | return 0; | ||
450 | } | ||
451 | |||
452 | static int igbvf_nway_reset(struct net_device *netdev) | ||
453 | { | ||
454 | struct igbvf_adapter *adapter = netdev_priv(netdev); | ||
455 | if (netif_running(netdev)) | ||
456 | igbvf_reinit_locked(adapter); | ||
457 | return 0; | ||
458 | } | ||
459 | |||
460 | |||
461 | static void igbvf_get_ethtool_stats(struct net_device *netdev, | ||
462 | struct ethtool_stats *stats, | ||
463 | u64 *data) | ||
464 | { | ||
465 | struct igbvf_adapter *adapter = netdev_priv(netdev); | ||
466 | int i; | ||
467 | |||
468 | igbvf_update_stats(adapter); | ||
469 | for (i = 0; i < IGBVF_GLOBAL_STATS_LEN; i++) { | ||
470 | char *p = (char *)adapter + | ||
471 | igbvf_gstrings_stats[i].stat_offset; | ||
472 | char *b = (char *)adapter + | ||
473 | igbvf_gstrings_stats[i].base_stat_offset; | ||
474 | data[i] = ((igbvf_gstrings_stats[i].sizeof_stat == | ||
475 | sizeof(u64)) ? (*(u64 *)p - *(u64 *)b) : | ||
476 | (*(u32 *)p - *(u32 *)b)); | ||
477 | } | ||
478 | |||
479 | } | ||
480 | |||
481 | static void igbvf_get_strings(struct net_device *netdev, u32 stringset, | ||
482 | u8 *data) | ||
483 | { | ||
484 | u8 *p = data; | ||
485 | int i; | ||
486 | |||
487 | switch (stringset) { | ||
488 | case ETH_SS_TEST: | ||
489 | memcpy(data, *igbvf_gstrings_test, sizeof(igbvf_gstrings_test)); | ||
490 | break; | ||
491 | case ETH_SS_STATS: | ||
492 | for (i = 0; i < IGBVF_GLOBAL_STATS_LEN; i++) { | ||
493 | memcpy(p, igbvf_gstrings_stats[i].stat_string, | ||
494 | ETH_GSTRING_LEN); | ||
495 | p += ETH_GSTRING_LEN; | ||
496 | } | ||
497 | break; | ||
498 | } | ||
499 | } | ||
500 | |||
501 | static const struct ethtool_ops igbvf_ethtool_ops = { | ||
502 | .get_settings = igbvf_get_settings, | ||
503 | .set_settings = igbvf_set_settings, | ||
504 | .get_drvinfo = igbvf_get_drvinfo, | ||
505 | .get_regs_len = igbvf_get_regs_len, | ||
506 | .get_regs = igbvf_get_regs, | ||
507 | .get_wol = igbvf_get_wol, | ||
508 | .set_wol = igbvf_set_wol, | ||
509 | .get_msglevel = igbvf_get_msglevel, | ||
510 | .set_msglevel = igbvf_set_msglevel, | ||
511 | .nway_reset = igbvf_nway_reset, | ||
512 | .get_link = igbvf_get_link, | ||
513 | .get_eeprom_len = igbvf_get_eeprom_len, | ||
514 | .get_eeprom = igbvf_get_eeprom, | ||
515 | .set_eeprom = igbvf_set_eeprom, | ||
516 | .get_ringparam = igbvf_get_ringparam, | ||
517 | .set_ringparam = igbvf_set_ringparam, | ||
518 | .get_pauseparam = igbvf_get_pauseparam, | ||
519 | .set_pauseparam = igbvf_set_pauseparam, | ||
520 | .get_tx_csum = igbvf_get_tx_csum, | ||
521 | .set_tx_csum = igbvf_set_tx_csum, | ||
522 | .get_sg = ethtool_op_get_sg, | ||
523 | .set_sg = ethtool_op_set_sg, | ||
524 | .get_tso = ethtool_op_get_tso, | ||
525 | .set_tso = igbvf_set_tso, | ||
526 | .self_test = igbvf_diag_test, | ||
527 | .get_strings = igbvf_get_strings, | ||
528 | .phys_id = igbvf_phys_id, | ||
529 | .get_ethtool_stats = igbvf_get_ethtool_stats, | ||
530 | .self_test_count = igbvf_get_self_test_count, | ||
531 | .get_stats_count = igbvf_get_stats_count, | ||
532 | .get_coalesce = igbvf_get_coalesce, | ||
533 | .set_coalesce = igbvf_set_coalesce, | ||
534 | }; | ||
535 | |||
536 | void igbvf_set_ethtool_ops(struct net_device *netdev) | ||
537 | { | ||
538 | /* have to "undeclare" const on this struct to remove warnings */ | ||
539 | SET_ETHTOOL_OPS(netdev, (struct ethtool_ops *)&igbvf_ethtool_ops); | ||
540 | } | ||
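The statistics table in the ethtool code above relies on a base-snapshot trick: VF hardware counters never clear on read, so each igbvf_stats entry records the offset of both the live counter and a baseline field inside struct igbvf_adapter, and igbvf_get_ethtool_stats() reports the difference between the two. Pulled out into one helper, the per-entry calculation looks like this (condensed from the loop in igbvf_get_ethtool_stats() above):

static u64 example_one_stat(const struct igbvf_adapter *adapter,
			    const struct igbvf_stats *s)
{
	const char *p = (const char *)adapter + s->stat_offset;
	const char *b = (const char *)adapter + s->base_stat_offset;

	/* 64-bit and 32-bit counters are stored with different widths */
	return (s->sizeof_stat == sizeof(u64)) ?
		(*(const u64 *)p - *(const u64 *)b) :
		(*(const u32 *)p - *(const u32 *)b);
}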
diff --git a/drivers/net/igbvf/igbvf.h b/drivers/net/igbvf/igbvf.h new file mode 100644 index 000000000000..4bff35e46871 --- /dev/null +++ b/drivers/net/igbvf/igbvf.h | |||
@@ -0,0 +1,332 @@ | |||
1 | /******************************************************************************* | ||
2 | |||
3 | Intel(R) 82576 Virtual Function Linux driver | ||
4 | Copyright(c) 2009 Intel Corporation. | ||
5 | |||
6 | This program is free software; you can redistribute it and/or modify it | ||
7 | under the terms and conditions of the GNU General Public License, | ||
8 | version 2, as published by the Free Software Foundation. | ||
9 | |||
10 | This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | more details. | ||
14 | |||
15 | You should have received a copy of the GNU General Public License along with | ||
16 | this program; if not, write to the Free Software Foundation, Inc., | ||
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | |||
19 | The full GNU General Public License is included in this distribution in | ||
20 | the file called "COPYING". | ||
21 | |||
22 | Contact Information: | ||
23 | e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | ||
24 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
25 | |||
26 | *******************************************************************************/ | ||
27 | |||
28 | /* Linux PRO/1000 Ethernet Driver main header file */ | ||
29 | |||
30 | #ifndef _IGBVF_H_ | ||
31 | #define _IGBVF_H_ | ||
32 | |||
33 | #include <linux/types.h> | ||
34 | #include <linux/timer.h> | ||
35 | #include <linux/io.h> | ||
36 | #include <linux/netdevice.h> | ||
37 | |||
38 | |||
39 | #include "vf.h" | ||
40 | |||
41 | /* Forward declarations */ | ||
42 | struct igbvf_info; | ||
43 | struct igbvf_adapter; | ||
44 | |||
45 | /* Interrupt defines */ | ||
46 | #define IGBVF_START_ITR 648 /* ~6000 ints/sec */ | ||
47 | |||
48 | /* Interrupt modes, as used by the IntMode parameter */ | ||
49 | #define IGBVF_INT_MODE_LEGACY 0 | ||
50 | #define IGBVF_INT_MODE_MSI 1 | ||
51 | #define IGBVF_INT_MODE_MSIX 2 | ||
52 | |||
53 | /* Tx/Rx descriptor defines */ | ||
54 | #define IGBVF_DEFAULT_TXD 256 | ||
55 | #define IGBVF_MAX_TXD 4096 | ||
56 | #define IGBVF_MIN_TXD 80 | ||
57 | |||
58 | #define IGBVF_DEFAULT_RXD 256 | ||
59 | #define IGBVF_MAX_RXD 4096 | ||
60 | #define IGBVF_MIN_RXD 80 | ||
61 | |||
62 | #define IGBVF_MIN_ITR_USECS 10 /* 100000 irq/sec */ | ||
63 | #define IGBVF_MAX_ITR_USECS 10000 /* 100 irq/sec */ | ||
64 | |||
65 | /* RX descriptor control thresholds. | ||
66 | * PTHRESH - MAC will consider prefetch if it has fewer than this number of | ||
67 | * descriptors available in its onboard memory. | ||
68 | * Setting this to 0 disables RX descriptor prefetch. | ||
69 | * HTHRESH - MAC will only prefetch if there are at least this many descriptors | ||
70 | * available in host memory. | ||
71 | * If PTHRESH is 0, this should also be 0. | ||
72 | * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back | ||
73 | * descriptors until either it has this many to write back, or the | ||
74 | * ITR timer expires. | ||
75 | */ | ||
76 | #define IGBVF_RX_PTHRESH 16 | ||
77 | #define IGBVF_RX_HTHRESH 8 | ||
78 | #define IGBVF_RX_WTHRESH 1 | ||
79 | |||
80 | /* this is the size past which hardware will drop packets when setting LPE=0 */ | ||
81 | #define MAXIMUM_ETHERNET_VLAN_SIZE 1522 | ||
82 | |||
83 | #define IGBVF_FC_PAUSE_TIME 0x0680 /* 858 usec */ | ||
84 | |||
85 | /* How many Tx Descriptors do we need to call netif_wake_queue ? */ | ||
86 | #define IGBVF_TX_QUEUE_WAKE 32 | ||
87 | /* How many Rx Buffers do we bundle into one write to the hardware ? */ | ||
88 | #define IGBVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */ | ||
89 | |||
90 | #define AUTO_ALL_MODES 0 | ||
91 | #define IGBVF_EEPROM_APME 0x0400 | ||
92 | |||
93 | #define IGBVF_MNG_VLAN_NONE (-1) | ||
94 | |||
95 | /* Number of packet split data buffers (not including the header buffer) */ | ||
96 | #define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1) | ||
97 | |||
98 | enum igbvf_boards { | ||
99 | board_vf, | ||
100 | }; | ||
101 | |||
102 | struct igbvf_queue_stats { | ||
103 | u64 packets; | ||
104 | u64 bytes; | ||
105 | }; | ||
106 | |||
107 | /* | ||
108 | * wrappers around a pointer to a socket buffer, | ||
109 | * so a DMA handle can be stored along with the buffer | ||
110 | */ | ||
111 | struct igbvf_buffer { | ||
112 | dma_addr_t dma; | ||
113 | struct sk_buff *skb; | ||
114 | union { | ||
115 | /* Tx */ | ||
116 | struct { | ||
117 | unsigned long time_stamp; | ||
118 | u16 length; | ||
119 | u16 next_to_watch; | ||
120 | }; | ||
121 | /* Rx */ | ||
122 | struct { | ||
123 | struct page *page; | ||
124 | u64 page_dma; | ||
125 | unsigned int page_offset; | ||
126 | }; | ||
127 | }; | ||
128 | struct page *page; | ||
129 | }; | ||
130 | |||
131 | union igbvf_desc { | ||
132 | union e1000_adv_rx_desc rx_desc; | ||
133 | union e1000_adv_tx_desc tx_desc; | ||
134 | struct e1000_adv_tx_context_desc tx_context_desc; | ||
135 | }; | ||
136 | |||
137 | struct igbvf_ring { | ||
138 | struct igbvf_adapter *adapter; /* backlink */ | ||
139 | union igbvf_desc *desc; /* pointer to ring memory */ | ||
140 | dma_addr_t dma; /* phys address of ring */ | ||
141 | unsigned int size; /* length of ring in bytes */ | ||
142 | unsigned int count; /* number of desc. in ring */ | ||
143 | |||
144 | u16 next_to_use; | ||
145 | u16 next_to_clean; | ||
146 | |||
147 | u16 head; | ||
148 | u16 tail; | ||
149 | |||
150 | /* array of buffer information structs */ | ||
151 | struct igbvf_buffer *buffer_info; | ||
152 | struct napi_struct napi; | ||
153 | |||
154 | char name[IFNAMSIZ + 5]; | ||
155 | u32 eims_value; | ||
156 | u32 itr_val; | ||
157 | u16 itr_register; | ||
158 | int set_itr; | ||
159 | |||
160 | struct sk_buff *rx_skb_top; | ||
161 | |||
162 | struct igbvf_queue_stats stats; | ||
163 | }; | ||
164 | |||
165 | /* board specific private data structure */ | ||
166 | struct igbvf_adapter { | ||
167 | struct timer_list watchdog_timer; | ||
168 | struct timer_list blink_timer; | ||
169 | |||
170 | struct work_struct reset_task; | ||
171 | struct work_struct watchdog_task; | ||
172 | |||
173 | const struct igbvf_info *ei; | ||
174 | |||
175 | struct vlan_group *vlgrp; | ||
176 | u32 bd_number; | ||
177 | u32 rx_buffer_len; | ||
178 | u32 polling_interval; | ||
179 | u16 mng_vlan_id; | ||
180 | u16 link_speed; | ||
181 | u16 link_duplex; | ||
182 | |||
183 | spinlock_t tx_queue_lock; /* prevent concurrent tail updates */ | ||
184 | |||
185 | /* track device up/down/testing state */ | ||
186 | unsigned long state; | ||
187 | |||
188 | /* Interrupt Throttle Rate */ | ||
189 | u32 itr; | ||
190 | u32 itr_setting; | ||
191 | u16 tx_itr; | ||
192 | u16 rx_itr; | ||
193 | |||
194 | /* | ||
195 | * Tx | ||
196 | */ | ||
197 | struct igbvf_ring *tx_ring /* One per active queue */ | ||
198 | ____cacheline_aligned_in_smp; | ||
199 | |||
200 | unsigned long tx_queue_len; | ||
201 | unsigned int restart_queue; | ||
202 | u32 txd_cmd; | ||
203 | |||
204 | bool detect_tx_hung; | ||
205 | u8 tx_timeout_factor; | ||
206 | |||
207 | u32 tx_int_delay; | ||
208 | u32 tx_abs_int_delay; | ||
209 | |||
210 | unsigned int total_tx_bytes; | ||
211 | unsigned int total_tx_packets; | ||
212 | unsigned int total_rx_bytes; | ||
213 | unsigned int total_rx_packets; | ||
214 | |||
215 | /* Tx stats */ | ||
216 | u32 tx_timeout_count; | ||
217 | u32 tx_fifo_head; | ||
218 | u32 tx_head_addr; | ||
219 | u32 tx_fifo_size; | ||
220 | u32 tx_dma_failed; | ||
221 | |||
222 | /* | ||
223 | * Rx | ||
224 | */ | ||
225 | struct igbvf_ring *rx_ring; | ||
226 | |||
227 | u32 rx_int_delay; | ||
228 | u32 rx_abs_int_delay; | ||
229 | |||
230 | /* Rx stats */ | ||
231 | u64 hw_csum_err; | ||
232 | u64 hw_csum_good; | ||
233 | u64 rx_hdr_split; | ||
234 | u32 alloc_rx_buff_failed; | ||
235 | u32 rx_dma_failed; | ||
236 | |||
237 | unsigned int rx_ps_hdr_size; | ||
238 | u32 max_frame_size; | ||
239 | u32 min_frame_size; | ||
240 | |||
241 | /* OS defined structs */ | ||
242 | struct net_device *netdev; | ||
243 | struct pci_dev *pdev; | ||
244 | struct net_device_stats net_stats; | ||
245 | spinlock_t stats_lock; /* prevent concurrent stats updates */ | ||
246 | |||
247 | /* structs defined in e1000_hw.h */ | ||
248 | struct e1000_hw hw; | ||
249 | |||
250 | /* The VF counters don't clear on read so we have to get a base | ||
251 | * count on driver start up and always subtract that base | ||
252 | * on the first update, thus the flag. | ||
253 | */ | ||
254 | struct e1000_vf_stats stats; | ||
255 | u64 zero_base; | ||
256 | |||
257 | struct igbvf_ring test_tx_ring; | ||
258 | struct igbvf_ring test_rx_ring; | ||
259 | u32 test_icr; | ||
260 | |||
261 | u32 msg_enable; | ||
262 | struct msix_entry *msix_entries; | ||
263 | int int_mode; | ||
264 | u32 eims_enable_mask; | ||
265 | u32 eims_other; | ||
266 | u32 int_counter0; | ||
267 | u32 int_counter1; | ||
268 | |||
269 | u32 eeprom_wol; | ||
270 | u32 wol; | ||
271 | u32 pba; | ||
272 | |||
273 | bool fc_autoneg; | ||
274 | |||
275 | unsigned long led_status; | ||
276 | |||
277 | unsigned int flags; | ||
278 | }; | ||
279 | |||
280 | struct igbvf_info { | ||
281 | enum e1000_mac_type mac; | ||
282 | unsigned int flags; | ||
283 | u32 pba; | ||
284 | void (*init_ops)(struct e1000_hw *); | ||
285 | s32 (*get_variants)(struct igbvf_adapter *); | ||
286 | }; | ||
287 | |||
288 | /* hardware capability, feature, and workaround flags */ | ||
289 | #define FLAG_HAS_HW_VLAN_FILTER (1 << 0) | ||
290 | #define FLAG_HAS_JUMBO_FRAMES (1 << 1) | ||
291 | #define FLAG_MSI_ENABLED (1 << 2) | ||
292 | #define FLAG_RX_CSUM_ENABLED (1 << 3) | ||
293 | #define FLAG_TSO_FORCE (1 << 4) | ||
294 | |||
295 | #define IGBVF_RX_DESC_ADV(R, i) \ | ||
296 | (&((((R).desc))[i].rx_desc)) | ||
297 | #define IGBVF_TX_DESC_ADV(R, i) \ | ||
298 | (&((((R).desc))[i].tx_desc)) | ||
299 | #define IGBVF_TX_CTXTDESC_ADV(R, i) \ | ||
300 | (&((((R).desc))[i].tx_context_desc)) | ||
301 | |||
302 | enum igbvf_state_t { | ||
303 | __IGBVF_TESTING, | ||
304 | __IGBVF_RESETTING, | ||
305 | __IGBVF_DOWN | ||
306 | }; | ||
307 | |||
308 | enum latency_range { | ||
309 | lowest_latency = 0, | ||
310 | low_latency = 1, | ||
311 | bulk_latency = 2, | ||
312 | latency_invalid = 255 | ||
313 | }; | ||
314 | |||
315 | extern char igbvf_driver_name[]; | ||
316 | extern const char igbvf_driver_version[]; | ||
317 | |||
318 | extern void igbvf_check_options(struct igbvf_adapter *); | ||
319 | extern void igbvf_set_ethtool_ops(struct net_device *); | ||
320 | |||
321 | extern int igbvf_up(struct igbvf_adapter *); | ||
322 | extern void igbvf_down(struct igbvf_adapter *); | ||
323 | extern void igbvf_reinit_locked(struct igbvf_adapter *); | ||
324 | extern int igbvf_setup_rx_resources(struct igbvf_adapter *, struct igbvf_ring *); | ||
325 | extern int igbvf_setup_tx_resources(struct igbvf_adapter *, struct igbvf_ring *); | ||
326 | extern void igbvf_free_rx_resources(struct igbvf_ring *); | ||
327 | extern void igbvf_free_tx_resources(struct igbvf_ring *); | ||
328 | extern void igbvf_update_stats(struct igbvf_adapter *); | ||
329 | |||
330 | extern unsigned int copybreak; | ||
331 | |||
332 | #endif /* _IGBVF_H_ */ | ||
diff --git a/drivers/net/igbvf/mbx.c b/drivers/net/igbvf/mbx.c new file mode 100644 index 000000000000..819a8ec901dc --- /dev/null +++ b/drivers/net/igbvf/mbx.c | |||
@@ -0,0 +1,350 @@ | |||
1 | /******************************************************************************* | ||
2 | |||
3 | Intel(R) 82576 Virtual Function Linux driver | ||
4 | Copyright(c) 2009 Intel Corporation. | ||
5 | |||
6 | This program is free software; you can redistribute it and/or modify it | ||
7 | under the terms and conditions of the GNU General Public License, | ||
8 | version 2, as published by the Free Software Foundation. | ||
9 | |||
10 | This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | more details. | ||
14 | |||
15 | You should have received a copy of the GNU General Public License along with | ||
16 | this program; if not, write to the Free Software Foundation, Inc., | ||
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | |||
19 | The full GNU General Public License is included in this distribution in | ||
20 | the file called "COPYING". | ||
21 | |||
22 | Contact Information: | ||
23 | e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | ||
24 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
25 | |||
26 | *******************************************************************************/ | ||
27 | |||
28 | #include "mbx.h" | ||
29 | |||
30 | /** | ||
31 | * e1000_poll_for_msg - Wait for message notification | ||
32 | * @hw: pointer to the HW structure | ||
33 | * | ||
34 | * returns SUCCESS if it successfully received a message notification | ||
35 | **/ | ||
36 | static s32 e1000_poll_for_msg(struct e1000_hw *hw) | ||
37 | { | ||
38 | struct e1000_mbx_info *mbx = &hw->mbx; | ||
39 | int countdown = mbx->timeout; | ||
40 | |||
41 | if (!mbx->ops.check_for_msg) | ||
42 | goto out; | ||
43 | |||
44 | while (countdown && mbx->ops.check_for_msg(hw)) { | ||
45 | countdown--; | ||
46 | udelay(mbx->usec_delay); | ||
47 | } | ||
48 | |||
49 | /* if we failed, all future posted messages fail until reset */ | ||
50 | if (!countdown) | ||
51 | mbx->timeout = 0; | ||
52 | out: | ||
53 | return countdown ? E1000_SUCCESS : -E1000_ERR_MBX; | ||
54 | } | ||
55 | |||
56 | /** | ||
57 | * e1000_poll_for_ack - Wait for message acknowledgement | ||
58 | * @hw: pointer to the HW structure | ||
59 | * | ||
60 | * returns SUCCESS if it successfully received a message acknowledgement | ||
61 | **/ | ||
62 | static s32 e1000_poll_for_ack(struct e1000_hw *hw) | ||
63 | { | ||
64 | struct e1000_mbx_info *mbx = &hw->mbx; | ||
65 | int countdown = mbx->timeout; | ||
66 | |||
67 | if (!mbx->ops.check_for_ack) | ||
68 | goto out; | ||
69 | |||
70 | while (countdown && mbx->ops.check_for_ack(hw)) { | ||
71 | countdown--; | ||
72 | udelay(mbx->usec_delay); | ||
73 | } | ||
74 | |||
75 | /* if we failed, all future posted messages fail until reset */ | ||
76 | if (!countdown) | ||
77 | mbx->timeout = 0; | ||
78 | out: | ||
79 | return countdown ? E1000_SUCCESS : -E1000_ERR_MBX; | ||
80 | } | ||
81 | |||
82 | /** | ||
83 | * e1000_read_posted_mbx - Wait for message notification and receive message | ||
84 | * @hw: pointer to the HW structure | ||
85 | * @msg: The message buffer | ||
86 | * @size: Length of buffer | ||
87 | * | ||
88 | * returns SUCCESS if it successfully received a message notification and | ||
89 | * copied it into the receive buffer. | ||
90 | **/ | ||
91 | static s32 e1000_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size) | ||
92 | { | ||
93 | struct e1000_mbx_info *mbx = &hw->mbx; | ||
94 | s32 ret_val = -E1000_ERR_MBX; | ||
95 | |||
96 | if (!mbx->ops.read) | ||
97 | goto out; | ||
98 | |||
99 | ret_val = e1000_poll_for_msg(hw); | ||
100 | |||
101 | /* if a message was received, read it, otherwise we timed out */ | ||
102 | if (!ret_val) | ||
103 | ret_val = mbx->ops.read(hw, msg, size); | ||
104 | out: | ||
105 | return ret_val; | ||
106 | } | ||
107 | |||
108 | /** | ||
109 | * e1000_write_posted_mbx - Write a message to the mailbox, wait for ack | ||
110 | * @hw: pointer to the HW structure | ||
111 | * @msg: The message buffer | ||
112 | * @size: Length of buffer | ||
113 | * | ||
114 | * returns SUCCESS if it successfully copied message into the buffer and | ||
115 | * received an ack to that message within delay * timeout period | ||
116 | **/ | ||
117 | static s32 e1000_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size) | ||
118 | { | ||
119 | struct e1000_mbx_info *mbx = &hw->mbx; | ||
120 | s32 ret_val = -E1000_ERR_MBX; | ||
121 | |||
122 | /* exit if we either can't write or there isn't a defined timeout */ | ||
123 | if (!mbx->ops.write || !mbx->timeout) | ||
124 | goto out; | ||
125 | |||
126 | /* send msg */ | ||
127 | ret_val = mbx->ops.write(hw, msg, size); | ||
128 | |||
129 | /* if msg sent wait until we receive an ack */ | ||
130 | if (!ret_val) | ||
131 | ret_val = e1000_poll_for_ack(hw); | ||
132 | out: | ||
133 | return ret_val; | ||
134 | } | ||
135 | |||
136 | /** | ||
137 | * e1000_read_v2p_mailbox - read v2p mailbox | ||
138 | * @hw: pointer to the HW structure | ||
139 | * | ||
140 | * This function is used to read the v2p mailbox without losing the read to | ||
141 | * clear status bits. | ||
142 | **/ | ||
143 | static u32 e1000_read_v2p_mailbox(struct e1000_hw *hw) | ||
144 | { | ||
145 | u32 v2p_mailbox = er32(V2PMAILBOX(0)); | ||
146 | |||
147 | v2p_mailbox |= hw->dev_spec.vf.v2p_mailbox; | ||
148 | hw->dev_spec.vf.v2p_mailbox |= v2p_mailbox & E1000_V2PMAILBOX_R2C_BITS; | ||
149 | |||
150 | return v2p_mailbox; | ||
151 | } | ||
152 | |||
153 | /** | ||
154 | * e1000_check_for_bit_vf - Determine if a status bit was set | ||
155 | * @hw: pointer to the HW structure | ||
156 | * @mask: bitmask for bits to be tested and cleared | ||
157 | * | ||
158 | * This function is used to check for the read to clear bits within | ||
159 | * the V2P mailbox. | ||
160 | **/ | ||
161 | static s32 e1000_check_for_bit_vf(struct e1000_hw *hw, u32 mask) | ||
162 | { | ||
163 | u32 v2p_mailbox = e1000_read_v2p_mailbox(hw); | ||
164 | s32 ret_val = -E1000_ERR_MBX; | ||
165 | |||
166 | if (v2p_mailbox & mask) | ||
167 | ret_val = E1000_SUCCESS; | ||
168 | |||
169 | hw->dev_spec.vf.v2p_mailbox &= ~mask; | ||
170 | |||
171 | return ret_val; | ||
172 | } | ||
173 | |||
174 | /** | ||
175 | * e1000_check_for_msg_vf - checks to see if the PF has sent mail | ||
176 | * @hw: pointer to the HW structure | ||
177 | * | ||
178 | * returns SUCCESS if the PF has set the Status bit or else ERR_MBX | ||
179 | **/ | ||
180 | static s32 e1000_check_for_msg_vf(struct e1000_hw *hw) | ||
181 | { | ||
182 | s32 ret_val = -E1000_ERR_MBX; | ||
183 | |||
184 | if (!e1000_check_for_bit_vf(hw, E1000_V2PMAILBOX_PFSTS)) { | ||
185 | ret_val = E1000_SUCCESS; | ||
186 | hw->mbx.stats.reqs++; | ||
187 | } | ||
188 | |||
189 | return ret_val; | ||
190 | } | ||
191 | |||
192 | /** | ||
193 | * e1000_check_for_ack_vf - checks to see if the PF has ACK'd | ||
194 | * @hw: pointer to the HW structure | ||
195 | * | ||
196 | * returns SUCCESS if the PF has set the ACK bit or else ERR_MBX | ||
197 | **/ | ||
198 | static s32 e1000_check_for_ack_vf(struct e1000_hw *hw) | ||
199 | { | ||
200 | s32 ret_val = -E1000_ERR_MBX; | ||
201 | |||
202 | if (!e1000_check_for_bit_vf(hw, E1000_V2PMAILBOX_PFACK)) { | ||
203 | ret_val = E1000_SUCCESS; | ||
204 | hw->mbx.stats.acks++; | ||
205 | } | ||
206 | |||
207 | return ret_val; | ||
208 | } | ||
209 | |||
210 | /** | ||
211 | * e1000_check_for_rst_vf - checks to see if the PF has reset | ||
212 | * @hw: pointer to the HW structure | ||
213 | * | ||
214 | * returns true if the PF has set the reset done bit or else false | ||
215 | **/ | ||
216 | static s32 e1000_check_for_rst_vf(struct e1000_hw *hw) | ||
217 | { | ||
218 | s32 ret_val = -E1000_ERR_MBX; | ||
219 | |||
220 | if (!e1000_check_for_bit_vf(hw, (E1000_V2PMAILBOX_RSTD | | ||
221 | E1000_V2PMAILBOX_RSTI))) { | ||
222 | ret_val = E1000_SUCCESS; | ||
223 | hw->mbx.stats.rsts++; | ||
224 | } | ||
225 | |||
226 | return ret_val; | ||
227 | } | ||
228 | |||
229 | /** | ||
230 | * e1000_obtain_mbx_lock_vf - obtain mailbox lock | ||
231 | * @hw: pointer to the HW structure | ||
232 | * | ||
233 | * return SUCCESS if we obtained the mailbox lock | ||
234 | **/ | ||
235 | static s32 e1000_obtain_mbx_lock_vf(struct e1000_hw *hw) | ||
236 | { | ||
237 | s32 ret_val = -E1000_ERR_MBX; | ||
238 | |||
239 | /* Take ownership of the buffer */ | ||
240 | ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_VFU); | ||
241 | |||
242 | /* reserve mailbox for vf use */ | ||
243 | if (e1000_read_v2p_mailbox(hw) & E1000_V2PMAILBOX_VFU) | ||
244 | ret_val = E1000_SUCCESS; | ||
245 | |||
246 | return ret_val; | ||
247 | } | ||
248 | |||
249 | /** | ||
250 | * e1000_write_mbx_vf - Write a message to the mailbox | ||
251 | * @hw: pointer to the HW structure | ||
252 | * @msg: The message buffer | ||
253 | * @size: Length of buffer | ||
254 | * | ||
255 | * returns SUCCESS if it successfully copied message into the buffer | ||
256 | **/ | ||
257 | static s32 e1000_write_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size) | ||
258 | { | ||
259 | s32 err; | ||
260 | u16 i; | ||
261 | |||
262 | /* lock the mailbox to prevent pf/vf race condition */ | ||
263 | err = e1000_obtain_mbx_lock_vf(hw); | ||
264 | if (err) | ||
265 | goto out_no_write; | ||
266 | |||
267 | /* flush any ack or msg as we are going to overwrite mailbox */ | ||
268 | e1000_check_for_ack_vf(hw); | ||
269 | e1000_check_for_msg_vf(hw); | ||
270 | |||
271 | /* copy the caller specified message to the mailbox memory buffer */ | ||
272 | for (i = 0; i < size; i++) | ||
273 | array_ew32(VMBMEM(0), i, msg[i]); | ||
274 | |||
275 | /* update stats */ | ||
276 | hw->mbx.stats.msgs_tx++; | ||
277 | |||
278 | /* Drop VFU and interrupt the PF to tell it a message has been sent */ | ||
279 | ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_REQ); | ||
280 | |||
281 | out_no_write: | ||
282 | return err; | ||
283 | } | ||
284 | |||
285 | /** | ||
286 | * e1000_read_mbx_vf - Reads a message from the inbox intended for vf | ||
287 | * @hw: pointer to the HW structure | ||
288 | * @msg: The message buffer | ||
289 | * @size: Length of buffer | ||
290 | * | ||
291 | * returns SUCCESS if it successfully read a message from the buffer | ||
292 | **/ | ||
293 | static s32 e1000_read_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size) | ||
294 | { | ||
295 | s32 err; | ||
296 | u16 i; | ||
297 | |||
298 | /* lock the mailbox to prevent pf/vf race condition */ | ||
299 | err = e1000_obtain_mbx_lock_vf(hw); | ||
300 | if (err) | ||
301 | goto out_no_read; | ||
302 | |||
303 | /* copy the message from the mailbox memory buffer */ | ||
304 | for (i = 0; i < size; i++) | ||
305 | msg[i] = array_er32(VMBMEM(0), i); | ||
306 | |||
307 | /* Acknowledge receipt and release mailbox, then we're done */ | ||
308 | ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_ACK); | ||
309 | |||
310 | /* update stats */ | ||
311 | hw->mbx.stats.msgs_rx++; | ||
312 | |||
313 | out_no_read: | ||
314 | return err; | ||
315 | } | ||
316 | |||
317 | /** | ||
318 | * e1000_init_mbx_params_vf - set initial values for vf mailbox | ||
319 | * @hw: pointer to the HW structure | ||
320 | * | ||
321 | * Initializes the hw->mbx struct to correct values for vf mailbox | ||
322 | */ | ||
323 | s32 e1000_init_mbx_params_vf(struct e1000_hw *hw) | ||
324 | { | ||
325 | struct e1000_mbx_info *mbx = &hw->mbx; | ||
326 | |||
327 | /* start mailbox as timed out and let the reset_hw call set the timeout | ||
328 | * value to begin communications */ | ||
329 | mbx->timeout = 0; | ||
330 | mbx->usec_delay = E1000_VF_MBX_INIT_DELAY; | ||
331 | |||
332 | mbx->size = E1000_VFMAILBOX_SIZE; | ||
333 | |||
334 | mbx->ops.read = e1000_read_mbx_vf; | ||
335 | mbx->ops.write = e1000_write_mbx_vf; | ||
336 | mbx->ops.read_posted = e1000_read_posted_mbx; | ||
337 | mbx->ops.write_posted = e1000_write_posted_mbx; | ||
338 | mbx->ops.check_for_msg = e1000_check_for_msg_vf; | ||
339 | mbx->ops.check_for_ack = e1000_check_for_ack_vf; | ||
340 | mbx->ops.check_for_rst = e1000_check_for_rst_vf; | ||
341 | |||
342 | mbx->stats.msgs_tx = 0; | ||
343 | mbx->stats.msgs_rx = 0; | ||
344 | mbx->stats.reqs = 0; | ||
345 | mbx->stats.acks = 0; | ||
346 | mbx->stats.rsts = 0; | ||
347 | |||
348 | return E1000_SUCCESS; | ||
349 | } | ||
350 | |||
diff --git a/drivers/net/igbvf/mbx.h b/drivers/net/igbvf/mbx.h new file mode 100644 index 000000000000..4938609dbfb5 --- /dev/null +++ b/drivers/net/igbvf/mbx.h | |||
@@ -0,0 +1,75 @@ | |||
1 | /******************************************************************************* | ||
2 | |||
3 | Intel(R) 82576 Virtual Function Linux driver | ||
4 | Copyright(c) 1999 - 2009 Intel Corporation. | ||
5 | |||
6 | This program is free software; you can redistribute it and/or modify it | ||
7 | under the terms and conditions of the GNU General Public License, | ||
8 | version 2, as published by the Free Software Foundation. | ||
9 | |||
10 | This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | more details. | ||
14 | |||
15 | You should have received a copy of the GNU General Public License along with | ||
16 | this program; if not, write to the Free Software Foundation, Inc., | ||
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | |||
19 | The full GNU General Public License is included in this distribution in | ||
20 | the file called "COPYING". | ||
21 | |||
22 | Contact Information: | ||
23 | e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | ||
24 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
25 | |||
26 | *******************************************************************************/ | ||
27 | |||
28 | #ifndef _E1000_MBX_H_ | ||
29 | #define _E1000_MBX_H_ | ||
30 | |||
31 | #include "vf.h" | ||
32 | |||
33 | #define E1000_V2PMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */ | ||
34 | #define E1000_V2PMAILBOX_ACK 0x00000002 /* Ack PF message received */ | ||
35 | #define E1000_V2PMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ | ||
36 | #define E1000_V2PMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ | ||
37 | #define E1000_V2PMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */ | ||
38 | #define E1000_V2PMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */ | ||
39 | #define E1000_V2PMAILBOX_RSTI 0x00000040 /* PF has reset indication */ | ||
40 | #define E1000_V2PMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */ | ||
41 | #define E1000_V2PMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */ | ||
42 | |||
43 | #define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ | ||
44 | |||
45 | /* If it's an E1000_VF_* msg then it originates in the VF and is sent to the | ||
46 | * PF. The reverse is true if it is E1000_PF_*. | ||
47 | * Message ACK's are the value or'd with 0xF0000000 | ||
48 | */ | ||
49 | #define E1000_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with | ||
50 | * this are the ACK */ | ||
51 | #define E1000_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with | ||
52 | * this are the NACK */ | ||
53 | #define E1000_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still | ||
54 | clear to send requests */ | ||
55 | |||
56 | /* We have a total wait time of 1s for vf mailbox posted messages */ | ||
57 | #define E1000_VF_MBX_INIT_TIMEOUT 2000 /* retry count for mailbox timeout */ | ||
58 | #define E1000_VF_MBX_INIT_DELAY 500 /* usec delay between retries */ | ||
59 | |||
60 | #define E1000_VT_MSGINFO_SHIFT 16 | ||
61 | /* bits 23:16 are used for extra info for certain messages */ | ||
62 | #define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT) | ||
63 | |||
64 | #define E1000_VF_RESET 0x01 /* VF requests reset */ | ||
65 | #define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ | ||
66 | #define E1000_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ | ||
67 | #define E1000_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ | ||
68 | #define E1000_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ | ||
69 | |||
70 | #define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */ | ||
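To make the message convention above concrete, here is a hedged sketch of a posted VF-to-PF exchange; igbvf_example_post_reset is a hypothetical helper, and it assumes the mbx.ops function pointers installed by e1000_init_mbx_params_vf() in mbx.c:

/* Hypothetical illustration: post a request to the PF and check that the
 * reply carries the same message type with the ACK bit set. */
static s32 igbvf_example_post_reset(struct e1000_hw *hw)
{
	u32 msg[2];
	s32 err;

	msg[0] = E1000_VF_RESET;
	err = hw->mbx.ops.write_posted(hw, msg, 1);
	if (err)
		return err;

	err = hw->mbx.ops.read_posted(hw, msg, 2);
	if (err)
		return err;

	/* the PF answers with the request or'd with ACK, or NACK on failure */
	return (msg[0] == (E1000_VF_RESET | E1000_VT_MSGTYPE_ACK)) ?
	       E1000_SUCCESS : -E1000_ERR_MBX;
}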
71 | |||
72 | void e1000_init_mbx_ops_generic(struct e1000_hw *hw); | ||
73 | s32 e1000_init_mbx_params_vf(struct e1000_hw *); | ||
74 | |||
75 | #endif /* _E1000_MBX_H_ */ | ||
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c new file mode 100644 index 000000000000..b774666ad3cf --- /dev/null +++ b/drivers/net/igbvf/netdev.c | |||
@@ -0,0 +1,2922 @@ | |||
1 | /******************************************************************************* | ||
2 | |||
3 | Intel(R) 82576 Virtual Function Linux driver | ||
4 | Copyright(c) 2009 Intel Corporation. | ||
5 | |||
6 | This program is free software; you can redistribute it and/or modify it | ||
7 | under the terms and conditions of the GNU General Public License, | ||
8 | version 2, as published by the Free Software Foundation. | ||
9 | |||
10 | This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | more details. | ||
14 | |||
15 | You should have received a copy of the GNU General Public License along with | ||
16 | this program; if not, write to the Free Software Foundation, Inc., | ||
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | |||
19 | The full GNU General Public License is included in this distribution in | ||
20 | the file called "COPYING". | ||
21 | |||
22 | Contact Information: | ||
23 | e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | ||
24 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
25 | |||
26 | *******************************************************************************/ | ||
27 | |||
28 | #include <linux/module.h> | ||
29 | #include <linux/types.h> | ||
30 | #include <linux/init.h> | ||
31 | #include <linux/pci.h> | ||
32 | #include <linux/vmalloc.h> | ||
33 | #include <linux/pagemap.h> | ||
34 | #include <linux/delay.h> | ||
35 | #include <linux/netdevice.h> | ||
36 | #include <linux/tcp.h> | ||
37 | #include <linux/ipv6.h> | ||
38 | #include <net/checksum.h> | ||
39 | #include <net/ip6_checksum.h> | ||
40 | #include <linux/mii.h> | ||
41 | #include <linux/ethtool.h> | ||
42 | #include <linux/if_vlan.h> | ||
43 | #include <linux/pm_qos_params.h> | ||
44 | |||
45 | #include "igbvf.h" | ||
46 | |||
47 | #define DRV_VERSION "1.0.0-k0" | ||
48 | char igbvf_driver_name[] = "igbvf"; | ||
49 | const char igbvf_driver_version[] = DRV_VERSION; | ||
50 | static const char igbvf_driver_string[] = | ||
51 | "Intel(R) Virtual Function Network Driver"; | ||
52 | static const char igbvf_copyright[] = "Copyright (c) 2009 Intel Corporation."; | ||
53 | |||
54 | static int igbvf_poll(struct napi_struct *napi, int budget); | ||
55 | static void igbvf_reset(struct igbvf_adapter *); | ||
56 | static void igbvf_set_interrupt_capability(struct igbvf_adapter *); | ||
57 | static void igbvf_reset_interrupt_capability(struct igbvf_adapter *); | ||
58 | |||
59 | static struct igbvf_info igbvf_vf_info = { | ||
60 | .mac = e1000_vfadapt, | ||
61 | .flags = FLAG_HAS_JUMBO_FRAMES | ||
62 | | FLAG_RX_CSUM_ENABLED, | ||
63 | .pba = 10, | ||
64 | .init_ops = e1000_init_function_pointers_vf, | ||
65 | }; | ||
66 | |||
67 | static const struct igbvf_info *igbvf_info_tbl[] = { | ||
68 | [board_vf] = &igbvf_vf_info, | ||
69 | }; | ||
70 | |||
71 | /** | ||
72 | * igbvf_desc_unused - calculate the number of unused descriptors | ||
73 | **/ | ||
74 | static int igbvf_desc_unused(struct igbvf_ring *ring) | ||
75 | { | ||
76 | if (ring->next_to_clean > ring->next_to_use) | ||
77 | return ring->next_to_clean - ring->next_to_use - 1; | ||
78 | |||
79 | return ring->count + ring->next_to_clean - ring->next_to_use - 1; | ||
80 | } | ||
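As a quick check of the arithmetic: with a 256-entry ring, next_to_clean = 250 and next_to_use = 10, the first branch gives 250 - 10 - 1 = 239 unused descriptors; with next_to_clean = 10 and next_to_use = 250, the wrap-around branch gives 256 + 10 - 250 - 1 = 15.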
81 | |||
82 | /** | ||
83 | * igbvf_receive_skb - helper function to handle Rx indications | ||
84 | * @adapter: board private structure | ||
85 | * @status: descriptor status field as written by hardware | ||
86 | * @vlan: descriptor vlan field as written by hardware (no le/be conversion) | ||
87 | * @skb: pointer to sk_buff to be indicated to stack | ||
88 | **/ | ||
89 | static void igbvf_receive_skb(struct igbvf_adapter *adapter, | ||
90 | struct net_device *netdev, | ||
91 | struct sk_buff *skb, | ||
92 | u32 status, u16 vlan) | ||
93 | { | ||
94 | if (adapter->vlgrp && (status & E1000_RXD_STAT_VP)) | ||
95 | vlan_hwaccel_receive_skb(skb, adapter->vlgrp, | ||
96 | le16_to_cpu(vlan) & | ||
97 | E1000_RXD_SPC_VLAN_MASK); | ||
98 | else | ||
99 | netif_receive_skb(skb); | ||
100 | |||
101 | netdev->last_rx = jiffies; | ||
102 | } | ||
103 | |||
104 | static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter, | ||
105 | u32 status_err, struct sk_buff *skb) | ||
106 | { | ||
107 | skb->ip_summed = CHECKSUM_NONE; | ||
108 | |||
109 | /* Skip if the Ignore Checksum bit is set or checksum is disabled through ethtool */ | ||
110 | if ((status_err & E1000_RXD_STAT_IXSM)) | ||
111 | return; | ||
112 | /* TCP/UDP checksum error bit is set */ | ||
113 | if (status_err & | ||
114 | (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) { | ||
115 | /* let the stack verify checksum errors */ | ||
116 | adapter->hw_csum_err++; | ||
117 | return; | ||
118 | } | ||
119 | /* It must be a TCP or UDP packet with a valid checksum */ | ||
120 | if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)) | ||
121 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
122 | |||
123 | adapter->hw_csum_good++; | ||
124 | } | ||
125 | |||
126 | /** | ||
127 | * igbvf_alloc_rx_buffers - Replace used receive buffers; packet split | ||
128 | * @rx_ring: address of ring structure to repopulate | ||
129 | * @cleaned_count: number of buffers to repopulate | ||
130 | **/ | ||
131 | static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring, | ||
132 | int cleaned_count) | ||
133 | { | ||
134 | struct igbvf_adapter *adapter = rx_ring->adapter; | ||
135 | struct net_device *netdev = adapter->netdev; | ||
136 | struct pci_dev *pdev = adapter->pdev; | ||
137 | union e1000_adv_rx_desc *rx_desc; | ||
138 | struct igbvf_buffer *buffer_info; | ||
139 | struct sk_buff *skb; | ||
140 | unsigned int i; | ||
141 | int bufsz; | ||
142 | |||
143 | i = rx_ring->next_to_use; | ||
144 | buffer_info = &rx_ring->buffer_info[i]; | ||
145 | |||
146 | if (adapter->rx_ps_hdr_size) | ||
147 | bufsz = adapter->rx_ps_hdr_size; | ||
148 | else | ||
149 | bufsz = adapter->rx_buffer_len; | ||
150 | bufsz += NET_IP_ALIGN; | ||
151 | |||
152 | while (cleaned_count--) { | ||
153 | rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i); | ||
154 | |||
155 | if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) { | ||
156 | if (!buffer_info->page) { | ||
157 | buffer_info->page = alloc_page(GFP_ATOMIC); | ||
158 | if (!buffer_info->page) { | ||
159 | adapter->alloc_rx_buff_failed++; | ||
160 | goto no_buffers; | ||
161 | } | ||
162 | buffer_info->page_offset = 0; | ||
163 | } else { | ||
164 | buffer_info->page_offset ^= PAGE_SIZE / 2; | ||
165 | } | ||
166 | buffer_info->page_dma = | ||
167 | pci_map_page(pdev, buffer_info->page, | ||
168 | buffer_info->page_offset, | ||
169 | PAGE_SIZE / 2, | ||
170 | PCI_DMA_FROMDEVICE); | ||
171 | } | ||
172 | |||
173 | if (!buffer_info->skb) { | ||
174 | skb = netdev_alloc_skb(netdev, bufsz); | ||
175 | if (!skb) { | ||
176 | adapter->alloc_rx_buff_failed++; | ||
177 | goto no_buffers; | ||
178 | } | ||
179 | |||
180 | /* Make buffer alignment 2 beyond a 16 byte boundary | ||
181 | * this will result in a 16 byte aligned IP header after | ||
182 | * the 14 byte MAC header is removed | ||
183 | */ | ||
184 | skb_reserve(skb, NET_IP_ALIGN); | ||
185 | |||
186 | buffer_info->skb = skb; | ||
187 | buffer_info->dma = pci_map_single(pdev, skb->data, | ||
188 | bufsz, | ||
189 | PCI_DMA_FROMDEVICE); | ||
190 | } | ||
191 | /* Refresh the desc even if buffer_addrs didn't change because | ||
192 | * each write-back erases this info. */ | ||
193 | if (adapter->rx_ps_hdr_size) { | ||
194 | rx_desc->read.pkt_addr = | ||
195 | cpu_to_le64(buffer_info->page_dma); | ||
196 | rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma); | ||
197 | } else { | ||
198 | rx_desc->read.pkt_addr = | ||
199 | cpu_to_le64(buffer_info->dma); | ||
200 | rx_desc->read.hdr_addr = 0; | ||
201 | } | ||
202 | |||
203 | i++; | ||
204 | if (i == rx_ring->count) | ||
205 | i = 0; | ||
206 | buffer_info = &rx_ring->buffer_info[i]; | ||
207 | } | ||
208 | |||
209 | no_buffers: | ||
210 | if (rx_ring->next_to_use != i) { | ||
211 | rx_ring->next_to_use = i; | ||
212 | if (i == 0) | ||
213 | i = (rx_ring->count - 1); | ||
214 | else | ||
215 | i--; | ||
216 | |||
217 | /* Force memory writes to complete before letting h/w | ||
218 | * know there are new descriptors to fetch. (Only | ||
219 | * applicable for weak-ordered memory model archs, | ||
220 | * such as IA-64). */ | ||
221 | wmb(); | ||
222 | writel(i, adapter->hw.hw_addr + rx_ring->tail); | ||
223 | } | ||
224 | } | ||
225 | |||
226 | /** | ||
227 | * igbvf_clean_rx_irq - Send received data up the network stack; legacy | ||
228 | * @adapter: board private structure | ||
229 | * | ||
230 | * the return value indicates whether actual cleaning was done; there | ||
231 | * is no guarantee that everything was cleaned | ||
232 | **/ | ||
233 | static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter, | ||
234 | int *work_done, int work_to_do) | ||
235 | { | ||
236 | struct igbvf_ring *rx_ring = adapter->rx_ring; | ||
237 | struct net_device *netdev = adapter->netdev; | ||
238 | struct pci_dev *pdev = adapter->pdev; | ||
239 | union e1000_adv_rx_desc *rx_desc, *next_rxd; | ||
240 | struct igbvf_buffer *buffer_info, *next_buffer; | ||
241 | struct sk_buff *skb; | ||
242 | bool cleaned = false; | ||
243 | int cleaned_count = 0; | ||
244 | unsigned int total_bytes = 0, total_packets = 0; | ||
245 | unsigned int i; | ||
246 | u32 length, hlen, staterr; | ||
247 | |||
248 | i = rx_ring->next_to_clean; | ||
249 | rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i); | ||
250 | staterr = le32_to_cpu(rx_desc->wb.upper.status_error); | ||
251 | |||
252 | while (staterr & E1000_RXD_STAT_DD) { | ||
253 | if (*work_done >= work_to_do) | ||
254 | break; | ||
255 | (*work_done)++; | ||
256 | |||
257 | buffer_info = &rx_ring->buffer_info[i]; | ||
258 | |||
259 | /* HW will not DMA in data larger than the given buffer, even | ||
260 | * if it parses the (NFS, of course) header to be larger. In | ||
261 | * that case, it fills the header buffer and spills the rest | ||
262 | * into the page. | ||
263 | */ | ||
264 | hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info) & | ||
265 | E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT; | ||
266 | if (hlen > adapter->rx_ps_hdr_size) | ||
267 | hlen = adapter->rx_ps_hdr_size; | ||
268 | |||
269 | length = le16_to_cpu(rx_desc->wb.upper.length); | ||
270 | cleaned = true; | ||
271 | cleaned_count++; | ||
272 | |||
273 | skb = buffer_info->skb; | ||
274 | prefetch(skb->data - NET_IP_ALIGN); | ||
275 | buffer_info->skb = NULL; | ||
276 | if (!adapter->rx_ps_hdr_size) { | ||
277 | pci_unmap_single(pdev, buffer_info->dma, | ||
278 | adapter->rx_buffer_len, | ||
279 | PCI_DMA_FROMDEVICE); | ||
280 | buffer_info->dma = 0; | ||
281 | skb_put(skb, length); | ||
282 | goto send_up; | ||
283 | } | ||
284 | |||
285 | if (!skb_shinfo(skb)->nr_frags) { | ||
286 | pci_unmap_single(pdev, buffer_info->dma, | ||
287 | adapter->rx_ps_hdr_size + NET_IP_ALIGN, | ||
288 | PCI_DMA_FROMDEVICE); | ||
289 | skb_put(skb, hlen); | ||
290 | } | ||
291 | |||
292 | if (length) { | ||
293 | pci_unmap_page(pdev, buffer_info->page_dma, | ||
294 | PAGE_SIZE / 2, | ||
295 | PCI_DMA_FROMDEVICE); | ||
296 | buffer_info->page_dma = 0; | ||
297 | |||
298 | skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++, | ||
299 | buffer_info->page, | ||
300 | buffer_info->page_offset, | ||
301 | length); | ||
302 | |||
303 | if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) || | ||
304 | (page_count(buffer_info->page) != 1)) | ||
305 | buffer_info->page = NULL; | ||
306 | else | ||
307 | get_page(buffer_info->page); | ||
308 | |||
309 | skb->len += length; | ||
310 | skb->data_len += length; | ||
311 | skb->truesize += length; | ||
312 | } | ||
313 | send_up: | ||
314 | i++; | ||
315 | if (i == rx_ring->count) | ||
316 | i = 0; | ||
317 | next_rxd = IGBVF_RX_DESC_ADV(*rx_ring, i); | ||
318 | prefetch(next_rxd); | ||
319 | next_buffer = &rx_ring->buffer_info[i]; | ||
320 | |||
321 | if (!(staterr & E1000_RXD_STAT_EOP)) { | ||
322 | buffer_info->skb = next_buffer->skb; | ||
323 | buffer_info->dma = next_buffer->dma; | ||
324 | next_buffer->skb = skb; | ||
325 | next_buffer->dma = 0; | ||
326 | goto next_desc; | ||
327 | } | ||
328 | |||
329 | if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) { | ||
330 | dev_kfree_skb_irq(skb); | ||
331 | goto next_desc; | ||
332 | } | ||
333 | |||
334 | total_bytes += skb->len; | ||
335 | total_packets++; | ||
336 | |||
337 | igbvf_rx_checksum_adv(adapter, staterr, skb); | ||
338 | |||
339 | skb->protocol = eth_type_trans(skb, netdev); | ||
340 | |||
341 | igbvf_receive_skb(adapter, netdev, skb, staterr, | ||
342 | rx_desc->wb.upper.vlan); | ||
343 | |||
344 | netdev->last_rx = jiffies; | ||
345 | |||
346 | next_desc: | ||
347 | rx_desc->wb.upper.status_error = 0; | ||
348 | |||
349 | /* return some buffers to hardware, one at a time is too slow */ | ||
350 | if (cleaned_count >= IGBVF_RX_BUFFER_WRITE) { | ||
351 | igbvf_alloc_rx_buffers(rx_ring, cleaned_count); | ||
352 | cleaned_count = 0; | ||
353 | } | ||
354 | |||
355 | /* use prefetched values */ | ||
356 | rx_desc = next_rxd; | ||
357 | buffer_info = next_buffer; | ||
358 | |||
359 | staterr = le32_to_cpu(rx_desc->wb.upper.status_error); | ||
360 | } | ||
361 | |||
362 | rx_ring->next_to_clean = i; | ||
363 | cleaned_count = igbvf_desc_unused(rx_ring); | ||
364 | |||
365 | if (cleaned_count) | ||
366 | igbvf_alloc_rx_buffers(rx_ring, cleaned_count); | ||
367 | |||
368 | adapter->total_rx_packets += total_packets; | ||
369 | adapter->total_rx_bytes += total_bytes; | ||
370 | adapter->net_stats.rx_bytes += total_bytes; | ||
371 | adapter->net_stats.rx_packets += total_packets; | ||
372 | return cleaned; | ||
373 | } | ||
374 | |||
375 | static void igbvf_put_txbuf(struct igbvf_adapter *adapter, | ||
376 | struct igbvf_buffer *buffer_info) | ||
377 | { | ||
378 | buffer_info->dma = 0; | ||
379 | if (buffer_info->skb) { | ||
380 | skb_dma_unmap(&adapter->pdev->dev, buffer_info->skb, | ||
381 | DMA_TO_DEVICE); | ||
382 | dev_kfree_skb_any(buffer_info->skb); | ||
383 | buffer_info->skb = NULL; | ||
384 | } | ||
385 | buffer_info->time_stamp = 0; | ||
386 | } | ||
387 | |||
388 | static void igbvf_print_tx_hang(struct igbvf_adapter *adapter) | ||
389 | { | ||
390 | struct igbvf_ring *tx_ring = adapter->tx_ring; | ||
391 | unsigned int i = tx_ring->next_to_clean; | ||
392 | unsigned int eop = tx_ring->buffer_info[i].next_to_watch; | ||
393 | union e1000_adv_tx_desc *eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop); | ||
394 | |||
395 | /* detected Tx unit hang */ | ||
396 | dev_err(&adapter->pdev->dev, | ||
397 | "Detected Tx Unit Hang:\n" | ||
398 | " TDH <%x>\n" | ||
399 | " TDT <%x>\n" | ||
400 | " next_to_use <%x>\n" | ||
401 | " next_to_clean <%x>\n" | ||
402 | "buffer_info[next_to_clean]:\n" | ||
403 | " time_stamp <%lx>\n" | ||
404 | " next_to_watch <%x>\n" | ||
405 | " jiffies <%lx>\n" | ||
406 | " next_to_watch.status <%x>\n", | ||
407 | readl(adapter->hw.hw_addr + tx_ring->head), | ||
408 | readl(adapter->hw.hw_addr + tx_ring->tail), | ||
409 | tx_ring->next_to_use, | ||
410 | tx_ring->next_to_clean, | ||
411 | tx_ring->buffer_info[eop].time_stamp, | ||
412 | eop, | ||
413 | jiffies, | ||
414 | eop_desc->wb.status); | ||
415 | } | ||
416 | |||
417 | /** | ||
418 | * igbvf_setup_tx_resources - allocate Tx resources (Descriptors) | ||
419 | * @adapter: board private structure | ||
420 | * | ||
421 | * Return 0 on success, negative on failure | ||
422 | **/ | ||
423 | int igbvf_setup_tx_resources(struct igbvf_adapter *adapter, | ||
424 | struct igbvf_ring *tx_ring) | ||
425 | { | ||
426 | struct pci_dev *pdev = adapter->pdev; | ||
427 | int size; | ||
428 | |||
429 | size = sizeof(struct igbvf_buffer) * tx_ring->count; | ||
430 | tx_ring->buffer_info = vmalloc(size); | ||
431 | if (!tx_ring->buffer_info) | ||
432 | goto err; | ||
433 | memset(tx_ring->buffer_info, 0, size); | ||
434 | |||
435 | /* round up to nearest 4K */ | ||
436 | tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); | ||
437 | tx_ring->size = ALIGN(tx_ring->size, 4096); | ||
438 | |||
439 | tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size, | ||
440 | &tx_ring->dma); | ||
441 | |||
442 | if (!tx_ring->desc) | ||
443 | goto err; | ||
444 | |||
445 | tx_ring->adapter = adapter; | ||
446 | tx_ring->next_to_use = 0; | ||
447 | tx_ring->next_to_clean = 0; | ||
448 | |||
449 | return 0; | ||
450 | err: | ||
451 | vfree(tx_ring->buffer_info); | ||
452 | dev_err(&adapter->pdev->dev, | ||
453 | "Unable to allocate memory for the transmit descriptor ring\n"); | ||
454 | return -ENOMEM; | ||
455 | } | ||
456 | |||
457 | /** | ||
458 | * igbvf_setup_rx_resources - allocate Rx resources (Descriptors) | ||
459 | * @adapter: board private structure | ||
460 | * | ||
461 | * Returns 0 on success, negative on failure | ||
462 | **/ | ||
463 | int igbvf_setup_rx_resources(struct igbvf_adapter *adapter, | ||
464 | struct igbvf_ring *rx_ring) | ||
465 | { | ||
466 | struct pci_dev *pdev = adapter->pdev; | ||
467 | int size, desc_len; | ||
468 | |||
469 | size = sizeof(struct igbvf_buffer) * rx_ring->count; | ||
470 | rx_ring->buffer_info = vmalloc(size); | ||
471 | if (!rx_ring->buffer_info) | ||
472 | goto err; | ||
473 | memset(rx_ring->buffer_info, 0, size); | ||
474 | |||
475 | desc_len = sizeof(union e1000_adv_rx_desc); | ||
476 | |||
477 | /* Round up to nearest 4K */ | ||
478 | rx_ring->size = rx_ring->count * desc_len; | ||
479 | rx_ring->size = ALIGN(rx_ring->size, 4096); | ||
480 | |||
481 | rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, | ||
482 | &rx_ring->dma); | ||
483 | |||
484 | if (!rx_ring->desc) | ||
485 | goto err; | ||
486 | |||
487 | rx_ring->next_to_clean = 0; | ||
488 | rx_ring->next_to_use = 0; | ||
489 | |||
490 | rx_ring->adapter = adapter; | ||
491 | |||
492 | return 0; | ||
493 | |||
494 | err: | ||
495 | vfree(rx_ring->buffer_info); | ||
496 | rx_ring->buffer_info = NULL; | ||
497 | dev_err(&adapter->pdev->dev, | ||
498 | "Unable to allocate memory for the receive descriptor ring\n"); | ||
499 | return -ENOMEM; | ||
500 | } | ||
501 | |||
502 | /** | ||
503 | * igbvf_clean_tx_ring - Free Tx Buffers | ||
504 | * @tx_ring: ring to be cleaned | ||
505 | **/ | ||
506 | static void igbvf_clean_tx_ring(struct igbvf_ring *tx_ring) | ||
507 | { | ||
508 | struct igbvf_adapter *adapter = tx_ring->adapter; | ||
509 | struct igbvf_buffer *buffer_info; | ||
510 | unsigned long size; | ||
511 | unsigned int i; | ||
512 | |||
513 | if (!tx_ring->buffer_info) | ||
514 | return; | ||
515 | |||
516 | /* Free all the Tx ring sk_buffs */ | ||
517 | for (i = 0; i < tx_ring->count; i++) { | ||
518 | buffer_info = &tx_ring->buffer_info[i]; | ||
519 | igbvf_put_txbuf(adapter, buffer_info); | ||
520 | } | ||
521 | |||
522 | size = sizeof(struct igbvf_buffer) * tx_ring->count; | ||
523 | memset(tx_ring->buffer_info, 0, size); | ||
524 | |||
525 | /* Zero out the descriptor ring */ | ||
526 | memset(tx_ring->desc, 0, tx_ring->size); | ||
527 | |||
528 | tx_ring->next_to_use = 0; | ||
529 | tx_ring->next_to_clean = 0; | ||
530 | |||
531 | writel(0, adapter->hw.hw_addr + tx_ring->head); | ||
532 | writel(0, adapter->hw.hw_addr + tx_ring->tail); | ||
533 | } | ||
534 | |||
535 | /** | ||
536 | * igbvf_free_tx_resources - Free Tx Resources per Queue | ||
537 | * @tx_ring: ring to free resources from | ||
538 | * | ||
539 | * Free all transmit software resources | ||
540 | **/ | ||
541 | void igbvf_free_tx_resources(struct igbvf_ring *tx_ring) | ||
542 | { | ||
543 | struct pci_dev *pdev = tx_ring->adapter->pdev; | ||
544 | |||
545 | igbvf_clean_tx_ring(tx_ring); | ||
546 | |||
547 | vfree(tx_ring->buffer_info); | ||
548 | tx_ring->buffer_info = NULL; | ||
549 | |||
550 | pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma); | ||
551 | |||
552 | tx_ring->desc = NULL; | ||
553 | } | ||
554 | |||
555 | /** | ||
556 | * igbvf_clean_rx_ring - Free Rx Buffers per Queue | ||
557 | * @adapter: board private structure | ||
558 | **/ | ||
559 | static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring) | ||
560 | { | ||
561 | struct igbvf_adapter *adapter = rx_ring->adapter; | ||
562 | struct igbvf_buffer *buffer_info; | ||
563 | struct pci_dev *pdev = adapter->pdev; | ||
564 | unsigned long size; | ||
565 | unsigned int i; | ||
566 | |||
567 | if (!rx_ring->buffer_info) | ||
568 | return; | ||
569 | |||
570 | /* Free all the Rx ring sk_buffs */ | ||
571 | for (i = 0; i < rx_ring->count; i++) { | ||
572 | buffer_info = &rx_ring->buffer_info[i]; | ||
573 | if (buffer_info->dma) { | ||
574 | if (adapter->rx_ps_hdr_size){ | ||
575 | pci_unmap_single(pdev, buffer_info->dma, | ||
576 | adapter->rx_ps_hdr_size, | ||
577 | PCI_DMA_FROMDEVICE); | ||
578 | } else { | ||
579 | pci_unmap_single(pdev, buffer_info->dma, | ||
580 | adapter->rx_buffer_len, | ||
581 | PCI_DMA_FROMDEVICE); | ||
582 | } | ||
583 | buffer_info->dma = 0; | ||
584 | } | ||
585 | |||
586 | if (buffer_info->skb) { | ||
587 | dev_kfree_skb(buffer_info->skb); | ||
588 | buffer_info->skb = NULL; | ||
589 | } | ||
590 | |||
591 | if (buffer_info->page) { | ||
592 | if (buffer_info->page_dma) | ||
593 | pci_unmap_page(pdev, buffer_info->page_dma, | ||
594 | PAGE_SIZE / 2, | ||
595 | PCI_DMA_FROMDEVICE); | ||
596 | put_page(buffer_info->page); | ||
597 | buffer_info->page = NULL; | ||
598 | buffer_info->page_dma = 0; | ||
599 | buffer_info->page_offset = 0; | ||
600 | } | ||
601 | } | ||
602 | |||
603 | size = sizeof(struct igbvf_buffer) * rx_ring->count; | ||
604 | memset(rx_ring->buffer_info, 0, size); | ||
605 | |||
606 | /* Zero out the descriptor ring */ | ||
607 | memset(rx_ring->desc, 0, rx_ring->size); | ||
608 | |||
609 | rx_ring->next_to_clean = 0; | ||
610 | rx_ring->next_to_use = 0; | ||
611 | |||
612 | writel(0, adapter->hw.hw_addr + rx_ring->head); | ||
613 | writel(0, adapter->hw.hw_addr + rx_ring->tail); | ||
614 | } | ||
615 | |||
616 | /** | ||
617 | * igbvf_free_rx_resources - Free Rx Resources | ||
618 | * @rx_ring: ring to clean the resources from | ||
619 | * | ||
620 | * Free all receive software resources | ||
621 | **/ | ||
622 | |||
623 | void igbvf_free_rx_resources(struct igbvf_ring *rx_ring) | ||
624 | { | ||
625 | struct pci_dev *pdev = rx_ring->adapter->pdev; | ||
626 | |||
627 | igbvf_clean_rx_ring(rx_ring); | ||
628 | |||
629 | vfree(rx_ring->buffer_info); | ||
630 | rx_ring->buffer_info = NULL; | ||
631 | |||
632 | dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, | ||
633 | rx_ring->dma); | ||
634 | rx_ring->desc = NULL; | ||
635 | } | ||
636 | |||
637 | /** | ||
638 | * igbvf_update_itr - update the dynamic ITR value based on statistics | ||
639 | * @adapter: pointer to adapter | ||
640 | * @itr_setting: current adapter->itr | ||
641 | * @packets: the number of packets during this measurement interval | ||
642 | * @bytes: the number of bytes during this measurement interval | ||
643 | * | ||
644 | * Stores a new ITR value based on packets and byte | ||
645 | * counts during the last interrupt. The advantage of per interrupt | ||
646 | * computation is faster updates and more accurate ITR for the current | ||
647 | * traffic pattern. Constants in this function were computed | ||
648 | * based on theoretical maximum wire speed and thresholds were set based | ||
649 | * on testing data as well as attempting to minimize response time | ||
650 | * while increasing bulk throughput. This functionality is controlled | ||
651 | * by the InterruptThrottleRate module parameter. | ||
652 | **/ | ||
653 | static unsigned int igbvf_update_itr(struct igbvf_adapter *adapter, | ||
654 | u16 itr_setting, int packets, | ||
655 | int bytes) | ||
656 | { | ||
657 | unsigned int retval = itr_setting; | ||
658 | |||
659 | if (packets == 0) | ||
660 | goto update_itr_done; | ||
661 | |||
662 | switch (itr_setting) { | ||
663 | case lowest_latency: | ||
664 | /* handle TSO and jumbo frames */ | ||
665 | if (bytes/packets > 8000) | ||
666 | retval = bulk_latency; | ||
667 | else if ((packets < 5) && (bytes > 512)) | ||
668 | retval = low_latency; | ||
669 | break; | ||
670 | case low_latency: /* 50 usec aka 20000 ints/s */ | ||
671 | if (bytes > 10000) { | ||
672 | /* this if handles the TSO accounting */ | ||
673 | if (bytes/packets > 8000) | ||
674 | retval = bulk_latency; | ||
675 | else if ((packets < 10) || ((bytes/packets) > 1200)) | ||
676 | retval = bulk_latency; | ||
677 | else if ((packets > 35)) | ||
678 | retval = lowest_latency; | ||
679 | } else if (bytes/packets > 2000) { | ||
680 | retval = bulk_latency; | ||
681 | } else if (packets <= 2 && bytes < 512) { | ||
682 | retval = lowest_latency; | ||
683 | } | ||
684 | break; | ||
685 | case bulk_latency: /* 250 usec aka 4000 ints/s */ | ||
686 | if (bytes > 25000) { | ||
687 | if (packets > 35) | ||
688 | retval = low_latency; | ||
689 | } else if (bytes < 6000) { | ||
690 | retval = low_latency; | ||
691 | } | ||
692 | break; | ||
693 | } | ||
694 | |||
695 | update_itr_done: | ||
696 | return retval; | ||
697 | } | ||
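As a worked example of the thresholds above: while in low_latency, an interval of 20 packets totalling 30000 bytes has bytes > 10000 and bytes/packets = 1500 > 1200, so the function steps to bulk_latency; an interval of only 2 packets and 400 bytes falls into the small-packet case and returns lowest_latency.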
698 | |||
699 | static void igbvf_set_itr(struct igbvf_adapter *adapter) | ||
700 | { | ||
701 | struct e1000_hw *hw = &adapter->hw; | ||
702 | u16 current_itr; | ||
703 | u32 new_itr = adapter->itr; | ||
704 | |||
705 | adapter->tx_itr = igbvf_update_itr(adapter, adapter->tx_itr, | ||
706 | adapter->total_tx_packets, | ||
707 | adapter->total_tx_bytes); | ||
708 | /* conservative mode (itr 3) eliminates the lowest_latency setting */ | ||
709 | if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) | ||
710 | adapter->tx_itr = low_latency; | ||
711 | |||
712 | adapter->rx_itr = igbvf_update_itr(adapter, adapter->rx_itr, | ||
713 | adapter->total_rx_packets, | ||
714 | adapter->total_rx_bytes); | ||
715 | /* conservative mode (itr 3) eliminates the lowest_latency setting */ | ||
716 | if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) | ||
717 | adapter->rx_itr = low_latency; | ||
718 | |||
719 | current_itr = max(adapter->rx_itr, adapter->tx_itr); | ||
720 | |||
721 | switch (current_itr) { | ||
722 | /* counts and packets in update_itr are dependent on these numbers */ | ||
723 | case lowest_latency: | ||
724 | new_itr = 70000; | ||
725 | break; | ||
726 | case low_latency: | ||
727 | new_itr = 20000; /* aka hwitr = ~200 */ | ||
728 | break; | ||
729 | case bulk_latency: | ||
730 | new_itr = 4000; | ||
731 | break; | ||
732 | default: | ||
733 | break; | ||
734 | } | ||
735 | |||
736 | if (new_itr != adapter->itr) { | ||
737 | /* | ||
738 | * this attempts to bias the interrupt rate towards Bulk | ||
739 | * by adding intermediate steps when interrupt rate is | ||
740 | * increasing | ||
741 | */ | ||
742 | new_itr = new_itr > adapter->itr ? | ||
743 | min(adapter->itr + (new_itr >> 2), new_itr) : | ||
744 | new_itr; | ||
745 | adapter->itr = new_itr; | ||
746 | adapter->rx_ring->itr_val = 1952; | ||
747 | |||
748 | if (adapter->msix_entries) | ||
749 | adapter->rx_ring->set_itr = 1; | ||
750 | else | ||
751 | ew32(ITR, 1952); | ||
752 | } | ||
753 | } | ||
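For instance, if adapter->itr is currently 4000 and the new target is 20000, the biasing step above yields min(4000 + (20000 >> 2), 20000) = 9000, so the interrupt rate climbs toward the target over several intervals rather than jumping straight to it.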
754 | |||
755 | /** | ||
756 | * igbvf_clean_tx_irq - Reclaim resources after transmit completes | ||
757 | * @adapter: board private structure | ||
758 | * returns true if ring is completely cleaned | ||
759 | **/ | ||
760 | static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring) | ||
761 | { | ||
762 | struct igbvf_adapter *adapter = tx_ring->adapter; | ||
763 | struct e1000_hw *hw = &adapter->hw; | ||
764 | struct net_device *netdev = adapter->netdev; | ||
765 | struct igbvf_buffer *buffer_info; | ||
766 | struct sk_buff *skb; | ||
767 | union e1000_adv_tx_desc *tx_desc, *eop_desc; | ||
768 | unsigned int total_bytes = 0, total_packets = 0; | ||
769 | unsigned int i, eop, count = 0; | ||
770 | bool cleaned = false; | ||
771 | |||
772 | i = tx_ring->next_to_clean; | ||
773 | eop = tx_ring->buffer_info[i].next_to_watch; | ||
774 | eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop); | ||
775 | |||
776 | while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) && | ||
777 | (count < tx_ring->count)) { | ||
778 | for (cleaned = false; !cleaned; count++) { | ||
779 | tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i); | ||
780 | buffer_info = &tx_ring->buffer_info[i]; | ||
781 | cleaned = (i == eop); | ||
782 | skb = buffer_info->skb; | ||
783 | |||
784 | if (skb) { | ||
785 | unsigned int segs, bytecount; | ||
786 | |||
787 | /* gso_segs is currently only valid for tcp */ | ||
788 | segs = skb_shinfo(skb)->gso_segs ?: 1; | ||
789 | /* multiply data chunks by size of headers */ | ||
790 | bytecount = ((segs - 1) * skb_headlen(skb)) + | ||
791 | skb->len; | ||
792 | total_packets += segs; | ||
793 | total_bytes += bytecount; | ||
794 | } | ||
795 | |||
796 | igbvf_put_txbuf(adapter, buffer_info); | ||
797 | tx_desc->wb.status = 0; | ||
798 | |||
799 | i++; | ||
800 | if (i == tx_ring->count) | ||
801 | i = 0; | ||
802 | } | ||
803 | eop = tx_ring->buffer_info[i].next_to_watch; | ||
804 | eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop); | ||
805 | } | ||
806 | |||
807 | tx_ring->next_to_clean = i; | ||
808 | |||
809 | if (unlikely(count && | ||
810 | netif_carrier_ok(netdev) && | ||
811 | igbvf_desc_unused(tx_ring) >= IGBVF_TX_QUEUE_WAKE)) { | ||
812 | /* Make sure that anybody stopping the queue after this | ||
813 | * sees the new next_to_clean. | ||
814 | */ | ||
815 | smp_mb(); | ||
816 | if (netif_queue_stopped(netdev) && | ||
817 | !(test_bit(__IGBVF_DOWN, &adapter->state))) { | ||
818 | netif_wake_queue(netdev); | ||
819 | ++adapter->restart_queue; | ||
820 | } | ||
821 | } | ||
822 | |||
823 | if (adapter->detect_tx_hung) { | ||
824 | /* Detect a transmit hang in hardware, this serializes the | ||
825 | * check with the clearing of time_stamp and movement of i */ | ||
826 | adapter->detect_tx_hung = false; | ||
827 | if (tx_ring->buffer_info[i].time_stamp && | ||
828 | time_after(jiffies, tx_ring->buffer_info[i].time_stamp + | ||
829 | (adapter->tx_timeout_factor * HZ)) | ||
830 | && !(er32(STATUS) & E1000_STATUS_TXOFF)) { | ||
831 | |||
832 | tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i); | ||
833 | /* detected Tx unit hang */ | ||
834 | igbvf_print_tx_hang(adapter); | ||
835 | |||
836 | netif_stop_queue(netdev); | ||
837 | } | ||
838 | } | ||
839 | adapter->net_stats.tx_bytes += total_bytes; | ||
840 | adapter->net_stats.tx_packets += total_packets; | ||
841 | return (count < tx_ring->count); | ||
842 | } | ||
843 | |||
844 | static irqreturn_t igbvf_msix_other(int irq, void *data) | ||
845 | { | ||
846 | struct net_device *netdev = data; | ||
847 | struct igbvf_adapter *adapter = netdev_priv(netdev); | ||
848 | struct e1000_hw *hw = &adapter->hw; | ||
849 | |||
850 | adapter->int_counter1++; | ||
851 | |||
852 | netif_carrier_off(netdev); | ||
853 | hw->mac.get_link_status = 1; | ||
854 | if (!test_bit(__IGBVF_DOWN, &adapter->state)) | ||
855 | mod_timer(&adapter->watchdog_timer, jiffies + 1); | ||
856 | |||
857 | ew32(EIMS, adapter->eims_other); | ||
858 | |||
859 | return IRQ_HANDLED; | ||
860 | } | ||
861 | |||
862 | static irqreturn_t igbvf_intr_msix_tx(int irq, void *data) | ||
863 | { | ||
864 | struct net_device *netdev = data; | ||
865 | struct igbvf_adapter *adapter = netdev_priv(netdev); | ||
866 | struct e1000_hw *hw = &adapter->hw; | ||
867 | struct igbvf_ring *tx_ring = adapter->tx_ring; | ||
868 | |||
869 | |||
870 | adapter->total_tx_bytes = 0; | ||
871 | adapter->total_tx_packets = 0; | ||
872 | |||
873 | /* auto mask will automatically reenable the interrupt when we write | ||
874 | * EICS */ | ||
875 | if (!igbvf_clean_tx_irq(tx_ring)) | ||
876 | /* Ring was not completely cleaned, so fire another interrupt */ | ||
877 | ew32(EICS, tx_ring->eims_value); | ||
878 | else | ||
879 | ew32(EIMS, tx_ring->eims_value); | ||
880 | |||
881 | return IRQ_HANDLED; | ||
882 | } | ||
883 | |||
884 | static irqreturn_t igbvf_intr_msix_rx(int irq, void *data) | ||
885 | { | ||
886 | struct net_device *netdev = data; | ||
887 | struct igbvf_adapter *adapter = netdev_priv(netdev); | ||
888 | |||
889 | adapter->int_counter0++; | ||
890 | |||
891 | /* Write the ITR value calculated at the end of the | ||
892 | * previous interrupt. | ||
893 | */ | ||
894 | if (adapter->rx_ring->set_itr) { | ||
895 | writel(adapter->rx_ring->itr_val, | ||
896 | adapter->hw.hw_addr + adapter->rx_ring->itr_register); | ||
897 | adapter->rx_ring->set_itr = 0; | ||
898 | } | ||
899 | |||
900 | if (napi_schedule_prep(&adapter->rx_ring->napi)) { | ||
901 | adapter->total_rx_bytes = 0; | ||
902 | adapter->total_rx_packets = 0; | ||
903 | __napi_schedule(&adapter->rx_ring->napi); | ||
904 | } | ||
905 | |||
906 | return IRQ_HANDLED; | ||
907 | } | ||
908 | |||
909 | #define IGBVF_NO_QUEUE -1 | ||
910 | |||
911 | static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue, | ||
912 | int tx_queue, int msix_vector) | ||
913 | { | ||
914 | struct e1000_hw *hw = &adapter->hw; | ||
915 | u32 ivar, index; | ||
916 | |||
917 | /* 82576 uses a table-based method for assigning vectors. | ||
918 | Each queue has a single entry in the table to which we write | ||
919 | a vector number along with a "valid" bit. Sadly, the layout | ||
920 | of the table is somewhat counterintuitive. */ | ||
921 | if (rx_queue > IGBVF_NO_QUEUE) { | ||
922 | index = (rx_queue >> 1); | ||
923 | ivar = array_er32(IVAR0, index); | ||
924 | if (rx_queue & 0x1) { | ||
925 | /* vector goes into third byte of register */ | ||
926 | ivar = ivar & 0xFF00FFFF; | ||
927 | ivar |= (msix_vector | E1000_IVAR_VALID) << 16; | ||
928 | } else { | ||
929 | /* vector goes into low byte of register */ | ||
930 | ivar = ivar & 0xFFFFFF00; | ||
931 | ivar |= msix_vector | E1000_IVAR_VALID; | ||
932 | } | ||
933 | adapter->rx_ring[rx_queue].eims_value = 1 << msix_vector; | ||
934 | array_ew32(IVAR0, index, ivar); | ||
935 | } | ||
936 | if (tx_queue > IGBVF_NO_QUEUE) { | ||
937 | index = (tx_queue >> 1); | ||
938 | ivar = array_er32(IVAR0, index); | ||
939 | if (tx_queue & 0x1) { | ||
940 | /* vector goes into high byte of register */ | ||
941 | ivar = ivar & 0x00FFFFFF; | ||
942 | ivar |= (msix_vector | E1000_IVAR_VALID) << 24; | ||
943 | } else { | ||
944 | /* vector goes into second byte of register */ | ||
945 | ivar = ivar & 0xFFFF00FF; | ||
946 | ivar |= (msix_vector | E1000_IVAR_VALID) << 8; | ||
947 | } | ||
948 | adapter->tx_ring[tx_queue].eims_value = 1 << msix_vector; | ||
949 | array_ew32(IVAR0, index, ivar); | ||
950 | } | ||
951 | } | ||
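To make the table layout concrete: with the vector assignment done by igbvf_configure_msix() below, the Tx queue (tx_queue = 0, MSI-X vector 0) lands in the second byte of IVAR0[0] as (0 | E1000_IVAR_VALID) << 8, while the Rx queue (rx_queue = 0, MSI-X vector 1) lands in the low byte of the same register as (1 | E1000_IVAR_VALID).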
952 | |||
953 | /** | ||
954 | * igbvf_configure_msix - Configure MSI-X hardware | ||
955 | * | ||
956 | * igbvf_configure_msix sets up the hardware to properly | ||
957 | * generate MSI-X interrupts. | ||
958 | **/ | ||
959 | static void igbvf_configure_msix(struct igbvf_adapter *adapter) | ||
960 | { | ||
961 | u32 tmp; | ||
962 | struct e1000_hw *hw = &adapter->hw; | ||
963 | struct igbvf_ring *tx_ring = adapter->tx_ring; | ||
964 | struct igbvf_ring *rx_ring = adapter->rx_ring; | ||
965 | int vector = 0; | ||
966 | |||
967 | adapter->eims_enable_mask = 0; | ||
968 | |||
969 | igbvf_assign_vector(adapter, IGBVF_NO_QUEUE, 0, vector++); | ||
970 | adapter->eims_enable_mask |= tx_ring->eims_value; | ||
971 | if (tx_ring->itr_val) | ||
972 | writel(tx_ring->itr_val, | ||
973 | hw->hw_addr + tx_ring->itr_register); | ||
974 | else | ||
975 | writel(1952, hw->hw_addr + tx_ring->itr_register); | ||
976 | |||
977 | igbvf_assign_vector(adapter, 0, IGBVF_NO_QUEUE, vector++); | ||
978 | adapter->eims_enable_mask |= rx_ring->eims_value; | ||
979 | if (rx_ring->itr_val) | ||
980 | writel(rx_ring->itr_val, | ||
981 | hw->hw_addr + rx_ring->itr_register); | ||
982 | else | ||
983 | writel(1952, hw->hw_addr + rx_ring->itr_register); | ||
984 | |||
985 | /* set vector for other causes, i.e. link changes */ | ||
986 | |||
987 | tmp = (vector++ | E1000_IVAR_VALID); | ||
988 | |||
989 | ew32(IVAR_MISC, tmp); | ||
990 | |||
991 | adapter->eims_enable_mask = (1 << (vector)) - 1; | ||
992 | adapter->eims_other = 1 << (vector - 1); | ||
993 | e1e_flush(); | ||
994 | } | ||
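/*
 * Editorial note (not part of the driver): with the three vectors assigned
 * above (0 = Tx, 1 = Rx, 2 = other/link changes), the function exits with
 * vector == 3, so eims_enable_mask = (1 << 3) - 1 = 0x7 covers all three
 * interrupt causes and eims_other = 1 << 2 = 0x4 selects only the
 * link-change vector.
 */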
995 | |||
996 | static void igbvf_reset_interrupt_capability(struct igbvf_adapter *adapter) | ||
997 | { | ||
998 | if (adapter->msix_entries) { | ||
999 | pci_disable_msix(adapter->pdev); | ||
1000 | kfree(adapter->msix_entries); | ||
1001 | adapter->msix_entries = NULL; | ||
1002 | } | ||
1003 | } | ||
1004 | |||
1005 | /** | ||
1006 | * igbvf_set_interrupt_capability - set MSI or MSI-X if supported | ||
1007 | * | ||
1008 | * Attempt to configure interrupts using the best available | ||
1009 | * capabilities of the hardware and kernel. | ||
1010 | **/ | ||
1011 | static void igbvf_set_interrupt_capability(struct igbvf_adapter *adapter) | ||
1012 | { | ||
1013 | int err = -ENOMEM; | ||
1014 | int i; | ||
1015 | |||
1016 | /* we allocate 3 vectors, 1 for tx, 1 for rx, one for pf messages */ | ||
1017 | adapter->msix_entries = kcalloc(3, sizeof(struct msix_entry), | ||
1018 | GFP_KERNEL); | ||
1019 | if (adapter->msix_entries) { | ||
1020 | for (i = 0; i < 3; i++) | ||
1021 | adapter->msix_entries[i].entry = i; | ||
1022 | |||
1023 | err = pci_enable_msix(adapter->pdev, | ||
1024 | adapter->msix_entries, 3); | ||
1025 | } | ||
1026 | |||
1027 | if (err) { | ||
1028 | /* MSI-X failed */ | ||
1029 | dev_err(&adapter->pdev->dev, | ||
1030 | "Failed to initialize MSI-X interrupts.\n"); | ||
1031 | igbvf_reset_interrupt_capability(adapter); | ||
1032 | } | ||
1033 | } | ||
1034 | |||
1035 | /** | ||
1036 | * igbvf_request_msix - Initialize MSI-X interrupts | ||
1037 | * | ||
1038 | * igbvf_request_msix allocates MSI-X vectors and requests interrupts from the | ||
1039 | * kernel. | ||
1040 | **/ | ||
1041 | static int igbvf_request_msix(struct igbvf_adapter *adapter) | ||
1042 | { | ||
1043 | struct net_device *netdev = adapter->netdev; | ||
1044 | int err = 0, vector = 0; | ||
1045 | |||
1046 | if (strlen(netdev->name) < (IFNAMSIZ - 5)) { | ||
1047 | sprintf(adapter->tx_ring->name, "%s-tx-0", netdev->name); | ||
1048 | sprintf(adapter->rx_ring->name, "%s-rx-0", netdev->name); | ||
1049 | } else { | ||
1050 | memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ); | ||
1051 | memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ); | ||
1052 | } | ||
1053 | |||
1054 | err = request_irq(adapter->msix_entries[vector].vector, | ||
1055 | &igbvf_intr_msix_tx, 0, adapter->tx_ring->name, | ||
1056 | netdev); | ||
1057 | if (err) | ||
1058 | goto out; | ||
1059 | |||
1060 | adapter->tx_ring->itr_register = E1000_EITR(vector); | ||
1061 | adapter->tx_ring->itr_val = 1952; | ||
1062 | vector++; | ||
1063 | |||
1064 | err = request_irq(adapter->msix_entries[vector].vector, | ||
1065 | &igbvf_intr_msix_rx, 0, adapter->rx_ring->name, | ||
1066 | netdev); | ||
1067 | if (err) | ||
1068 | goto out; | ||
1069 | |||
1070 | adapter->rx_ring->itr_register = E1000_EITR(vector); | ||
1071 | adapter->rx_ring->itr_val = 1952; | ||
1072 | vector++; | ||
1073 | |||
1074 | err = request_irq(adapter->msix_entries[vector].vector, | ||
1075 | &igbvf_msix_other, 0, netdev->name, netdev); | ||
1076 | if (err) | ||
1077 | goto out; | ||
1078 | |||
1079 | igbvf_configure_msix(adapter); | ||
1080 | return 0; | ||
1081 | out: | ||
1082 | return err; | ||
1083 | } | ||
1084 | |||
1085 | /** | ||
1086 | * igbvf_alloc_queues - Allocate memory for all rings | ||
1087 | * @adapter: board private structure to initialize | ||
1088 | **/ | ||
1089 | static int __devinit igbvf_alloc_queues(struct igbvf_adapter *adapter) | ||
1090 | { | ||
1091 | struct net_device *netdev = adapter->netdev; | ||
1092 | |||
1093 | adapter->tx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL); | ||
1094 | if (!adapter->tx_ring) | ||
1095 | return -ENOMEM; | ||
1096 | |||
1097 | adapter->rx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL); | ||
1098 | if (!adapter->rx_ring) { | ||
1099 | kfree(adapter->tx_ring); | ||
1100 | return -ENOMEM; | ||
1101 | } | ||
1102 | |||
1103 | netif_napi_add(netdev, &adapter->rx_ring->napi, igbvf_poll, 64); | ||
1104 | |||
1105 | return 0; | ||
1106 | } | ||
1107 | |||
1108 | /** | ||
1109 | * igbvf_request_irq - initialize interrupts | ||
1110 | * | ||
1111 | * Attempts to configure interrupts using the best available | ||
1112 | * capabilities of the hardware and kernel. | ||
1113 | **/ | ||
1114 | static int igbvf_request_irq(struct igbvf_adapter *adapter) | ||
1115 | { | ||
1116 | int err = -1; | ||
1117 | |||
1118 | /* igbvf supports msi-x only */ | ||
1119 | if (adapter->msix_entries) | ||
1120 | err = igbvf_request_msix(adapter); | ||
1121 | |||
1122 | if (!err) | ||
1123 | return err; | ||
1124 | |||
1125 | dev_err(&adapter->pdev->dev, | ||
1126 | "Unable to allocate interrupt, Error: %d\n", err); | ||
1127 | |||
1128 | return err; | ||
1129 | } | ||
1130 | |||
1131 | static void igbvf_free_irq(struct igbvf_adapter *adapter) | ||
1132 | { | ||
1133 | struct net_device *netdev = adapter->netdev; | ||
1134 | int vector; | ||
1135 | |||
1136 | if (adapter->msix_entries) { | ||
1137 | for (vector = 0; vector < 3; vector++) | ||
1138 | free_irq(adapter->msix_entries[vector].vector, netdev); | ||
1139 | } | ||
1140 | } | ||
1141 | |||
1142 | /** | ||
1143 | * igbvf_irq_disable - Mask off interrupt generation on the NIC | ||
1144 | **/ | ||
1145 | static void igbvf_irq_disable(struct igbvf_adapter *adapter) | ||
1146 | { | ||
1147 | struct e1000_hw *hw = &adapter->hw; | ||
1148 | |||
1149 | ew32(EIMC, ~0); | ||
1150 | |||
1151 | if (adapter->msix_entries) | ||
1152 | ew32(EIAC, 0); | ||
1153 | } | ||
1154 | |||
1155 | /** | ||
1156 | * igbvf_irq_enable - Enable default interrupt generation settings | ||
1157 | **/ | ||
1158 | static void igbvf_irq_enable(struct igbvf_adapter *adapter) | ||
1159 | { | ||
1160 | struct e1000_hw *hw = &adapter->hw; | ||
1161 | |||
1162 | ew32(EIAC, adapter->eims_enable_mask); | ||
1163 | ew32(EIAM, adapter->eims_enable_mask); | ||
1164 | ew32(EIMS, adapter->eims_enable_mask); | ||
1165 | } | ||
1166 | |||
1167 | /** | ||
1168 | * igbvf_poll - NAPI Rx polling callback | ||
1169 | * @napi: struct associated with this polling callback | ||
1170 | * @budget: number of packets the driver is allowed to process this poll | ||
1171 | **/ | ||
1172 | static int igbvf_poll(struct napi_struct *napi, int budget) | ||
1173 | { | ||
1174 | struct igbvf_ring *rx_ring = container_of(napi, struct igbvf_ring, napi); | ||
1175 | struct igbvf_adapter *adapter = rx_ring->adapter; | ||
1176 | struct e1000_hw *hw = &adapter->hw; | ||
1177 | int work_done = 0; | ||
1178 | |||
1179 | igbvf_clean_rx_irq(adapter, &work_done, budget); | ||
1180 | |||
1181 | /* If not enough Rx work done, exit the polling mode */ | ||
1182 | if (work_done < budget) { | ||
1183 | napi_complete(napi); | ||
1184 | |||
1185 | if (adapter->itr_setting & 3) | ||
1186 | igbvf_set_itr(adapter); | ||
1187 | |||
1188 | if (!test_bit(__IGBVF_DOWN, &adapter->state)) | ||
1189 | ew32(EIMS, adapter->rx_ring->eims_value); | ||
1190 | } | ||
1191 | |||
1192 | return work_done; | ||
1193 | } | ||
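/*
 * Editorial note (not part of the driver): igbvf_poll() follows the usual
 * NAPI contract - it processes at most @budget Rx packets; if fewer were
 * completed the ring is considered idle, polling is finished with
 * napi_complete() and the Rx interrupt is re-armed by setting its EIMS bit,
 * otherwise the kernel keeps calling the poll routine with the interrupt
 * still masked.
 */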
1194 | |||
1195 | /** | ||
1196 | * igbvf_set_rlpml - set receive large packet maximum length | ||
1197 | * @adapter: board private structure | ||
1198 | * | ||
1199 | * Configure the maximum size of packets that will be received | ||
1200 | */ | ||
1201 | static void igbvf_set_rlpml(struct igbvf_adapter *adapter) | ||
1202 | { | ||
1203 | int max_frame_size = adapter->max_frame_size; | ||
1204 | struct e1000_hw *hw = &adapter->hw; | ||
1205 | |||
1206 | if (adapter->vlgrp) | ||
1207 | max_frame_size += VLAN_TAG_SIZE; | ||
1208 | |||
1209 | e1000_rlpml_set_vf(hw, max_frame_size); | ||
1210 | } | ||
1211 | |||
1212 | static void igbvf_vlan_rx_add_vid(struct net_device *netdev, u16 vid) | ||
1213 | { | ||
1214 | struct igbvf_adapter *adapter = netdev_priv(netdev); | ||
1215 | struct e1000_hw *hw = &adapter->hw; | ||
1216 | |||
1217 | if (hw->mac.ops.set_vfta(hw, vid, true)) | ||
1218 | dev_err(&adapter->pdev->dev, "Failed to add vlan id %d\n", vid); | ||
1219 | } | ||
1220 | |||
1221 | static void igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) | ||
1222 | { | ||
1223 | struct igbvf_adapter *adapter = netdev_priv(netdev); | ||
1224 | struct e1000_hw *hw = &adapter->hw; | ||
1225 | |||
1226 | igbvf_irq_disable(adapter); | ||
1227 | vlan_group_set_device(adapter->vlgrp, vid, NULL); | ||
1228 | |||
1229 | if (!test_bit(__IGBVF_DOWN, &adapter->state)) | ||
1230 | igbvf_irq_enable(adapter); | ||
1231 | |||
1232 | if (hw->mac.ops.set_vfta(hw, vid, false)) | ||
1233 | dev_err(&adapter->pdev->dev, | ||
1234 | "Failed to remove vlan id %d\n", vid); | ||
1235 | } | ||
1236 | |||
1237 | static void igbvf_vlan_rx_register(struct net_device *netdev, | ||
1238 | struct vlan_group *grp) | ||
1239 | { | ||
1240 | struct igbvf_adapter *adapter = netdev_priv(netdev); | ||
1241 | |||
1242 | adapter->vlgrp = grp; | ||
1243 | } | ||
1244 | |||
1245 | static void igbvf_restore_vlan(struct igbvf_adapter *adapter) | ||
1246 | { | ||
1247 | u16 vid; | ||
1248 | |||
1249 | if (!adapter->vlgrp) | ||
1250 | return; | ||
1251 | |||
1252 | for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { | ||
1253 | if (!vlan_group_get_device(adapter->vlgrp, vid)) | ||
1254 | continue; | ||
1255 | igbvf_vlan_rx_add_vid(adapter->netdev, vid); | ||
1256 | } | ||
1257 | |||
1258 | igbvf_set_rlpml(adapter); | ||
1259 | } | ||
1260 | |||
1261 | /** | ||
1262 | * igbvf_configure_tx - Configure Transmit Unit after Reset | ||
1263 | * @adapter: board private structure | ||
1264 | * | ||
1265 | * Configure the Tx unit of the MAC after a reset. | ||
1266 | **/ | ||
1267 | static void igbvf_configure_tx(struct igbvf_adapter *adapter) | ||
1268 | { | ||
1269 | struct e1000_hw *hw = &adapter->hw; | ||
1270 | struct igbvf_ring *tx_ring = adapter->tx_ring; | ||
1271 | u64 tdba; | ||
1272 | u32 txdctl, dca_txctrl; | ||
1273 | |||
1274 | /* disable transmits */ | ||
1275 | txdctl = er32(TXDCTL(0)); | ||
1276 | ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE); | ||
1277 | msleep(10); | ||
1278 | |||
1279 | /* Setup the HW Tx Head and Tail descriptor pointers */ | ||
1280 | ew32(TDLEN(0), tx_ring->count * sizeof(union e1000_adv_tx_desc)); | ||
1281 | tdba = tx_ring->dma; | ||
1282 | ew32(TDBAL(0), (tdba & DMA_32BIT_MASK)); | ||
1283 | ew32(TDBAH(0), (tdba >> 32)); | ||
1284 | ew32(TDH(0), 0); | ||
1285 | ew32(TDT(0), 0); | ||
1286 | tx_ring->head = E1000_TDH(0); | ||
1287 | tx_ring->tail = E1000_TDT(0); | ||
1288 | |||
1289 | /* Turn off Relaxed Ordering on head write-backs. The writebacks | ||
1290 | * MUST be delivered in order or it will completely screw up | ||
1291 | * our bookkeeping. | ||
1292 | */ | ||
1293 | dca_txctrl = er32(DCA_TXCTRL(0)); | ||
1294 | dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN; | ||
1295 | ew32(DCA_TXCTRL(0), dca_txctrl); | ||
1296 | |||
1297 | /* enable transmits */ | ||
1298 | txdctl |= E1000_TXDCTL_QUEUE_ENABLE; | ||
1299 | ew32(TXDCTL(0), txdctl); | ||
1300 | |||
1301 | /* Setup Transmit Descriptor Settings for eop descriptor */ | ||
1302 | adapter->txd_cmd = E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_IFCS; | ||
1303 | |||
1304 | /* enable Report Status bit */ | ||
1305 | adapter->txd_cmd |= E1000_ADVTXD_DCMD_RS; | ||
1306 | |||
1307 | adapter->tx_queue_len = adapter->netdev->tx_queue_len; | ||
1308 | } | ||
1309 | |||
1310 | /** | ||
1311 | * igbvf_setup_srrctl - configure the receive control registers | ||
1312 | * @adapter: Board private structure | ||
1313 | **/ | ||
1314 | static void igbvf_setup_srrctl(struct igbvf_adapter *adapter) | ||
1315 | { | ||
1316 | struct e1000_hw *hw = &adapter->hw; | ||
1317 | u32 srrctl = 0; | ||
1318 | |||
1319 | srrctl &= ~(E1000_SRRCTL_DESCTYPE_MASK | | ||
1320 | E1000_SRRCTL_BSIZEHDR_MASK | | ||
1321 | E1000_SRRCTL_BSIZEPKT_MASK); | ||
1322 | |||
1323 | /* Enable queue drop to avoid head of line blocking */ | ||
1324 | srrctl |= E1000_SRRCTL_DROP_EN; | ||
1325 | |||
1326 | /* Setup buffer sizes */ | ||
1327 | srrctl |= ALIGN(adapter->rx_buffer_len, 1024) >> | ||
1328 | E1000_SRRCTL_BSIZEPKT_SHIFT; | ||
1329 | |||
1330 | if (adapter->rx_buffer_len < 2048) { | ||
1331 | adapter->rx_ps_hdr_size = 0; | ||
1332 | srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; | ||
1333 | } else { | ||
1334 | adapter->rx_ps_hdr_size = 128; | ||
1335 | srrctl |= adapter->rx_ps_hdr_size << | ||
1336 | E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; | ||
1337 | srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; | ||
1338 | } | ||
1339 | |||
1340 | ew32(SRRCTL(0), srrctl); | ||
1341 | } | ||
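/*
 * Editorial sketch (not part of the driver): the SRRCTL packet-buffer size
 * field programmed above is expressed in 1 KB units, which is what the
 * ALIGN()/shift computes. A standalone equivalent, assuming the customary
 * value of 10 for E1000_SRRCTL_BSIZEPKT_SHIFT:
 */
static unsigned int srrctl_bsizepkt(unsigned int rx_buffer_len)
{
	unsigned int aligned = (rx_buffer_len + 1023) & ~1023U; /* ALIGN(len, 1024) */

	return aligned >> 10;	/* e.g. 1024 -> 1, 2048 -> 2 */
}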
1342 | |||
1343 | /** | ||
1344 | * igbvf_configure_rx - Configure Receive Unit after Reset | ||
1345 | * @adapter: board private structure | ||
1346 | * | ||
1347 | * Configure the Rx unit of the MAC after a reset. | ||
1348 | **/ | ||
1349 | static void igbvf_configure_rx(struct igbvf_adapter *adapter) | ||
1350 | { | ||
1351 | struct e1000_hw *hw = &adapter->hw; | ||
1352 | struct igbvf_ring *rx_ring = adapter->rx_ring; | ||
1353 | u64 rdba; | ||
1354 | u32 rdlen, rxdctl; | ||
1355 | |||
1356 | /* disable receives */ | ||
1357 | rxdctl = er32(RXDCTL(0)); | ||
1358 | ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE); | ||
1359 | msleep(10); | ||
1360 | |||
1361 | rdlen = rx_ring->count * sizeof(union e1000_adv_rx_desc); | ||
1362 | |||
1363 | /* | ||
1364 | * Setup the HW Rx Head and Tail Descriptor Pointers and | ||
1365 | * the Base and Length of the Rx Descriptor Ring | ||
1366 | */ | ||
1367 | rdba = rx_ring->dma; | ||
1368 | ew32(RDBAL(0), (rdba & DMA_32BIT_MASK)); | ||
1369 | ew32(RDBAH(0), (rdba >> 32)); | ||
1370 | ew32(RDLEN(0), rx_ring->count * sizeof(union e1000_adv_rx_desc)); | ||
1371 | rx_ring->head = E1000_RDH(0); | ||
1372 | rx_ring->tail = E1000_RDT(0); | ||
1373 | ew32(RDH(0), 0); | ||
1374 | ew32(RDT(0), 0); | ||
1375 | |||
1376 | rxdctl |= E1000_RXDCTL_QUEUE_ENABLE; | ||
1377 | rxdctl &= 0xFFF00000; | ||
1378 | rxdctl |= IGBVF_RX_PTHRESH; | ||
1379 | rxdctl |= IGBVF_RX_HTHRESH << 8; | ||
1380 | rxdctl |= IGBVF_RX_WTHRESH << 16; | ||
1381 | |||
1382 | igbvf_set_rlpml(adapter); | ||
1383 | |||
1384 | /* enable receives */ | ||
1385 | ew32(RXDCTL(0), rxdctl); | ||
1386 | } | ||
1387 | |||
1388 | /** | ||
1389 | * igbvf_set_multi - Multicast and Promiscuous mode set | ||
1390 | * @netdev: network interface device structure | ||
1391 | * | ||
1392 | * The set_multi entry point is called whenever the multicast address | ||
1393 | * list or the network interface flags are updated. This routine is | ||
1394 | * responsible for configuring the hardware for proper multicast, | ||
1395 | * promiscuous mode, and all-multi behavior. | ||
1396 | **/ | ||
1397 | static void igbvf_set_multi(struct net_device *netdev) | ||
1398 | { | ||
1399 | struct igbvf_adapter *adapter = netdev_priv(netdev); | ||
1400 | struct e1000_hw *hw = &adapter->hw; | ||
1401 | struct dev_mc_list *mc_ptr; | ||
1402 | u8 *mta_list = NULL; | ||
1403 | int i; | ||
1404 | |||
1405 | if (netdev->mc_count) { | ||
1406 | mta_list = kmalloc(netdev->mc_count * 6, GFP_ATOMIC); | ||
1407 | if (!mta_list) { | ||
1408 | dev_err(&adapter->pdev->dev, | ||
1409 | "failed to allocate multicast filter list\n"); | ||
1410 | return; | ||
1411 | } | ||
1412 | } | ||
1413 | |||
1414 | /* prepare a packed array of only addresses. */ | ||
1415 | mc_ptr = netdev->mc_list; | ||
1416 | |||
1417 | for (i = 0; i < netdev->mc_count; i++) { | ||
1418 | if (!mc_ptr) | ||
1419 | break; | ||
1420 | memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, | ||
1421 | ETH_ALEN); | ||
1422 | mc_ptr = mc_ptr->next; | ||
1423 | } | ||
1424 | |||
1425 | hw->mac.ops.update_mc_addr_list(hw, mta_list, i, 0, 0); | ||
1426 | kfree(mta_list); | ||
1427 | } | ||
1428 | |||
1429 | /** | ||
1430 | * igbvf_configure - configure the hardware for Rx and Tx | ||
1431 | * @adapter: private board structure | ||
1432 | **/ | ||
1433 | static void igbvf_configure(struct igbvf_adapter *adapter) | ||
1434 | { | ||
1435 | igbvf_set_multi(adapter->netdev); | ||
1436 | |||
1437 | igbvf_restore_vlan(adapter); | ||
1438 | |||
1439 | igbvf_configure_tx(adapter); | ||
1440 | igbvf_setup_srrctl(adapter); | ||
1441 | igbvf_configure_rx(adapter); | ||
1442 | igbvf_alloc_rx_buffers(adapter->rx_ring, | ||
1443 | igbvf_desc_unused(adapter->rx_ring)); | ||
1444 | } | ||
1445 | |||
1446 | /* igbvf_reset - bring the hardware into a known good state | ||
1447 | * | ||
1448 | * This function boots the hardware and enables some settings that | ||
1449 | * require a configuration cycle of the hardware - those cannot be | ||
1450 | * set/changed during runtime. After reset the device needs to be | ||
1451 | * properly configured for Rx, Tx etc. | ||
1452 | */ | ||
1453 | static void igbvf_reset(struct igbvf_adapter *adapter) | ||
1454 | { | ||
1455 | struct e1000_mac_info *mac = &adapter->hw.mac; | ||
1456 | struct net_device *netdev = adapter->netdev; | ||
1457 | struct e1000_hw *hw = &adapter->hw; | ||
1458 | |||
1459 | /* Allow time for pending master requests to run */ | ||
1460 | if (mac->ops.reset_hw(hw)) | ||
1461 | dev_err(&adapter->pdev->dev, "PF still resetting\n"); | ||
1462 | |||
1463 | mac->ops.init_hw(hw); | ||
1464 | |||
1465 | if (is_valid_ether_addr(adapter->hw.mac.addr)) { | ||
1466 | memcpy(netdev->dev_addr, adapter->hw.mac.addr, | ||
1467 | netdev->addr_len); | ||
1468 | memcpy(netdev->perm_addr, adapter->hw.mac.addr, | ||
1469 | netdev->addr_len); | ||
1470 | } | ||
1471 | } | ||
1472 | |||
1473 | int igbvf_up(struct igbvf_adapter *adapter) | ||
1474 | { | ||
1475 | struct e1000_hw *hw = &adapter->hw; | ||
1476 | |||
1477 | /* hardware has been reset, we need to reload some things */ | ||
1478 | igbvf_configure(adapter); | ||
1479 | |||
1480 | clear_bit(__IGBVF_DOWN, &adapter->state); | ||
1481 | |||
1482 | napi_enable(&adapter->rx_ring->napi); | ||
1483 | if (adapter->msix_entries) | ||
1484 | igbvf_configure_msix(adapter); | ||
1485 | |||
1486 | /* Clear any pending interrupts. */ | ||
1487 | er32(EICR); | ||
1488 | igbvf_irq_enable(adapter); | ||
1489 | |||
1490 | /* start the watchdog */ | ||
1491 | hw->mac.get_link_status = 1; | ||
1492 | mod_timer(&adapter->watchdog_timer, jiffies + 1); | ||
1493 | |||
1494 | |||
1495 | return 0; | ||
1496 | } | ||
1497 | |||
1498 | void igbvf_down(struct igbvf_adapter *adapter) | ||
1499 | { | ||
1500 | struct net_device *netdev = adapter->netdev; | ||
1501 | struct e1000_hw *hw = &adapter->hw; | ||
1502 | u32 rxdctl, txdctl; | ||
1503 | |||
1504 | /* | ||
1505 | * signal that we're down so the interrupt handler does not | ||
1506 | * reschedule our watchdog timer | ||
1507 | */ | ||
1508 | set_bit(__IGBVF_DOWN, &adapter->state); | ||
1509 | |||
1510 | /* disable receives in the hardware */ | ||
1511 | rxdctl = er32(RXDCTL(0)); | ||
1512 | ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE); | ||
1513 | |||
1514 | netif_stop_queue(netdev); | ||
1515 | |||
1516 | /* disable transmits in the hardware */ | ||
1517 | txdctl = er32(TXDCTL(0)); | ||
1518 | ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE); | ||
1519 | |||
1520 | /* flush both disables and wait for them to finish */ | ||
1521 | e1e_flush(); | ||
1522 | msleep(10); | ||
1523 | |||
1524 | napi_disable(&adapter->rx_ring->napi); | ||
1525 | |||
1526 | igbvf_irq_disable(adapter); | ||
1527 | |||
1528 | del_timer_sync(&adapter->watchdog_timer); | ||
1529 | |||
1530 | netdev->tx_queue_len = adapter->tx_queue_len; | ||
1531 | netif_carrier_off(netdev); | ||
1532 | |||
1533 | /* record the stats before reset */ | ||
1534 | igbvf_update_stats(adapter); | ||
1535 | |||
1536 | adapter->link_speed = 0; | ||
1537 | adapter->link_duplex = 0; | ||
1538 | |||
1539 | igbvf_reset(adapter); | ||
1540 | igbvf_clean_tx_ring(adapter->tx_ring); | ||
1541 | igbvf_clean_rx_ring(adapter->rx_ring); | ||
1542 | } | ||
1543 | |||
1544 | void igbvf_reinit_locked(struct igbvf_adapter *adapter) | ||
1545 | { | ||
1546 | might_sleep(); | ||
1547 | while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) | ||
1548 | msleep(1); | ||
1549 | igbvf_down(adapter); | ||
1550 | igbvf_up(adapter); | ||
1551 | clear_bit(__IGBVF_RESETTING, &adapter->state); | ||
1552 | } | ||
1553 | |||
1554 | /** | ||
1555 | * igbvf_sw_init - Initialize general software structures (struct igbvf_adapter) | ||
1556 | * @adapter: board private structure to initialize | ||
1557 | * | ||
1558 | * igbvf_sw_init initializes the Adapter private data structure. | ||
1559 | * Fields are initialized based on PCI device information and | ||
1560 | * OS network device settings (MTU size). | ||
1561 | **/ | ||
1562 | static int __devinit igbvf_sw_init(struct igbvf_adapter *adapter) | ||
1563 | { | ||
1564 | struct net_device *netdev = adapter->netdev; | ||
1565 | s32 rc; | ||
1566 | |||
1567 | adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN; | ||
1568 | adapter->rx_ps_hdr_size = 0; | ||
1569 | adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; | ||
1570 | adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; | ||
1571 | |||
1572 | adapter->tx_int_delay = 8; | ||
1573 | adapter->tx_abs_int_delay = 32; | ||
1574 | adapter->rx_int_delay = 0; | ||
1575 | adapter->rx_abs_int_delay = 8; | ||
1576 | adapter->itr_setting = 3; | ||
1577 | adapter->itr = 20000; | ||
1578 | |||
1579 | /* Set various function pointers */ | ||
1580 | adapter->ei->init_ops(&adapter->hw); | ||
1581 | |||
1582 | rc = adapter->hw.mac.ops.init_params(&adapter->hw); | ||
1583 | if (rc) | ||
1584 | return rc; | ||
1585 | |||
1586 | rc = adapter->hw.mbx.ops.init_params(&adapter->hw); | ||
1587 | if (rc) | ||
1588 | return rc; | ||
1589 | |||
1590 | igbvf_set_interrupt_capability(adapter); | ||
1591 | |||
1592 | if (igbvf_alloc_queues(adapter)) | ||
1593 | return -ENOMEM; | ||
1594 | |||
1595 | spin_lock_init(&adapter->tx_queue_lock); | ||
1596 | |||
1597 | /* Explicitly disable IRQ since the NIC can be in any state. */ | ||
1598 | igbvf_irq_disable(adapter); | ||
1599 | |||
1600 | spin_lock_init(&adapter->stats_lock); | ||
1601 | |||
1602 | set_bit(__IGBVF_DOWN, &adapter->state); | ||
1603 | return 0; | ||
1604 | } | ||
1605 | |||
1606 | static void igbvf_initialize_last_counter_stats(struct igbvf_adapter *adapter) | ||
1607 | { | ||
1608 | struct e1000_hw *hw = &adapter->hw; | ||
1609 | |||
1610 | adapter->stats.last_gprc = er32(VFGPRC); | ||
1611 | adapter->stats.last_gorc = er32(VFGORC); | ||
1612 | adapter->stats.last_gptc = er32(VFGPTC); | ||
1613 | adapter->stats.last_gotc = er32(VFGOTC); | ||
1614 | adapter->stats.last_mprc = er32(VFMPRC); | ||
1615 | adapter->stats.last_gotlbc = er32(VFGOTLBC); | ||
1616 | adapter->stats.last_gptlbc = er32(VFGPTLBC); | ||
1617 | adapter->stats.last_gorlbc = er32(VFGORLBC); | ||
1618 | adapter->stats.last_gprlbc = er32(VFGPRLBC); | ||
1619 | |||
1620 | adapter->stats.base_gprc = er32(VFGPRC); | ||
1621 | adapter->stats.base_gorc = er32(VFGORC); | ||
1622 | adapter->stats.base_gptc = er32(VFGPTC); | ||
1623 | adapter->stats.base_gotc = er32(VFGOTC); | ||
1624 | adapter->stats.base_mprc = er32(VFMPRC); | ||
1625 | adapter->stats.base_gotlbc = er32(VFGOTLBC); | ||
1626 | adapter->stats.base_gptlbc = er32(VFGPTLBC); | ||
1627 | adapter->stats.base_gorlbc = er32(VFGORLBC); | ||
1628 | adapter->stats.base_gprlbc = er32(VFGPRLBC); | ||
1629 | } | ||
1630 | |||
1631 | /** | ||
1632 | * igbvf_open - Called when a network interface is made active | ||
1633 | * @netdev: network interface device structure | ||
1634 | * | ||
1635 | * Returns 0 on success, negative value on failure | ||
1636 | * | ||
1637 | * The open entry point is called when a network interface is made | ||
1638 | * active by the system (IFF_UP). At this point all resources needed | ||
1639 | * for transmit and receive operations are allocated, the interrupt | ||
1640 | * handler is registered with the OS, the watchdog timer is started, | ||
1641 | * and the stack is notified that the interface is ready. | ||
1642 | **/ | ||
1643 | static int igbvf_open(struct net_device *netdev) | ||
1644 | { | ||
1645 | struct igbvf_adapter *adapter = netdev_priv(netdev); | ||
1646 | struct e1000_hw *hw = &adapter->hw; | ||
1647 | int err; | ||
1648 | |||
1649 | /* disallow open during test */ | ||
1650 | if (test_bit(__IGBVF_TESTING, &adapter->state)) | ||
1651 | return -EBUSY; | ||
1652 | |||
1653 | /* allocate transmit descriptors */ | ||
1654 | err = igbvf_setup_tx_resources(adapter, adapter->tx_ring); | ||
1655 | if (err) | ||
1656 | goto err_setup_tx; | ||
1657 | |||
1658 | /* allocate receive descriptors */ | ||
1659 | err = igbvf_setup_rx_resources(adapter, adapter->rx_ring); | ||
1660 | if (err) | ||
1661 | goto err_setup_rx; | ||
1662 | |||
1663 | /* | ||
1664 | * before we allocate an interrupt, we must be ready to handle it. | ||
1665 | * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt | ||
1666 | * as soon as we call pci_request_irq, so we have to set up our | ||
1667 | * clean_rx handler before we do so. | ||
1668 | */ | ||
1669 | igbvf_configure(adapter); | ||
1670 | |||
1671 | err = igbvf_request_irq(adapter); | ||
1672 | if (err) | ||
1673 | goto err_req_irq; | ||
1674 | |||
1675 | /* From here on the code is the same as igbvf_up() */ | ||
1676 | clear_bit(__IGBVF_DOWN, &adapter->state); | ||
1677 | |||
1678 | napi_enable(&adapter->rx_ring->napi); | ||
1679 | |||
1680 | /* clear any pending interrupts */ | ||
1681 | er32(EICR); | ||
1682 | |||
1683 | igbvf_irq_enable(adapter); | ||
1684 | |||
1685 | /* start the watchdog */ | ||
1686 | hw->mac.get_link_status = 1; | ||
1687 | mod_timer(&adapter->watchdog_timer, jiffies + 1); | ||
1688 | |||
1689 | return 0; | ||
1690 | |||
1691 | err_req_irq: | ||
1692 | igbvf_free_rx_resources(adapter->rx_ring); | ||
1693 | err_setup_rx: | ||
1694 | igbvf_free_tx_resources(adapter->tx_ring); | ||
1695 | err_setup_tx: | ||
1696 | igbvf_reset(adapter); | ||
1697 | |||
1698 | return err; | ||
1699 | } | ||
1700 | |||
1701 | /** | ||
1702 | * igbvf_close - Disables a network interface | ||
1703 | * @netdev: network interface device structure | ||
1704 | * | ||
1705 | * Returns 0, this is not allowed to fail | ||
1706 | * | ||
1707 | * The close entry point is called when an interface is de-activated | ||
1708 | * by the OS. The hardware is still under the driver's control, but | ||
1709 | * needs to be disabled. A global MAC reset is issued to stop the | ||
1710 | * hardware, and all transmit and receive resources are freed. | ||
1711 | **/ | ||
1712 | static int igbvf_close(struct net_device *netdev) | ||
1713 | { | ||
1714 | struct igbvf_adapter *adapter = netdev_priv(netdev); | ||
1715 | |||
1716 | WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state)); | ||
1717 | igbvf_down(adapter); | ||
1718 | |||
1719 | igbvf_free_irq(adapter); | ||
1720 | |||
1721 | igbvf_free_tx_resources(adapter->tx_ring); | ||
1722 | igbvf_free_rx_resources(adapter->rx_ring); | ||
1723 | |||
1724 | return 0; | ||
1725 | } | ||
1726 | /** | ||
1727 | * igbvf_set_mac - Change the Ethernet Address of the NIC | ||
1728 | * @netdev: network interface device structure | ||
1729 | * @p: pointer to an address structure | ||
1730 | * | ||
1731 | * Returns 0 on success, negative on failure | ||
1732 | **/ | ||
1733 | static int igbvf_set_mac(struct net_device *netdev, void *p) | ||
1734 | { | ||
1735 | struct igbvf_adapter *adapter = netdev_priv(netdev); | ||
1736 | struct e1000_hw *hw = &adapter->hw; | ||
1737 | struct sockaddr *addr = p; | ||
1738 | |||
1739 | if (!is_valid_ether_addr(addr->sa_data)) | ||
1740 | return -EADDRNOTAVAIL; | ||
1741 | |||
1742 | memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); | ||
1743 | |||
1744 | hw->mac.ops.rar_set(hw, hw->mac.addr, 0); | ||
1745 | |||
1746 | if (memcmp(addr->sa_data, hw->mac.addr, 6)) | ||
1747 | return -EADDRNOTAVAIL; | ||
1748 | |||
1749 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); | ||
1750 | |||
1751 | return 0; | ||
1752 | } | ||
1753 | |||
1754 | #define UPDATE_VF_COUNTER(reg, name) \ | ||
1755 | { \ | ||
1756 | u32 current_counter = er32(reg); \ | ||
1757 | if (current_counter < adapter->stats.last_##name) \ | ||
1758 | adapter->stats.name += 0x100000000LL; \ | ||
1759 | adapter->stats.last_##name = current_counter; \ | ||
1760 | adapter->stats.name &= 0xFFFFFFFF00000000LL; \ | ||
1761 | adapter->stats.name |= current_counter; \ | ||
1762 | } | ||
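/*
 * Editorial sketch (not part of the driver): the VF statistics registers are
 * only 32 bits wide and wrap, so UPDATE_VF_COUNTER() maintains 64-bit
 * software counters. The same arithmetic as a standalone function with a
 * hypothetical name (updating the stored "last" value is left to the caller):
 */
static unsigned long long vf_counter_extend(unsigned long long sw_counter,
					    unsigned int last,
					    unsigned int current_counter)
{
	/* a reading below the previous one means the 32-bit register wrapped */
	if (current_counter < last)
		sw_counter += 0x100000000ULL;
	/* keep the accumulated high half, replace the low 32 bits */
	sw_counter &= 0xFFFFFFFF00000000ULL;
	sw_counter |= current_counter;
	return sw_counter;	/* e.g. (0xFFFFFFF0, 0xFFFFFFF0, 0x10) -> 0x100000010 */
}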
1763 | |||
1764 | /** | ||
1765 | * igbvf_update_stats - Update the board statistics counters | ||
1766 | * @adapter: board private structure | ||
1767 | **/ | ||
1768 | void igbvf_update_stats(struct igbvf_adapter *adapter) | ||
1769 | { | ||
1770 | struct e1000_hw *hw = &adapter->hw; | ||
1771 | struct pci_dev *pdev = adapter->pdev; | ||
1772 | |||
1773 | /* | ||
1774 | * Prevent stats update while adapter is being reset, link is down | ||
1775 | * or if the pci connection is down. | ||
1776 | */ | ||
1777 | if (adapter->link_speed == 0) | ||
1778 | return; | ||
1779 | |||
1780 | if (test_bit(__IGBVF_RESETTING, &adapter->state)) | ||
1781 | return; | ||
1782 | |||
1783 | if (pci_channel_offline(pdev)) | ||
1784 | return; | ||
1785 | |||
1786 | UPDATE_VF_COUNTER(VFGPRC, gprc); | ||
1787 | UPDATE_VF_COUNTER(VFGORC, gorc); | ||
1788 | UPDATE_VF_COUNTER(VFGPTC, gptc); | ||
1789 | UPDATE_VF_COUNTER(VFGOTC, gotc); | ||
1790 | UPDATE_VF_COUNTER(VFMPRC, mprc); | ||
1791 | UPDATE_VF_COUNTER(VFGOTLBC, gotlbc); | ||
1792 | UPDATE_VF_COUNTER(VFGPTLBC, gptlbc); | ||
1793 | UPDATE_VF_COUNTER(VFGORLBC, gorlbc); | ||
1794 | UPDATE_VF_COUNTER(VFGPRLBC, gprlbc); | ||
1795 | |||
1796 | /* Fill out the OS statistics structure */ | ||
1797 | adapter->net_stats.multicast = adapter->stats.mprc; | ||
1798 | } | ||
1799 | |||
1800 | static void igbvf_print_link_info(struct igbvf_adapter *adapter) | ||
1801 | { | ||
1802 | dev_info(&adapter->pdev->dev, "Link is Up %d Mbps %s\n", | ||
1803 | adapter->link_speed, | ||
1804 | ((adapter->link_duplex == FULL_DUPLEX) ? | ||
1805 | "Full Duplex" : "Half Duplex")); | ||
1806 | } | ||
1807 | |||
1808 | static bool igbvf_has_link(struct igbvf_adapter *adapter) | ||
1809 | { | ||
1810 | struct e1000_hw *hw = &adapter->hw; | ||
1811 | s32 ret_val = E1000_SUCCESS; | ||
1812 | bool link_active; | ||
1813 | |||
1814 | ret_val = hw->mac.ops.check_for_link(hw); | ||
1815 | link_active = !hw->mac.get_link_status; | ||
1816 | |||
1817 | /* if check for link returns error we will need to reset */ | ||
1818 | if (ret_val) | ||
1819 | schedule_work(&adapter->reset_task); | ||
1820 | |||
1821 | return link_active; | ||
1822 | } | ||
1823 | |||
1824 | /** | ||
1825 | * igbvf_watchdog - Timer Call-back | ||
1826 | * @data: pointer to adapter cast into an unsigned long | ||
1827 | **/ | ||
1828 | static void igbvf_watchdog(unsigned long data) | ||
1829 | { | ||
1830 | struct igbvf_adapter *adapter = (struct igbvf_adapter *) data; | ||
1831 | |||
1832 | /* Do the rest outside of interrupt context */ | ||
1833 | schedule_work(&adapter->watchdog_task); | ||
1834 | } | ||
1835 | |||
1836 | static void igbvf_watchdog_task(struct work_struct *work) | ||
1837 | { | ||
1838 | struct igbvf_adapter *adapter = container_of(work, | ||
1839 | struct igbvf_adapter, | ||
1840 | watchdog_task); | ||
1841 | struct net_device *netdev = adapter->netdev; | ||
1842 | struct e1000_mac_info *mac = &adapter->hw.mac; | ||
1843 | struct igbvf_ring *tx_ring = adapter->tx_ring; | ||
1844 | struct e1000_hw *hw = &adapter->hw; | ||
1845 | u32 link; | ||
1846 | int tx_pending = 0; | ||
1847 | |||
1848 | link = igbvf_has_link(adapter); | ||
1849 | |||
1850 | if (link) { | ||
1851 | if (!netif_carrier_ok(netdev)) { | ||
1852 | bool txb2b = 1; | ||
1853 | |||
1854 | mac->ops.get_link_up_info(&adapter->hw, | ||
1855 | &adapter->link_speed, | ||
1856 | &adapter->link_duplex); | ||
1857 | igbvf_print_link_info(adapter); | ||
1858 | |||
1859 | /* | ||
1860 | * tweak tx_queue_len according to speed/duplex | ||
1861 | * and adjust the timeout factor | ||
1862 | */ | ||
1863 | netdev->tx_queue_len = adapter->tx_queue_len; | ||
1864 | adapter->tx_timeout_factor = 1; | ||
1865 | switch (adapter->link_speed) { | ||
1866 | case SPEED_10: | ||
1867 | txb2b = 0; | ||
1868 | netdev->tx_queue_len = 10; | ||
1869 | adapter->tx_timeout_factor = 16; | ||
1870 | break; | ||
1871 | case SPEED_100: | ||
1872 | txb2b = 0; | ||
1873 | netdev->tx_queue_len = 100; | ||
1874 | /* maybe add some timeout factor ? */ | ||
1875 | break; | ||
1876 | } | ||
1877 | |||
1878 | netif_carrier_on(netdev); | ||
1879 | netif_wake_queue(netdev); | ||
1880 | } | ||
1881 | } else { | ||
1882 | if (netif_carrier_ok(netdev)) { | ||
1883 | adapter->link_speed = 0; | ||
1884 | adapter->link_duplex = 0; | ||
1885 | dev_info(&adapter->pdev->dev, "Link is Down\n"); | ||
1886 | netif_carrier_off(netdev); | ||
1887 | netif_stop_queue(netdev); | ||
1888 | } | ||
1889 | } | ||
1890 | |||
1891 | if (netif_carrier_ok(netdev)) { | ||
1892 | igbvf_update_stats(adapter); | ||
1893 | } else { | ||
1894 | tx_pending = (igbvf_desc_unused(tx_ring) + 1 < | ||
1895 | tx_ring->count); | ||
1896 | if (tx_pending) { | ||
1897 | /* | ||
1898 | * We've lost link, so the controller stops DMA, | ||
1899 | * but we've got queued Tx work that's never going | ||
1900 | * to get done, so reset controller to flush Tx. | ||
1901 | * (Do the reset outside of interrupt context). | ||
1902 | */ | ||
1903 | adapter->tx_timeout_count++; | ||
1904 | schedule_work(&adapter->reset_task); | ||
1905 | } | ||
1906 | } | ||
1907 | |||
1908 | /* Cause software interrupt to ensure Rx ring is cleaned */ | ||
1909 | ew32(EICS, adapter->rx_ring->eims_value); | ||
1910 | |||
1911 | /* Force detection of hung controller every watchdog period */ | ||
1912 | adapter->detect_tx_hung = 1; | ||
1913 | |||
1914 | /* Reset the timer */ | ||
1915 | if (!test_bit(__IGBVF_DOWN, &adapter->state)) | ||
1916 | mod_timer(&adapter->watchdog_timer, | ||
1917 | round_jiffies(jiffies + (2 * HZ))); | ||
1918 | } | ||
1919 | |||
1920 | #define IGBVF_TX_FLAGS_CSUM 0x00000001 | ||
1921 | #define IGBVF_TX_FLAGS_VLAN 0x00000002 | ||
1922 | #define IGBVF_TX_FLAGS_TSO 0x00000004 | ||
1923 | #define IGBVF_TX_FLAGS_IPV4 0x00000008 | ||
1924 | #define IGBVF_TX_FLAGS_VLAN_MASK 0xffff0000 | ||
1925 | #define IGBVF_TX_FLAGS_VLAN_SHIFT 16 | ||
1926 | |||
1927 | static int igbvf_tso(struct igbvf_adapter *adapter, | ||
1928 | struct igbvf_ring *tx_ring, | ||
1929 | struct sk_buff *skb, u32 tx_flags, u8 *hdr_len) | ||
1930 | { | ||
1931 | struct e1000_adv_tx_context_desc *context_desc; | ||
1932 | unsigned int i; | ||
1933 | int err; | ||
1934 | struct igbvf_buffer *buffer_info; | ||
1935 | u32 info = 0, tu_cmd = 0; | ||
1936 | u32 mss_l4len_idx, l4len; | ||
1937 | *hdr_len = 0; | ||
1938 | |||
1939 | if (skb_header_cloned(skb)) { | ||
1940 | err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); | ||
1941 | if (err) { | ||
1942 | dev_err(&adapter->pdev->dev, | ||
1943 | "igbvf_tso returning an error\n"); | ||
1944 | return err; | ||
1945 | } | ||
1946 | } | ||
1947 | |||
1948 | l4len = tcp_hdrlen(skb); | ||
1949 | *hdr_len += l4len; | ||
1950 | |||
1951 | if (skb->protocol == htons(ETH_P_IP)) { | ||
1952 | struct iphdr *iph = ip_hdr(skb); | ||
1953 | iph->tot_len = 0; | ||
1954 | iph->check = 0; | ||
1955 | tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, | ||
1956 | iph->daddr, 0, | ||
1957 | IPPROTO_TCP, | ||
1958 | 0); | ||
1959 | } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) { | ||
1960 | ipv6_hdr(skb)->payload_len = 0; | ||
1961 | tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, | ||
1962 | &ipv6_hdr(skb)->daddr, | ||
1963 | 0, IPPROTO_TCP, 0); | ||
1964 | } | ||
1965 | |||
1966 | i = tx_ring->next_to_use; | ||
1967 | |||
1968 | buffer_info = &tx_ring->buffer_info[i]; | ||
1969 | context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i); | ||
1970 | /* VLAN MACLEN IPLEN */ | ||
1971 | if (tx_flags & IGBVF_TX_FLAGS_VLAN) | ||
1972 | info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK); | ||
1973 | info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT); | ||
1974 | *hdr_len += skb_network_offset(skb); | ||
1975 | info |= (skb_transport_header(skb) - skb_network_header(skb)); | ||
1976 | *hdr_len += (skb_transport_header(skb) - skb_network_header(skb)); | ||
1977 | context_desc->vlan_macip_lens = cpu_to_le32(info); | ||
1978 | |||
1979 | /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ | ||
1980 | tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT); | ||
1981 | |||
1982 | if (skb->protocol == htons(ETH_P_IP)) | ||
1983 | tu_cmd |= E1000_ADVTXD_TUCMD_IPV4; | ||
1984 | tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; | ||
1985 | |||
1986 | context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd); | ||
1987 | |||
1988 | /* MSS L4LEN IDX */ | ||
1989 | mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT); | ||
1990 | mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT); | ||
1991 | |||
1992 | context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); | ||
1993 | context_desc->seqnum_seed = 0; | ||
1994 | |||
1995 | buffer_info->time_stamp = jiffies; | ||
1996 | buffer_info->next_to_watch = i; | ||
1997 | buffer_info->dma = 0; | ||
1998 | i++; | ||
1999 | if (i == tx_ring->count) | ||
2000 | i = 0; | ||
2001 | |||
2002 | tx_ring->next_to_use = i; | ||
2003 | |||
2004 | return true; | ||
2005 | } | ||
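/*
 * Editorial note (not part of the driver): by the end of igbvf_tso() the
 * *hdr_len out-parameter equals the full header length of the frame -
 * L2 header (skb_network_offset) + IP header (transport minus network
 * header) + TCP header (tcp_hdrlen) - which igbvf_tx_queue_adv() later
 * subtracts from skb->len to program the TSO payload length.
 */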
2006 | |||
2007 | static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter, | ||
2008 | struct igbvf_ring *tx_ring, | ||
2009 | struct sk_buff *skb, u32 tx_flags) | ||
2010 | { | ||
2011 | struct e1000_adv_tx_context_desc *context_desc; | ||
2012 | unsigned int i; | ||
2013 | struct igbvf_buffer *buffer_info; | ||
2014 | u32 info = 0, tu_cmd = 0; | ||
2015 | |||
2016 | if ((skb->ip_summed == CHECKSUM_PARTIAL) || | ||
2017 | (tx_flags & IGBVF_TX_FLAGS_VLAN)) { | ||
2018 | i = tx_ring->next_to_use; | ||
2019 | buffer_info = &tx_ring->buffer_info[i]; | ||
2020 | context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i); | ||
2021 | |||
2022 | if (tx_flags & IGBVF_TX_FLAGS_VLAN) | ||
2023 | info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK); | ||
2024 | |||
2025 | info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT); | ||
2026 | if (skb->ip_summed == CHECKSUM_PARTIAL) | ||
2027 | info |= (skb_transport_header(skb) - | ||
2028 | skb_network_header(skb)); | ||
2029 | |||
2030 | |||
2031 | context_desc->vlan_macip_lens = cpu_to_le32(info); | ||
2032 | |||
2033 | tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT); | ||
2034 | |||
2035 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | ||
2036 | switch (skb->protocol) { | ||
2037 | case __constant_htons(ETH_P_IP): | ||
2038 | tu_cmd |= E1000_ADVTXD_TUCMD_IPV4; | ||
2039 | if (ip_hdr(skb)->protocol == IPPROTO_TCP) | ||
2040 | tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; | ||
2041 | break; | ||
2042 | case __constant_htons(ETH_P_IPV6): | ||
2043 | if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) | ||
2044 | tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; | ||
2045 | break; | ||
2046 | default: | ||
2047 | break; | ||
2048 | } | ||
2049 | } | ||
2050 | |||
2051 | context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd); | ||
2052 | context_desc->seqnum_seed = 0; | ||
2053 | context_desc->mss_l4len_idx = 0; | ||
2054 | |||
2055 | buffer_info->time_stamp = jiffies; | ||
2056 | buffer_info->next_to_watch = i; | ||
2057 | buffer_info->dma = 0; | ||
2058 | i++; | ||
2059 | if (i == tx_ring->count) | ||
2060 | i = 0; | ||
2061 | tx_ring->next_to_use = i; | ||
2062 | |||
2063 | return true; | ||
2064 | } | ||
2065 | |||
2066 | return false; | ||
2067 | } | ||
2068 | |||
2069 | static int igbvf_maybe_stop_tx(struct net_device *netdev, int size) | ||
2070 | { | ||
2071 | struct igbvf_adapter *adapter = netdev_priv(netdev); | ||
2072 | |||
2073 | /* if there are enough descriptors then we don't need to worry */ | ||
2074 | if (igbvf_desc_unused(adapter->tx_ring) >= size) | ||
2075 | return 0; | ||
2076 | |||
2077 | netif_stop_queue(netdev); | ||
2078 | |||
2079 | smp_mb(); | ||
2080 | |||
2081 | /* We need to check again just in case room has been made available */ | ||
2082 | if (igbvf_desc_unused(adapter->tx_ring) < size) | ||
2083 | return -EBUSY; | ||
2084 | |||
2085 | netif_wake_queue(netdev); | ||
2086 | |||
2087 | ++adapter->restart_queue; | ||
2088 | return 0; | ||
2089 | } | ||
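/*
 * Editorial note (not part of the driver): the stop-then-recheck sequence in
 * igbvf_maybe_stop_tx() is the standard Tx flow-control pattern - stop the
 * queue, issue a full memory barrier so the stopped state is visible to the
 * clean-up path, then test the free-descriptor count again in case
 * igbvf_clean_tx_irq() reclaimed descriptors in the meantime; if it did, the
 * queue is woken immediately instead of returning -EBUSY to the stack.
 */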
2090 | |||
2091 | #define IGBVF_MAX_TXD_PWR 16 | ||
2092 | #define IGBVF_MAX_DATA_PER_TXD (1 << IGBVF_MAX_TXD_PWR) | ||
2093 | |||
2094 | static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter, | ||
2095 | struct igbvf_ring *tx_ring, | ||
2096 | struct sk_buff *skb, | ||
2097 | unsigned int first) | ||
2098 | { | ||
2099 | struct igbvf_buffer *buffer_info; | ||
2100 | unsigned int len = skb_headlen(skb); | ||
2101 | unsigned int count = 0, i; | ||
2102 | unsigned int f; | ||
2103 | dma_addr_t *map; | ||
2104 | |||
2105 | i = tx_ring->next_to_use; | ||
2106 | |||
2107 | if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) { | ||
2108 | dev_err(&adapter->pdev->dev, "TX DMA map failed\n"); | ||
2109 | return 0; | ||
2110 | } | ||
2111 | |||
2112 | map = skb_shinfo(skb)->dma_maps; | ||
2113 | |||
2114 | buffer_info = &tx_ring->buffer_info[i]; | ||
2115 | BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD); | ||
2116 | buffer_info->length = len; | ||
2117 | /* set time_stamp *before* dma to help avoid a possible race */ | ||
2118 | buffer_info->time_stamp = jiffies; | ||
2119 | buffer_info->next_to_watch = i; | ||
2120 | buffer_info->dma = map[count]; | ||
2121 | count++; | ||
2122 | |||
2123 | for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { | ||
2124 | struct skb_frag_struct *frag; | ||
2125 | |||
2126 | i++; | ||
2127 | if (i == tx_ring->count) | ||
2128 | i = 0; | ||
2129 | |||
2130 | frag = &skb_shinfo(skb)->frags[f]; | ||
2131 | len = frag->size; | ||
2132 | |||
2133 | buffer_info = &tx_ring->buffer_info[i]; | ||
2134 | BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD); | ||
2135 | buffer_info->length = len; | ||
2136 | buffer_info->time_stamp = jiffies; | ||
2137 | buffer_info->next_to_watch = i; | ||
2138 | buffer_info->dma = map[count]; | ||
2139 | count++; | ||
2140 | } | ||
2141 | |||
2142 | tx_ring->buffer_info[i].skb = skb; | ||
2143 | tx_ring->buffer_info[first].next_to_watch = i; | ||
2144 | |||
2145 | return count; | ||
2146 | } | ||
2147 | |||
2148 | static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter, | ||
2149 | struct igbvf_ring *tx_ring, | ||
2150 | int tx_flags, int count, u32 paylen, | ||
2151 | u8 hdr_len) | ||
2152 | { | ||
2153 | union e1000_adv_tx_desc *tx_desc = NULL; | ||
2154 | struct igbvf_buffer *buffer_info; | ||
2155 | u32 olinfo_status = 0, cmd_type_len; | ||
2156 | unsigned int i; | ||
2157 | |||
2158 | cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS | | ||
2159 | E1000_ADVTXD_DCMD_DEXT); | ||
2160 | |||
2161 | if (tx_flags & IGBVF_TX_FLAGS_VLAN) | ||
2162 | cmd_type_len |= E1000_ADVTXD_DCMD_VLE; | ||
2163 | |||
2164 | if (tx_flags & IGBVF_TX_FLAGS_TSO) { | ||
2165 | cmd_type_len |= E1000_ADVTXD_DCMD_TSE; | ||
2166 | |||
2167 | /* insert tcp checksum */ | ||
2168 | olinfo_status |= E1000_TXD_POPTS_TXSM << 8; | ||
2169 | |||
2170 | /* insert ip checksum */ | ||
2171 | if (tx_flags & IGBVF_TX_FLAGS_IPV4) | ||
2172 | olinfo_status |= E1000_TXD_POPTS_IXSM << 8; | ||
2173 | |||
2174 | } else if (tx_flags & IGBVF_TX_FLAGS_CSUM) { | ||
2175 | olinfo_status |= E1000_TXD_POPTS_TXSM << 8; | ||
2176 | } | ||
2177 | |||
2178 | olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT); | ||
2179 | |||
2180 | i = tx_ring->next_to_use; | ||
2181 | while (count--) { | ||
2182 | buffer_info = &tx_ring->buffer_info[i]; | ||
2183 | tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i); | ||
2184 | tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); | ||
2185 | tx_desc->read.cmd_type_len = | ||
2186 | cpu_to_le32(cmd_type_len | buffer_info->length); | ||
2187 | tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); | ||
2188 | i++; | ||
2189 | if (i == tx_ring->count) | ||
2190 | i = 0; | ||
2191 | } | ||
2192 | |||
2193 | tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd); | ||
2194 | /* Force memory writes to complete before letting h/w | ||
2195 | * know there are new descriptors to fetch. (Only | ||
2196 | * applicable for weak-ordered memory model archs, | ||
2197 | * such as IA-64). */ | ||
2198 | wmb(); | ||
2199 | |||
2200 | tx_ring->next_to_use = i; | ||
2201 | writel(i, adapter->hw.hw_addr + tx_ring->tail); | ||
2202 | /* we need this if more than one processor can write to our tail | ||
2203 | * at a time, it synchronizes IO on IA64/Altix systems */ | ||
2204 | mmiowb(); | ||
2205 | } | ||
2206 | |||
2207 | static int igbvf_xmit_frame_ring_adv(struct sk_buff *skb, | ||
2208 | struct net_device *netdev, | ||
2209 | struct igbvf_ring *tx_ring) | ||
2210 | { | ||
2211 | struct igbvf_adapter *adapter = netdev_priv(netdev); | ||
2212 | unsigned int first, tx_flags = 0; | ||
2213 | u8 hdr_len = 0; | ||
2214 | int count = 0; | ||
2215 | int tso = 0; | ||
2216 | |||
2217 | if (test_bit(__IGBVF_DOWN, &adapter->state)) { | ||
2218 | dev_kfree_skb_any(skb); | ||
2219 | return NETDEV_TX_OK; | ||
2220 | } | ||
2221 | |||
2222 | if (skb->len <= 0) { | ||
2223 | dev_kfree_skb_any(skb); | ||
2224 | return NETDEV_TX_OK; | ||
2225 | } | ||
2226 | |||
2227 | /* | ||
2228 | * need: count + 4 descriptors: | ||
2229 | * + 2 desc gap to keep tail from touching head, | ||
2230 | * + 1 desc for skb->data, | ||
2231 | * + 1 desc for context descriptor, | ||
2232 | * otherwise try next time | ||
2233 | */ | ||
2234 | if (igbvf_maybe_stop_tx(netdev, skb_shinfo(skb)->nr_frags + 4)) { | ||
2235 | /* this is a hard error */ | ||
2236 | return NETDEV_TX_BUSY; | ||
2237 | } | ||
2238 | |||
2239 | if (adapter->vlgrp && vlan_tx_tag_present(skb)) { | ||
2240 | tx_flags |= IGBVF_TX_FLAGS_VLAN; | ||
2241 | tx_flags |= (vlan_tx_tag_get(skb) << IGBVF_TX_FLAGS_VLAN_SHIFT); | ||
2242 | } | ||
2243 | |||
2244 | if (skb->protocol == htons(ETH_P_IP)) | ||
2245 | tx_flags |= IGBVF_TX_FLAGS_IPV4; | ||
2246 | |||
2247 | first = tx_ring->next_to_use; | ||
2248 | |||
2249 | tso = skb_is_gso(skb) ? | ||
2250 | igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len) : 0; | ||
2251 | if (unlikely(tso < 0)) { | ||
2252 | dev_kfree_skb_any(skb); | ||
2253 | return NETDEV_TX_OK; | ||
2254 | } | ||
2255 | |||
2256 | if (tso) | ||
2257 | tx_flags |= IGBVF_TX_FLAGS_TSO; | ||
2258 | else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags) && | ||
2259 | (skb->ip_summed == CHECKSUM_PARTIAL)) | ||
2260 | tx_flags |= IGBVF_TX_FLAGS_CSUM; | ||
2261 | |||
2262 | /* | ||
2263 | * count reflects descriptors mapped, if 0 then mapping error | ||
2264 | * has occurred and we need to rewind the descriptor queue | ||
2265 | */ | ||
2266 | count = igbvf_tx_map_adv(adapter, tx_ring, skb, first); | ||
2267 | |||
2268 | if (count) { | ||
2269 | igbvf_tx_queue_adv(adapter, tx_ring, tx_flags, count, | ||
2270 | skb->len, hdr_len); | ||
2271 | netdev->trans_start = jiffies; | ||
2272 | /* Make sure there is space in the ring for the next send. */ | ||
2273 | igbvf_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 4); | ||
2274 | } else { | ||
2275 | dev_kfree_skb_any(skb); | ||
2276 | tx_ring->buffer_info[first].time_stamp = 0; | ||
2277 | tx_ring->next_to_use = first; | ||
2278 | } | ||
2279 | |||
2280 | return NETDEV_TX_OK; | ||
2281 | } | ||
2282 | |||
2283 | static int igbvf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | ||
2284 | { | ||
2285 | struct igbvf_adapter *adapter = netdev_priv(netdev); | ||
2286 | struct igbvf_ring *tx_ring; | ||
2287 | int retval; | ||
2288 | |||
2289 | if (test_bit(__IGBVF_DOWN, &adapter->state)) { | ||
2290 | dev_kfree_skb_any(skb); | ||
2291 | return NETDEV_TX_OK; | ||
2292 | } | ||
2293 | |||
2294 | tx_ring = &adapter->tx_ring[0]; | ||
2295 | |||
2296 | retval = igbvf_xmit_frame_ring_adv(skb, netdev, tx_ring); | ||
2297 | |||
2298 | return retval; | ||
2299 | } | ||
2300 | |||
2301 | /** | ||
2302 | * igbvf_tx_timeout - Respond to a Tx Hang | ||
2303 | * @netdev: network interface device structure | ||
2304 | **/ | ||
2305 | static void igbvf_tx_timeout(struct net_device *netdev) | ||
2306 | { | ||
2307 | struct igbvf_adapter *adapter = netdev_priv(netdev); | ||
2308 | |||
2309 | /* Do the reset outside of interrupt context */ | ||
2310 | adapter->tx_timeout_count++; | ||
2311 | schedule_work(&adapter->reset_task); | ||
2312 | } | ||
2313 | |||
2314 | static void igbvf_reset_task(struct work_struct *work) | ||
2315 | { | ||
2316 | struct igbvf_adapter *adapter; | ||
2317 | adapter = container_of(work, struct igbvf_adapter, reset_task); | ||
2318 | |||
2319 | igbvf_reinit_locked(adapter); | ||
2320 | } | ||
2321 | |||
2322 | /** | ||
2323 | * igbvf_get_stats - Get System Network Statistics | ||
2324 | * @netdev: network interface device structure | ||
2325 | * | ||
2326 | * Returns the address of the device statistics structure. | ||
2327 | * The statistics are actually updated from the timer callback. | ||
2328 | **/ | ||
2329 | static struct net_device_stats *igbvf_get_stats(struct net_device *netdev) | ||
2330 | { | ||
2331 | struct igbvf_adapter *adapter = netdev_priv(netdev); | ||
2332 | |||
2333 | /* only return the current stats */ | ||
2334 | return &adapter->net_stats; | ||
2335 | } | ||
2336 | |||
2337 | /** | ||
2338 | * igbvf_change_mtu - Change the Maximum Transfer Unit | ||
2339 | * @netdev: network interface device structure | ||
2340 | * @new_mtu: new value for maximum frame size | ||
2341 | * | ||
2342 | * Returns 0 on success, negative on failure | ||
2343 | **/ | ||
2344 | static int igbvf_change_mtu(struct net_device *netdev, int new_mtu) | ||
2345 | { | ||
2346 | struct igbvf_adapter *adapter = netdev_priv(netdev); | ||
2347 | int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; | ||
2348 | |||
2349 | if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) { | ||
2350 | dev_err(&adapter->pdev->dev, "Invalid MTU setting\n"); | ||
2351 | return -EINVAL; | ||
2352 | } | ||
2353 | |||
2354 | /* Jumbo frame size limits */ | ||
2355 | if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) { | ||
2356 | if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) { | ||
2357 | dev_err(&adapter->pdev->dev, | ||
2358 | "Jumbo Frames not supported.\n"); | ||
2359 | return -EINVAL; | ||
2360 | } | ||
2361 | } | ||
2362 | |||
2363 | #define MAX_STD_JUMBO_FRAME_SIZE 9234 | ||
2364 | if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) { | ||
2365 | dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n"); | ||
2366 | return -EINVAL; | ||
2367 | } | ||
2368 | |||
2369 | while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) | ||
2370 | msleep(1); | ||
2371 | /* igbvf_down has a dependency on max_frame_size */ | ||
2372 | adapter->max_frame_size = max_frame; | ||
2373 | if (netif_running(netdev)) | ||
2374 | igbvf_down(adapter); | ||
2375 | |||
2376 | /* | ||
2377 | * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN | ||
2378 | * means we reserve 2 more, this pushes us to allocate from the next | ||
2379 | * larger slab size. | ||
2380 | * i.e. RXBUFFER_2048 --> size-4096 slab | ||
2381 | * However with the new *_jumbo_rx* routines, jumbo receives will use | ||
2382 | * fragmented skbs | ||
2383 | */ | ||
2384 | |||
2385 | if (max_frame <= 1024) | ||
2386 | adapter->rx_buffer_len = 1024; | ||
2387 | else if (max_frame <= 2048) | ||
2388 | adapter->rx_buffer_len = 2048; | ||
2389 | else | ||
2390 | #if (PAGE_SIZE / 2) > 16384 | ||
2391 | adapter->rx_buffer_len = 16384; | ||
2392 | #else | ||
2393 | adapter->rx_buffer_len = PAGE_SIZE / 2; | ||
2394 | #endif | ||
2395 | |||
2396 | |||
2397 | /* adjust allocation if LPE protects us, and we aren't using SBP */ | ||
2398 | if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) || | ||
2399 | (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)) | ||
2400 | adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + | ||
2401 | ETH_FCS_LEN; | ||
2402 | |||
2403 | dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n", | ||
2404 | netdev->mtu, new_mtu); | ||
2405 | netdev->mtu = new_mtu; | ||
2406 | |||
2407 | if (netif_running(netdev)) | ||
2408 | igbvf_up(adapter); | ||
2409 | else | ||
2410 | igbvf_reset(adapter); | ||
2411 | |||
2412 | clear_bit(__IGBVF_RESETTING, &adapter->state); | ||
2413 | |||
2414 | return 0; | ||
2415 | } | ||
2416 | |||
2417 | static int igbvf_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | ||
2418 | { | ||
2419 | switch (cmd) { | ||
2420 | default: | ||
2421 | return -EOPNOTSUPP; | ||
2422 | } | ||
2423 | } | ||
2424 | |||
2425 | static int igbvf_suspend(struct pci_dev *pdev, pm_message_t state) | ||
2426 | { | ||
2427 | struct net_device *netdev = pci_get_drvdata(pdev); | ||
2428 | struct igbvf_adapter *adapter = netdev_priv(netdev); | ||
2429 | #ifdef CONFIG_PM | ||
2430 | int retval = 0; | ||
2431 | #endif | ||
2432 | |||
2433 | netif_device_detach(netdev); | ||
2434 | |||
2435 | if (netif_running(netdev)) { | ||
2436 | WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state)); | ||
2437 | igbvf_down(adapter); | ||
2438 | igbvf_free_irq(adapter); | ||
2439 | } | ||
2440 | |||
2441 | #ifdef CONFIG_PM | ||
2442 | retval = pci_save_state(pdev); | ||
2443 | if (retval) | ||
2444 | return retval; | ||
2445 | #endif | ||
2446 | |||
2447 | pci_disable_device(pdev); | ||
2448 | |||
2449 | return 0; | ||
2450 | } | ||
2451 | |||
2452 | #ifdef CONFIG_PM | ||
2453 | static int igbvf_resume(struct pci_dev *pdev) | ||
2454 | { | ||
2455 | struct net_device *netdev = pci_get_drvdata(pdev); | ||
2456 | struct igbvf_adapter *adapter = netdev_priv(netdev); | ||
2457 | u32 err; | ||
2458 | |||
2459 | pci_restore_state(pdev); | ||
2460 | err = pci_enable_device_mem(pdev); | ||
2461 | if (err) { | ||
2462 | dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n"); | ||
2463 | return err; | ||
2464 | } | ||
2465 | |||
2466 | pci_set_master(pdev); | ||
2467 | |||
2468 | if (netif_running(netdev)) { | ||
2469 | err = igbvf_request_irq(adapter); | ||
2470 | if (err) | ||
2471 | return err; | ||
2472 | } | ||
2473 | |||
2474 | igbvf_reset(adapter); | ||
2475 | |||
2476 | if (netif_running(netdev)) | ||
2477 | igbvf_up(adapter); | ||
2478 | |||
2479 | netif_device_attach(netdev); | ||
2480 | |||
2481 | return 0; | ||
2482 | } | ||
2483 | #endif | ||
2484 | |||
2485 | static void igbvf_shutdown(struct pci_dev *pdev) | ||
2486 | { | ||
2487 | igbvf_suspend(pdev, PMSG_SUSPEND); | ||
2488 | } | ||
2489 | |||
2490 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
2491 | /* | ||
2492 | * Polling 'interrupt' - used by things like netconsole to send skbs | ||
2493 | * without having to re-enable interrupts. It's not called while | ||
2494 | * the interrupt routine is executing. | ||
2495 | */ | ||
2496 | static void igbvf_netpoll(struct net_device *netdev) | ||
2497 | { | ||
2498 | struct igbvf_adapter *adapter = netdev_priv(netdev); | ||
2499 | |||
2500 | disable_irq(adapter->pdev->irq); | ||
2501 | |||
2502 | igbvf_clean_tx_irq(adapter->tx_ring); | ||
2503 | |||
2504 | enable_irq(adapter->pdev->irq); | ||
2505 | } | ||
2506 | #endif | ||
2507 | |||
2508 | /** | ||
2509 | * igbvf_io_error_detected - called when PCI error is detected | ||
2510 | * @pdev: Pointer to PCI device | ||
2511 | * @state: The current pci connection state | ||
2512 | * | ||
2513 | * This function is called after a PCI bus error affecting | ||
2514 | * this device has been detected. | ||
2515 | */ | ||
2516 | static pci_ers_result_t igbvf_io_error_detected(struct pci_dev *pdev, | ||
2517 | pci_channel_state_t state) | ||
2518 | { | ||
2519 | struct net_device *netdev = pci_get_drvdata(pdev); | ||
2520 | struct igbvf_adapter *adapter = netdev_priv(netdev); | ||
2521 | |||
2522 | netif_device_detach(netdev); | ||
2523 | |||
2524 | if (netif_running(netdev)) | ||
2525 | igbvf_down(adapter); | ||
2526 | pci_disable_device(pdev); | ||
2527 | |||
2528 | /* Request a slot reset. */ | ||
2529 | return PCI_ERS_RESULT_NEED_RESET; | ||
2530 | } | ||
2531 | |||
2532 | /** | ||
2533 | * igbvf_io_slot_reset - called after the pci bus has been reset. | ||
2534 | * @pdev: Pointer to PCI device | ||
2535 | * | ||
2536 | * Restart the card from scratch, as if from a cold boot. Implementation | ||
2537 | * resembles the first half of the igbvf_resume routine. | ||
2538 | */ | ||
2539 | static pci_ers_result_t igbvf_io_slot_reset(struct pci_dev *pdev) | ||
2540 | { | ||
2541 | struct net_device *netdev = pci_get_drvdata(pdev); | ||
2542 | struct igbvf_adapter *adapter = netdev_priv(netdev); | ||
2543 | |||
2544 | if (pci_enable_device_mem(pdev)) { | ||
2545 | dev_err(&pdev->dev, | ||
2546 | "Cannot re-enable PCI device after reset.\n"); | ||
2547 | return PCI_ERS_RESULT_DISCONNECT; | ||
2548 | } | ||
2549 | pci_set_master(pdev); | ||
2550 | |||
2551 | igbvf_reset(adapter); | ||
2552 | |||
2553 | return PCI_ERS_RESULT_RECOVERED; | ||
2554 | } | ||
2555 | |||
2556 | /** | ||
2557 | * igbvf_io_resume - called when traffic can start flowing again. | ||
2558 | * @pdev: Pointer to PCI device | ||
2559 | * | ||
2560 | * This callback is called when the error recovery driver tells us that | ||
2561 | * it's OK to resume normal operation. Implementation resembles the | ||
2562 | * second half of the igbvf_resume routine. | ||
2563 | */ | ||
2564 | static void igbvf_io_resume(struct pci_dev *pdev) | ||
2565 | { | ||
2566 | struct net_device *netdev = pci_get_drvdata(pdev); | ||
2567 | struct igbvf_adapter *adapter = netdev_priv(netdev); | ||
2568 | |||
2569 | if (netif_running(netdev)) { | ||
2570 | if (igbvf_up(adapter)) { | ||
2571 | dev_err(&pdev->dev, | ||
2572 | "can't bring device back up after reset\n"); | ||
2573 | return; | ||
2574 | } | ||
2575 | } | ||
2576 | |||
2577 | netif_device_attach(netdev); | ||
2578 | } | ||
2579 | |||
2580 | static void igbvf_print_device_info(struct igbvf_adapter *adapter) | ||
2581 | { | ||
2582 | struct e1000_hw *hw = &adapter->hw; | ||
2583 | struct net_device *netdev = adapter->netdev; | ||
2584 | struct pci_dev *pdev = adapter->pdev; | ||
2585 | |||
2586 | dev_info(&pdev->dev, "Intel(R) 82576 Virtual Function\n"); | ||
2587 | dev_info(&pdev->dev, "Address: %02x:%02x:%02x:%02x:%02x:%02x\n", | ||
2588 | /* MAC address */ | ||
2589 | netdev->dev_addr[0], netdev->dev_addr[1], | ||
2590 | netdev->dev_addr[2], netdev->dev_addr[3], | ||
2591 | netdev->dev_addr[4], netdev->dev_addr[5]); | ||
2592 | dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type); | ||
2593 | } | ||
2594 | |||
2595 | static const struct net_device_ops igbvf_netdev_ops = { | ||
2596 | .ndo_open = igbvf_open, | ||
2597 | .ndo_stop = igbvf_close, | ||
2598 | .ndo_start_xmit = igbvf_xmit_frame, | ||
2599 | .ndo_get_stats = igbvf_get_stats, | ||
2600 | .ndo_set_multicast_list = igbvf_set_multi, | ||
2601 | .ndo_set_mac_address = igbvf_set_mac, | ||
2602 | .ndo_change_mtu = igbvf_change_mtu, | ||
2603 | .ndo_do_ioctl = igbvf_ioctl, | ||
2604 | .ndo_tx_timeout = igbvf_tx_timeout, | ||
2605 | .ndo_vlan_rx_register = igbvf_vlan_rx_register, | ||
2606 | .ndo_vlan_rx_add_vid = igbvf_vlan_rx_add_vid, | ||
2607 | .ndo_vlan_rx_kill_vid = igbvf_vlan_rx_kill_vid, | ||
2608 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
2609 | .ndo_poll_controller = igbvf_netpoll, | ||
2610 | #endif | ||
2611 | }; | ||
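For orientation, the ops table above is what the core networking stack calls into once netdev->netdev_ops points at it (the same conversion is applied to the ioc3 and isa-skeleton drivers later in this diff). The sketch below is illustrative only and is not part of the patch; the dispatch sites are paraphrased from how the core behaves, not quoted from it:

    /* Illustrative sketch (assumed core-side behaviour, not patch code):
     * once igbvf_probe() sets netdev->netdev_ops = &igbvf_netdev_ops,
     * the stack dispatches roughly like this. */
    const struct net_device_ops *ops = netdev->netdev_ops;

    err = ops->ndo_open(netdev);            /* -> igbvf_open()       */
    ops->ndo_start_xmit(skb, netdev);       /* -> igbvf_xmit_frame() */
    ops->ndo_set_multicast_list(netdev);    /* -> igbvf_set_multi()  */
    ops->ndo_stop(netdev);                  /* -> igbvf_close()      */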
2612 | |||
2613 | /** | ||
2614 | * igbvf_probe - Device Initialization Routine | ||
2615 | * @pdev: PCI device information struct | ||
2616 | * @ent: entry in igbvf_pci_tbl | ||
2617 | * | ||
2618 | * Returns 0 on success, negative on failure | ||
2619 | * | ||
2620 | * igbvf_probe initializes an adapter identified by a pci_dev structure. | ||
2621 | * The OS initialization, configuring of the adapter private structure, | ||
2622 | * and a hardware reset occur. | ||
2623 | **/ | ||
2624 | static int __devinit igbvf_probe(struct pci_dev *pdev, | ||
2625 | const struct pci_device_id *ent) | ||
2626 | { | ||
2627 | struct net_device *netdev; | ||
2628 | struct igbvf_adapter *adapter; | ||
2629 | struct e1000_hw *hw; | ||
2630 | const struct igbvf_info *ei = igbvf_info_tbl[ent->driver_data]; | ||
2631 | |||
2632 | static int cards_found; | ||
2633 | int err, pci_using_dac; | ||
2634 | |||
2635 | err = pci_enable_device_mem(pdev); | ||
2636 | if (err) | ||
2637 | return err; | ||
2638 | |||
2639 | pci_using_dac = 0; | ||
2640 | err = pci_set_dma_mask(pdev, DMA_64BIT_MASK); | ||
2641 | if (!err) { | ||
2642 | err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); | ||
2643 | if (!err) | ||
2644 | pci_using_dac = 1; | ||
2645 | } else { | ||
2646 | err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); | ||
2647 | if (err) { | ||
2648 | err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); | ||
2649 | if (err) { | ||
2650 | dev_err(&pdev->dev, "No usable DMA " | ||
2651 | "configuration, aborting\n"); | ||
2652 | goto err_dma; | ||
2653 | } | ||
2654 | } | ||
2655 | } | ||
2656 | |||
2657 | err = pci_request_regions(pdev, igbvf_driver_name); | ||
2658 | if (err) | ||
2659 | goto err_pci_reg; | ||
2660 | |||
2661 | pci_set_master(pdev); | ||
2662 | |||
2663 | err = -ENOMEM; | ||
2664 | netdev = alloc_etherdev(sizeof(struct igbvf_adapter)); | ||
2665 | if (!netdev) | ||
2666 | goto err_alloc_etherdev; | ||
2667 | |||
2668 | SET_NETDEV_DEV(netdev, &pdev->dev); | ||
2669 | |||
2670 | pci_set_drvdata(pdev, netdev); | ||
2671 | adapter = netdev_priv(netdev); | ||
2672 | hw = &adapter->hw; | ||
2673 | adapter->netdev = netdev; | ||
2674 | adapter->pdev = pdev; | ||
2675 | adapter->ei = ei; | ||
2676 | adapter->pba = ei->pba; | ||
2677 | adapter->flags = ei->flags; | ||
2678 | adapter->hw.back = adapter; | ||
2679 | adapter->hw.mac.type = ei->mac; | ||
2680 | adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1; | ||
2681 | |||
2682 | /* PCI config space info */ | ||
2683 | |||
2684 | hw->vendor_id = pdev->vendor; | ||
2685 | hw->device_id = pdev->device; | ||
2686 | hw->subsystem_vendor_id = pdev->subsystem_vendor; | ||
2687 | hw->subsystem_device_id = pdev->subsystem_device; | ||
2688 | |||
2689 | pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); | ||
2690 | |||
2691 | err = -EIO; | ||
2692 | adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0), | ||
2693 | pci_resource_len(pdev, 0)); | ||
2694 | |||
2695 | if (!adapter->hw.hw_addr) | ||
2696 | goto err_ioremap; | ||
2697 | |||
2698 | if (ei->get_variants) { | ||
2699 | err = ei->get_variants(adapter); | ||
2700 | if (err) | ||
2701 | goto err_ioremap; | ||
2702 | } | ||
2703 | |||
2704 | /* setup adapter struct */ | ||
2705 | err = igbvf_sw_init(adapter); | ||
2706 | if (err) | ||
2707 | goto err_sw_init; | ||
2708 | |||
2709 | /* construct the net_device struct */ | ||
2710 | netdev->netdev_ops = &igbvf_netdev_ops; | ||
2711 | |||
2712 | igbvf_set_ethtool_ops(netdev); | ||
2713 | netdev->watchdog_timeo = 5 * HZ; | ||
2714 | strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); | ||
2715 | |||
2716 | adapter->bd_number = cards_found++; | ||
2717 | |||
2718 | netdev->features = NETIF_F_SG | | ||
2719 | NETIF_F_IP_CSUM | | ||
2720 | NETIF_F_HW_VLAN_TX | | ||
2721 | NETIF_F_HW_VLAN_RX | | ||
2722 | NETIF_F_HW_VLAN_FILTER; | ||
2723 | |||
2724 | netdev->features |= NETIF_F_IPV6_CSUM; | ||
2725 | netdev->features |= NETIF_F_TSO; | ||
2726 | netdev->features |= NETIF_F_TSO6; | ||
2727 | |||
2728 | if (pci_using_dac) | ||
2729 | netdev->features |= NETIF_F_HIGHDMA; | ||
2730 | |||
2731 | netdev->vlan_features |= NETIF_F_TSO; | ||
2732 | netdev->vlan_features |= NETIF_F_TSO6; | ||
2733 | netdev->vlan_features |= NETIF_F_IP_CSUM; | ||
2734 | netdev->vlan_features |= NETIF_F_IPV6_CSUM; | ||
2735 | netdev->vlan_features |= NETIF_F_SG; | ||
2736 | |||
2737 | /* reset the controller to put the device in a known good state */ | ||
2738 | err = hw->mac.ops.reset_hw(hw); | ||
2739 | if (err) { | ||
2740 | dev_info(&pdev->dev, | ||
2741 | "PF still in reset state, assigning new address\n"); | ||
2742 | random_ether_addr(hw->mac.addr); | ||
2743 | } else { | ||
2744 | err = hw->mac.ops.read_mac_addr(hw); | ||
2745 | if (err) { | ||
2746 | dev_err(&pdev->dev, "Error reading MAC address\n"); | ||
2747 | goto err_hw_init; | ||
2748 | } | ||
2749 | } | ||
2750 | |||
2751 | memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); | ||
2752 | memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); | ||
2753 | |||
2754 | if (!is_valid_ether_addr(netdev->perm_addr)) { | ||
2755 | dev_err(&pdev->dev, "Invalid MAC Address: " | ||
2756 | "%02x:%02x:%02x:%02x:%02x:%02x\n", | ||
2757 | netdev->dev_addr[0], netdev->dev_addr[1], | ||
2758 | netdev->dev_addr[2], netdev->dev_addr[3], | ||
2759 | netdev->dev_addr[4], netdev->dev_addr[5]); | ||
2760 | err = -EIO; | ||
2761 | goto err_hw_init; | ||
2762 | } | ||
2763 | |||
2764 | setup_timer(&adapter->watchdog_timer, &igbvf_watchdog, | ||
2765 | (unsigned long) adapter); | ||
2766 | |||
2767 | INIT_WORK(&adapter->reset_task, igbvf_reset_task); | ||
2768 | INIT_WORK(&adapter->watchdog_task, igbvf_watchdog_task); | ||
2769 | |||
2770 | /* ring size defaults */ | ||
2771 | adapter->rx_ring->count = 1024; | ||
2772 | adapter->tx_ring->count = 1024; | ||
2773 | |||
2774 | /* reset the hardware with the new settings */ | ||
2775 | igbvf_reset(adapter); | ||
2776 | |||
2777 | /* tell the stack to leave us alone until igbvf_open() is called */ | ||
2778 | netif_carrier_off(netdev); | ||
2779 | netif_stop_queue(netdev); | ||
2780 | |||
2781 | strcpy(netdev->name, "eth%d"); | ||
2782 | err = register_netdev(netdev); | ||
2783 | if (err) | ||
2784 | goto err_hw_init; | ||
2785 | |||
2786 | igbvf_print_device_info(adapter); | ||
2787 | |||
2788 | igbvf_initialize_last_counter_stats(adapter); | ||
2789 | |||
2790 | return 0; | ||
2791 | |||
2792 | err_hw_init: | ||
2793 | kfree(adapter->tx_ring); | ||
2794 | kfree(adapter->rx_ring); | ||
2795 | err_sw_init: | ||
2796 | igbvf_reset_interrupt_capability(adapter); | ||
2797 | iounmap(adapter->hw.hw_addr); | ||
2798 | err_ioremap: | ||
2799 | free_netdev(netdev); | ||
2800 | err_alloc_etherdev: | ||
2801 | pci_release_regions(pdev); | ||
2802 | err_pci_reg: | ||
2803 | err_dma: | ||
2804 | pci_disable_device(pdev); | ||
2805 | return err; | ||
2806 | } | ||
2807 | |||
2808 | /** | ||
2809 | * igbvf_remove - Device Removal Routine | ||
2810 | * @pdev: PCI device information struct | ||
2811 | * | ||
2812 | * igbvf_remove is called by the PCI subsystem to alert the driver | ||
2813 | * that it should release a PCI device. This could be caused by a | ||
2814 | * Hot-Plug event, or because the driver is going to be removed from | ||
2815 | * memory. | ||
2816 | **/ | ||
2817 | static void __devexit igbvf_remove(struct pci_dev *pdev) | ||
2818 | { | ||
2819 | struct net_device *netdev = pci_get_drvdata(pdev); | ||
2820 | struct igbvf_adapter *adapter = netdev_priv(netdev); | ||
2821 | struct e1000_hw *hw = &adapter->hw; | ||
2822 | |||
2823 | /* | ||
2824 | * flush_scheduled_work() may reschedule our watchdog task, so | ||
2825 | * explicitly disable watchdog tasks from being rescheduled | ||
2826 | */ | ||
2827 | set_bit(__IGBVF_DOWN, &adapter->state); | ||
2828 | del_timer_sync(&adapter->watchdog_timer); | ||
2829 | |||
2830 | flush_scheduled_work(); | ||
2831 | |||
2832 | unregister_netdev(netdev); | ||
2833 | |||
2834 | igbvf_reset_interrupt_capability(adapter); | ||
2835 | |||
2836 | /* | ||
2837 | * it is important to delete the napi struct prior to freeing the | ||
2838 | * rx ring so that you do not end up with null pointer refs | ||
2839 | */ | ||
2840 | netif_napi_del(&adapter->rx_ring->napi); | ||
2841 | kfree(adapter->tx_ring); | ||
2842 | kfree(adapter->rx_ring); | ||
2843 | |||
2844 | iounmap(hw->hw_addr); | ||
2845 | if (hw->flash_address) | ||
2846 | iounmap(hw->flash_address); | ||
2847 | pci_release_regions(pdev); | ||
2848 | |||
2849 | free_netdev(netdev); | ||
2850 | |||
2851 | pci_disable_device(pdev); | ||
2852 | } | ||
2853 | |||
2854 | /* PCI Error Recovery (ERS) */ | ||
2855 | static struct pci_error_handlers igbvf_err_handler = { | ||
2856 | .error_detected = igbvf_io_error_detected, | ||
2857 | .slot_reset = igbvf_io_slot_reset, | ||
2858 | .resume = igbvf_io_resume, | ||
2859 | }; | ||
2860 | |||
2861 | static struct pci_device_id igbvf_pci_tbl[] = { | ||
2862 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_VF), board_vf }, | ||
2863 | { } /* terminate list */ | ||
2864 | }; | ||
2865 | MODULE_DEVICE_TABLE(pci, igbvf_pci_tbl); | ||
2866 | |||
2867 | /* PCI Device API Driver */ | ||
2868 | static struct pci_driver igbvf_driver = { | ||
2869 | .name = igbvf_driver_name, | ||
2870 | .id_table = igbvf_pci_tbl, | ||
2871 | .probe = igbvf_probe, | ||
2872 | .remove = __devexit_p(igbvf_remove), | ||
2873 | #ifdef CONFIG_PM | ||
2874 | /* Power Management Hooks */ | ||
2875 | .suspend = igbvf_suspend, | ||
2876 | .resume = igbvf_resume, | ||
2877 | #endif | ||
2878 | .shutdown = igbvf_shutdown, | ||
2879 | .err_handler = &igbvf_err_handler | ||
2880 | }; | ||
2881 | |||
2882 | /** | ||
2883 | * igbvf_init_module - Driver Registration Routine | ||
2884 | * | ||
2885 | * igbvf_init_module is the first routine called when the driver is | ||
2886 | * loaded. All it does is register with the PCI subsystem. | ||
2887 | **/ | ||
2888 | static int __init igbvf_init_module(void) | ||
2889 | { | ||
2890 | int ret; | ||
2891 | printk(KERN_INFO "%s - version %s\n", | ||
2892 | igbvf_driver_string, igbvf_driver_version); | ||
2893 | printk(KERN_INFO "%s\n", igbvf_copyright); | ||
2894 | |||
2895 | ret = pci_register_driver(&igbvf_driver); | ||
2896 | pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, igbvf_driver_name, | ||
2897 | PM_QOS_DEFAULT_VALUE); | ||
2898 | |||
2899 | return ret; | ||
2900 | } | ||
2901 | module_init(igbvf_init_module); | ||
2902 | |||
2903 | /** | ||
2904 | * igbvf_exit_module - Driver Exit Cleanup Routine | ||
2905 | * | ||
2906 | * igbvf_exit_module is called just before the driver is removed | ||
2907 | * from memory. | ||
2908 | **/ | ||
2909 | static void __exit igbvf_exit_module(void) | ||
2910 | { | ||
2911 | pci_unregister_driver(&igbvf_driver); | ||
2912 | pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, igbvf_driver_name); | ||
2913 | } | ||
2914 | module_exit(igbvf_exit_module); | ||
2915 | |||
2916 | |||
2917 | MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>"); | ||
2918 | MODULE_DESCRIPTION("Intel(R) 82576 Virtual Function Network Driver"); | ||
2919 | MODULE_LICENSE("GPL"); | ||
2920 | MODULE_VERSION(DRV_VERSION); | ||
2921 | |||
2922 | /* netdev.c */ | ||
diff --git a/drivers/net/igbvf/regs.h b/drivers/net/igbvf/regs.h new file mode 100644 index 000000000000..b9e24ed70d0a --- /dev/null +++ b/drivers/net/igbvf/regs.h | |||
@@ -0,0 +1,108 @@ | |||
1 | /******************************************************************************* | ||
2 | |||
3 | Intel(R) 82576 Virtual Function Linux driver | ||
4 | Copyright(c) 2009 Intel Corporation. | ||
5 | |||
6 | This program is free software; you can redistribute it and/or modify it | ||
7 | under the terms and conditions of the GNU General Public License, | ||
8 | version 2, as published by the Free Software Foundation. | ||
9 | |||
10 | This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | more details. | ||
14 | |||
15 | You should have received a copy of the GNU General Public License along with | ||
16 | this program; if not, write to the Free Software Foundation, Inc., | ||
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | |||
19 | The full GNU General Public License is included in this distribution in | ||
20 | the file called "COPYING". | ||
21 | |||
22 | Contact Information: | ||
23 | e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | ||
24 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
25 | |||
26 | *******************************************************************************/ | ||
27 | |||
28 | #ifndef _E1000_REGS_H_ | ||
29 | #define _E1000_REGS_H_ | ||
30 | |||
31 | #define E1000_CTRL 0x00000 /* Device Control - RW */ | ||
32 | #define E1000_STATUS 0x00008 /* Device Status - RO */ | ||
33 | #define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ | ||
34 | #define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */ | ||
35 | #define E1000_EITR(_n) (0x01680 + (0x4 * (_n))) | ||
36 | #define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */ | ||
37 | #define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ | ||
38 | #define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ | ||
39 | #define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ | ||
40 | #define E1000_EIAM 0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */ | ||
41 | #define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */ | ||
42 | #define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */ | ||
43 | /* | ||
44 | * Convenience macros | ||
45 | * | ||
46 | * Note: "_n" is the queue number of the register to be written to. | ||
47 | * | ||
48 | * Example usage: | ||
49 | * E1000_RDBAL(current_rx_queue) | ||
50 | */ | ||
51 | #define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \ | ||
52 | (0x0C000 + ((_n) * 0x40))) | ||
53 | #define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \ | ||
54 | (0x0C004 + ((_n) * 0x40))) | ||
55 | #define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \ | ||
56 | (0x0C008 + ((_n) * 0x40))) | ||
57 | #define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \ | ||
58 | (0x0C00C + ((_n) * 0x40))) | ||
59 | #define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \ | ||
60 | (0x0C010 + ((_n) * 0x40))) | ||
61 | #define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \ | ||
62 | (0x0C018 + ((_n) * 0x40))) | ||
63 | #define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \ | ||
64 | (0x0C028 + ((_n) * 0x40))) | ||
65 | #define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \ | ||
66 | (0x0E000 + ((_n) * 0x40))) | ||
67 | #define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \ | ||
68 | (0x0E004 + ((_n) * 0x40))) | ||
69 | #define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \ | ||
70 | (0x0E008 + ((_n) * 0x40))) | ||
71 | #define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \ | ||
72 | (0x0E010 + ((_n) * 0x40))) | ||
73 | #define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \ | ||
74 | (0x0E018 + ((_n) * 0x40))) | ||
75 | #define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \ | ||
76 | (0x0E028 + ((_n) * 0x40))) | ||
77 | #define E1000_DCA_TXCTRL(_n) (0x03814 + (_n << 8)) | ||
78 | #define E1000_DCA_RXCTRL(_n) (0x02814 + (_n << 8)) | ||
79 | #define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ | ||
80 | (0x054E0 + ((_i - 16) * 8))) | ||
81 | #define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \ | ||
82 | (0x054E4 + ((_i - 16) * 8))) | ||
83 | |||
84 | /* Statistics registers */ | ||
85 | #define E1000_VFGPRC 0x00F10 | ||
86 | #define E1000_VFGORC 0x00F18 | ||
87 | #define E1000_VFMPRC 0x00F3C | ||
88 | #define E1000_VFGPTC 0x00F14 | ||
89 | #define E1000_VFGOTC 0x00F34 | ||
90 | #define E1000_VFGOTLBC 0x00F50 | ||
91 | #define E1000_VFGPTLBC 0x00F44 | ||
92 | #define E1000_VFGORLBC 0x00F48 | ||
93 | #define E1000_VFGPRLBC 0x00F40 | ||
94 | |||
95 | /* These act per VF so an array friendly macro is used */ | ||
96 | #define E1000_V2PMAILBOX(_n) (0x00C40 + (4 * (_n))) | ||
97 | #define E1000_VMBMEM(_n) (0x00800 + (64 * (_n))) | ||
98 | |||
99 | /* Define macros for handling registers */ | ||
100 | #define er32(reg) readl(hw->hw_addr + E1000_##reg) | ||
101 | #define ew32(reg, val) writel((val), hw->hw_addr + E1000_##reg) | ||
102 | #define array_er32(reg, offset) \ | ||
103 | readl(hw->hw_addr + E1000_##reg + (offset << 2)) | ||
104 | #define array_ew32(reg, offset, val) \ | ||
105 | writel((val), hw->hw_addr + E1000_##reg + (offset << 2)) | ||
106 | #define e1e_flush() er32(STATUS) | ||
107 | |||
108 | #endif | ||
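The er32()/ew32() helpers above build the register offset by token-pasting E1000_ onto the register name, and they assume a local struct e1000_hw *hw is in scope. A minimal usage sketch, not part of the patch itself:

    /* Assumes a 'struct e1000_hw *hw' in the enclosing function, since the
     * macros reference hw->hw_addr directly. */
    u32 status = er32(STATUS);   /* readl(hw->hw_addr + E1000_STATUS)            */
    ew32(EIMC, ~0U);             /* writel(~0U, hw->hw_addr + E1000_EIMC)        */
    e1e_flush();                 /* dummy STATUS read to post the MMIO write     */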
diff --git a/drivers/net/igbvf/vf.c b/drivers/net/igbvf/vf.c new file mode 100644 index 000000000000..2a4faf9ade69 --- /dev/null +++ b/drivers/net/igbvf/vf.c | |||
@@ -0,0 +1,398 @@ | |||
1 | /******************************************************************************* | ||
2 | |||
3 | Intel(R) 82576 Virtual Function Linux driver | ||
4 | Copyright(c) 2009 Intel Corporation. | ||
5 | |||
6 | This program is free software; you can redistribute it and/or modify it | ||
7 | under the terms and conditions of the GNU General Public License, | ||
8 | version 2, as published by the Free Software Foundation. | ||
9 | |||
10 | This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | more details. | ||
14 | |||
15 | You should have received a copy of the GNU General Public License along with | ||
16 | this program; if not, write to the Free Software Foundation, Inc., | ||
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | |||
19 | The full GNU General Public License is included in this distribution in | ||
20 | the file called "COPYING". | ||
21 | |||
22 | Contact Information: | ||
23 | e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | ||
24 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
25 | |||
26 | *******************************************************************************/ | ||
27 | |||
28 | |||
29 | #include "vf.h" | ||
30 | |||
31 | static s32 e1000_check_for_link_vf(struct e1000_hw *hw); | ||
32 | static s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed, | ||
33 | u16 *duplex); | ||
34 | static s32 e1000_init_hw_vf(struct e1000_hw *hw); | ||
35 | static s32 e1000_reset_hw_vf(struct e1000_hw *hw); | ||
36 | |||
37 | static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, u8 *, | ||
38 | u32, u32, u32); | ||
39 | static void e1000_rar_set_vf(struct e1000_hw *, u8 *, u32); | ||
40 | static s32 e1000_read_mac_addr_vf(struct e1000_hw *); | ||
41 | static s32 e1000_set_vfta_vf(struct e1000_hw *, u16, bool); | ||
42 | |||
43 | /** | ||
44 | * e1000_init_mac_params_vf - Inits MAC params | ||
45 | * @hw: pointer to the HW structure | ||
46 | **/ | ||
47 | static s32 e1000_init_mac_params_vf(struct e1000_hw *hw) | ||
48 | { | ||
49 | struct e1000_mac_info *mac = &hw->mac; | ||
50 | |||
51 | /* VFs have no MTA Registers - PF feature only */ | ||
52 | mac->mta_reg_count = 128; | ||
53 | /* VFs have no access to RAR entries */ | ||
54 | mac->rar_entry_count = 1; | ||
55 | |||
56 | /* Function pointers */ | ||
57 | /* reset */ | ||
58 | mac->ops.reset_hw = e1000_reset_hw_vf; | ||
59 | /* hw initialization */ | ||
60 | mac->ops.init_hw = e1000_init_hw_vf; | ||
61 | /* check for link */ | ||
62 | mac->ops.check_for_link = e1000_check_for_link_vf; | ||
63 | /* link info */ | ||
64 | mac->ops.get_link_up_info = e1000_get_link_up_info_vf; | ||
65 | /* multicast address update */ | ||
66 | mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_vf; | ||
67 | /* set mac address */ | ||
68 | mac->ops.rar_set = e1000_rar_set_vf; | ||
69 | /* read mac address */ | ||
70 | mac->ops.read_mac_addr = e1000_read_mac_addr_vf; | ||
71 | /* set vlan filter table array */ | ||
72 | mac->ops.set_vfta = e1000_set_vfta_vf; | ||
73 | |||
74 | return E1000_SUCCESS; | ||
75 | } | ||
76 | |||
77 | /** | ||
78 | * e1000_init_function_pointers_vf - Inits function pointers | ||
79 | * @hw: pointer to the HW structure | ||
80 | **/ | ||
81 | void e1000_init_function_pointers_vf(struct e1000_hw *hw) | ||
82 | { | ||
83 | hw->mac.ops.init_params = e1000_init_mac_params_vf; | ||
84 | hw->mbx.ops.init_params = e1000_init_mbx_params_vf; | ||
85 | } | ||
86 | |||
87 | /** | ||
88 | * e1000_get_link_up_info_vf - Gets link info. | ||
89 | * @hw: pointer to the HW structure | ||
90 | * @speed: pointer to 16 bit value to store link speed. | ||
91 | * @duplex: pointer to 16 bit value to store duplex. | ||
92 | * | ||
93 | * Since we cannot read the PHY and get accurate link info, we must rely upon | ||
94 | * the status register's data which is often stale and inaccurate. | ||
95 | **/ | ||
96 | static s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed, | ||
97 | u16 *duplex) | ||
98 | { | ||
99 | s32 status; | ||
100 | |||
101 | status = er32(STATUS); | ||
102 | if (status & E1000_STATUS_SPEED_1000) | ||
103 | *speed = SPEED_1000; | ||
104 | else if (status & E1000_STATUS_SPEED_100) | ||
105 | *speed = SPEED_100; | ||
106 | else | ||
107 | *speed = SPEED_10; | ||
108 | |||
109 | if (status & E1000_STATUS_FD) | ||
110 | *duplex = FULL_DUPLEX; | ||
111 | else | ||
112 | *duplex = HALF_DUPLEX; | ||
113 | |||
114 | return E1000_SUCCESS; | ||
115 | } | ||
116 | |||
117 | /** | ||
118 | * e1000_reset_hw_vf - Resets the HW | ||
119 | * @hw: pointer to the HW structure | ||
120 | * | ||
121 | * VFs provide a function-level reset. This is done using bit 26 of ctrl_reg. | ||
122 | * This is all the reset we can perform on a VF. | ||
123 | **/ | ||
124 | static s32 e1000_reset_hw_vf(struct e1000_hw *hw) | ||
125 | { | ||
126 | struct e1000_mbx_info *mbx = &hw->mbx; | ||
127 | u32 timeout = E1000_VF_INIT_TIMEOUT; | ||
128 | u32 ret_val = -E1000_ERR_MAC_INIT; | ||
129 | u32 msgbuf[3]; | ||
130 | u8 *addr = (u8 *)(&msgbuf[1]); | ||
131 | u32 ctrl; | ||
132 | |||
133 | /* assert vf queue/interrupt reset */ | ||
134 | ctrl = er32(CTRL); | ||
135 | ew32(CTRL, ctrl | E1000_CTRL_RST); | ||
136 | |||
137 | /* we cannot initialize while the RSTI / RSTD bits are asserted */ | ||
138 | while (!mbx->ops.check_for_rst(hw) && timeout) { | ||
139 | timeout--; | ||
140 | udelay(5); | ||
141 | } | ||
142 | |||
143 | if (timeout) { | ||
144 | /* mailbox timeout can now become active */ | ||
145 | mbx->timeout = E1000_VF_MBX_INIT_TIMEOUT; | ||
146 | |||
147 | /* notify pf of vf reset completion */ | ||
148 | msgbuf[0] = E1000_VF_RESET; | ||
149 | mbx->ops.write_posted(hw, msgbuf, 1); | ||
150 | |||
151 | msleep(10); | ||
152 | |||
153 | /* set our "perm_addr" based on info provided by PF */ | ||
154 | ret_val = mbx->ops.read_posted(hw, msgbuf, 3); | ||
155 | if (!ret_val) { | ||
156 | if (msgbuf[0] == (E1000_VF_RESET | E1000_VT_MSGTYPE_ACK)) | ||
157 | memcpy(hw->mac.perm_addr, addr, 6); | ||
158 | else | ||
159 | ret_val = -E1000_ERR_MAC_INIT; | ||
160 | } | ||
161 | } | ||
162 | |||
163 | return ret_val; | ||
164 | } | ||
165 | |||
166 | /** | ||
167 | * e1000_init_hw_vf - Inits the HW | ||
168 | * @hw: pointer to the HW structure | ||
169 | * | ||
170 | * Not much to do here except clear the PF Reset indication if there is one. | ||
171 | **/ | ||
172 | static s32 e1000_init_hw_vf(struct e1000_hw *hw) | ||
173 | { | ||
174 | /* attempt to set and restore our mac address */ | ||
175 | e1000_rar_set_vf(hw, hw->mac.addr, 0); | ||
176 | |||
177 | return E1000_SUCCESS; | ||
178 | } | ||
179 | |||
180 | /** | ||
181 | * e1000_hash_mc_addr_vf - Generate a multicast hash value | ||
182 | * @hw: pointer to the HW structure | ||
183 | * @mc_addr: pointer to a multicast address | ||
184 | * | ||
185 | * Generates a multicast address hash value which is used to determine | ||
186 | * the multicast filter table array address and new table value. See | ||
187 | * e1000_mta_set_generic() | ||
188 | **/ | ||
189 | static u32 e1000_hash_mc_addr_vf(struct e1000_hw *hw, u8 *mc_addr) | ||
190 | { | ||
191 | u32 hash_value, hash_mask; | ||
192 | u8 bit_shift = 0; | ||
193 | |||
194 | /* Register count multiplied by bits per register */ | ||
195 | hash_mask = (hw->mac.mta_reg_count * 32) - 1; | ||
196 | |||
197 | /* | ||
198 | * The bit_shift is the number of left-shifts | ||
199 | * where 0xFF would still fall within the hash mask. | ||
200 | */ | ||
201 | while (hash_mask >> bit_shift != 0xFF) | ||
202 | bit_shift++; | ||
203 | |||
204 | hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | | ||
205 | (((u16) mc_addr[5]) << bit_shift))); | ||
206 | |||
207 | return hash_value; | ||
208 | } | ||
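A worked example of the hash above, assuming mta_reg_count is 128 as set in e1000_init_mac_params_vf(): hash_mask = 128 * 32 - 1 = 0xFFF, and bit_shift settles at 4 because 0xFFF >> 4 == 0xFF. For the multicast address 01:00:5e:00:00:01 (mc_addr[4] = 0x00, mc_addr[5] = 0x01):

    hash_value = 0xFFF & ((0x00 >> (8 - 4)) | ((u16)0x01 << 4)) = 0x010

i.e. a 12-bit index into the 128 x 32-bit multicast table array.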
209 | |||
210 | /** | ||
211 | * e1000_update_mc_addr_list_vf - Update Multicast addresses | ||
212 | * @hw: pointer to the HW structure | ||
213 | * @mc_addr_list: array of multicast addresses to program | ||
214 | * @mc_addr_count: number of multicast addresses to program | ||
215 | * @rar_used_count: the first RAR register free to program | ||
216 | * @rar_count: total number of supported Receive Address Registers | ||
217 | * | ||
218 | * Updates the Receive Address Registers and Multicast Table Array. | ||
219 | * The caller must have a packed mc_addr_list of multicast addresses. | ||
220 | * The parameter rar_count will usually be hw->mac.rar_entry_count | ||
221 | * unless there are workarounds that change this. | ||
222 | **/ | ||
223 | void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, | ||
224 | u8 *mc_addr_list, u32 mc_addr_count, | ||
225 | u32 rar_used_count, u32 rar_count) | ||
226 | { | ||
227 | struct e1000_mbx_info *mbx = &hw->mbx; | ||
228 | u32 msgbuf[E1000_VFMAILBOX_SIZE]; | ||
229 | u16 *hash_list = (u16 *)&msgbuf[1]; | ||
230 | u32 hash_value; | ||
231 | u32 cnt, i; | ||
232 | |||
233 | /* Each entry in the list uses one 16-bit word. We have 30 | ||
234 | * 16-bit words available in our HW msg buffer (minus 1 for the | ||
235 | * msg type). That's 30 hash values if we pack 'em right. If | ||
236 | * there are more than 30 MC addresses to add then punt the | ||
237 | * extras for now and then add code to handle more than 30 later. | ||
238 | * It would be unusual for a server to request that many multicast | ||
239 | * addresses except in large enterprise network environments. | ||
240 | */ | ||
241 | |||
242 | cnt = (mc_addr_count > 30) ? 30 : mc_addr_count; | ||
243 | msgbuf[0] = E1000_VF_SET_MULTICAST; | ||
244 | msgbuf[0] |= cnt << E1000_VT_MSGINFO_SHIFT; | ||
245 | |||
246 | for (i = 0; i < cnt; i++) { | ||
247 | hash_value = e1000_hash_mc_addr_vf(hw, mc_addr_list); | ||
248 | hash_list[i] = hash_value & 0x0FFFF; | ||
249 | mc_addr_list += ETH_ADDR_LEN; | ||
250 | } | ||
251 | |||
252 | mbx->ops.write_posted(hw, msgbuf, E1000_VFMAILBOX_SIZE); | ||
253 | } | ||
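For reference, the mailbox message produced by the function above lays out as sketched below. The 16-dword buffer size is inferred from the "30 16-bit words" comment; E1000_VFMAILBOX_SIZE itself is defined in mbx.h, outside this hunk:

    msgbuf[0]     = E1000_VF_SET_MULTICAST | (cnt << E1000_VT_MSGINFO_SHIFT);
    msgbuf[1..15] : hash_list[0..29], two 16-bit hash values packed per dword
    cnt           = min(mc_addr_count, 30); any further addresses are dropped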
254 | |||
255 | /** | ||
256 | * e1000_set_vfta_vf - Set/Unset vlan filter table address | ||
257 | * @hw: pointer to the HW structure | ||
258 | * @vid: determines the vfta register and bit to set/unset | ||
259 | * @set: if true then set bit, else clear bit | ||
260 | **/ | ||
261 | static s32 e1000_set_vfta_vf(struct e1000_hw *hw, u16 vid, bool set) | ||
262 | { | ||
263 | struct e1000_mbx_info *mbx = &hw->mbx; | ||
264 | u32 msgbuf[2]; | ||
265 | s32 err; | ||
266 | |||
267 | msgbuf[0] = E1000_VF_SET_VLAN; | ||
268 | msgbuf[1] = vid; | ||
269 | /* Setting the 8 bit field MSG INFO to true indicates "add" */ | ||
270 | if (set) | ||
271 | msgbuf[0] |= 1 << E1000_VT_MSGINFO_SHIFT; | ||
272 | |||
273 | mbx->ops.write_posted(hw, msgbuf, 2); | ||
274 | |||
275 | err = mbx->ops.read_posted(hw, msgbuf, 2); | ||
276 | |||
277 | /* if nacked the vlan was rejected */ | ||
278 | if (!err && (msgbuf[0] == (E1000_VF_SET_VLAN | E1000_VT_MSGTYPE_NACK))) | ||
279 | err = -E1000_ERR_MAC_INIT; | ||
280 | |||
281 | return err; | ||
282 | } | ||
283 | |||
284 | /** e1000_rlpml_set_vf - Set the maximum receive packet length | ||
285 | * @hw: pointer to the HW structure | ||
286 | * @max_size: value to assign to max frame size | ||
287 | **/ | ||
288 | void e1000_rlpml_set_vf(struct e1000_hw *hw, u16 max_size) | ||
289 | { | ||
290 | struct e1000_mbx_info *mbx = &hw->mbx; | ||
291 | u32 msgbuf[2]; | ||
292 | |||
293 | msgbuf[0] = E1000_VF_SET_LPE; | ||
294 | msgbuf[1] = max_size; | ||
295 | |||
296 | mbx->ops.write_posted(hw, msgbuf, 2); | ||
297 | } | ||
298 | |||
299 | /** | ||
300 | * e1000_rar_set_vf - set device MAC address | ||
301 | * @hw: pointer to the HW structure | ||
302 | * @addr: pointer to the receive address | ||
303 | * @index: receive address array register | ||
304 | **/ | ||
305 | static void e1000_rar_set_vf(struct e1000_hw *hw, u8 * addr, u32 index) | ||
306 | { | ||
307 | struct e1000_mbx_info *mbx = &hw->mbx; | ||
308 | u32 msgbuf[3]; | ||
309 | u8 *msg_addr = (u8 *)(&msgbuf[1]); | ||
310 | s32 ret_val; | ||
311 | |||
312 | memset(msgbuf, 0, 12); | ||
313 | msgbuf[0] = E1000_VF_SET_MAC_ADDR; | ||
314 | memcpy(msg_addr, addr, 6); | ||
315 | ret_val = mbx->ops.write_posted(hw, msgbuf, 3); | ||
316 | |||
317 | if (!ret_val) | ||
318 | ret_val = mbx->ops.read_posted(hw, msgbuf, 3); | ||
319 | |||
320 | /* if nacked the address was rejected, use "perm_addr" */ | ||
321 | if (!ret_val && | ||
322 | (msgbuf[0] == (E1000_VF_SET_MAC_ADDR | E1000_VT_MSGTYPE_NACK))) | ||
323 | e1000_read_mac_addr_vf(hw); | ||
324 | } | ||
325 | |||
326 | /** | ||
327 | * e1000_read_mac_addr_vf - Read device MAC address | ||
328 | * @hw: pointer to the HW structure | ||
329 | **/ | ||
330 | static s32 e1000_read_mac_addr_vf(struct e1000_hw *hw) | ||
331 | { | ||
332 | int i; | ||
333 | |||
334 | for (i = 0; i < ETH_ADDR_LEN; i++) | ||
335 | hw->mac.addr[i] = hw->mac.perm_addr[i]; | ||
336 | |||
337 | return E1000_SUCCESS; | ||
338 | } | ||
339 | |||
340 | /** | ||
341 | * e1000_check_for_link_vf - Check for link for a virtual interface | ||
342 | * @hw: pointer to the HW structure | ||
343 | * | ||
344 | * Checks to see if the underlying PF is still talking to the VF and | ||
345 | * if it is then it reports the link state to the hardware, otherwise | ||
346 | * it reports link down and returns an error. | ||
347 | **/ | ||
348 | static s32 e1000_check_for_link_vf(struct e1000_hw *hw) | ||
349 | { | ||
350 | struct e1000_mbx_info *mbx = &hw->mbx; | ||
351 | struct e1000_mac_info *mac = &hw->mac; | ||
352 | s32 ret_val = E1000_SUCCESS; | ||
353 | u32 in_msg = 0; | ||
354 | |||
355 | /* | ||
356 | * We only want to run this if there has been a rst asserted. | ||
357 | * In this case that could mean a link change, device reset, | ||
358 | * or a virtual function reset. | ||
359 | */ | ||
360 | |||
361 | /* If we were hit with a reset drop the link */ | ||
362 | if (!mbx->ops.check_for_rst(hw)) | ||
363 | mac->get_link_status = true; | ||
364 | |||
365 | if (!mac->get_link_status) | ||
366 | goto out; | ||
367 | |||
368 | /* if link status is down no point in checking to see if pf is up */ | ||
369 | if (!(er32(STATUS) & E1000_STATUS_LU)) | ||
370 | goto out; | ||
371 | |||
372 | /* if the read failed it could just be a mailbox collision, best wait | ||
373 | * until we are called again and don't report an error */ | ||
374 | if (mbx->ops.read(hw, &in_msg, 1)) | ||
375 | goto out; | ||
376 | |||
377 | /* if incoming message isn't clear to send we are waiting on response */ | ||
378 | if (!(in_msg & E1000_VT_MSGTYPE_CTS)) { | ||
379 | /* message is not CTS; if it is a NACK we must have lost CTS status */ | ||
380 | if (in_msg & E1000_VT_MSGTYPE_NACK) | ||
381 | ret_val = -E1000_ERR_MAC_INIT; | ||
382 | goto out; | ||
383 | } | ||
384 | |||
385 | /* the pf is talking, if we timed out in the past we reinit */ | ||
386 | if (!mbx->timeout) { | ||
387 | ret_val = -E1000_ERR_MAC_INIT; | ||
388 | goto out; | ||
389 | } | ||
390 | |||
391 | /* if we passed all the tests above then the link is up and we no | ||
392 | * longer need to check for link */ | ||
393 | mac->get_link_status = false; | ||
394 | |||
395 | out: | ||
396 | return ret_val; | ||
397 | } | ||
398 | |||
diff --git a/drivers/net/igbvf/vf.h b/drivers/net/igbvf/vf.h new file mode 100644 index 000000000000..1e8ce3741a67 --- /dev/null +++ b/drivers/net/igbvf/vf.h | |||
@@ -0,0 +1,264 @@ | |||
1 | /******************************************************************************* | ||
2 | |||
3 | Intel(R) 82576 Virtual Function Linux driver | ||
4 | Copyright(c) 2009 Intel Corporation. | ||
5 | |||
6 | This program is free software; you can redistribute it and/or modify it | ||
7 | under the terms and conditions of the GNU General Public License, | ||
8 | version 2, as published by the Free Software Foundation. | ||
9 | |||
10 | This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | more details. | ||
14 | |||
15 | You should have received a copy of the GNU General Public License along with | ||
16 | this program; if not, write to the Free Software Foundation, Inc., | ||
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | |||
19 | The full GNU General Public License is included in this distribution in | ||
20 | the file called "COPYING". | ||
21 | |||
22 | Contact Information: | ||
23 | e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | ||
24 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
25 | |||
26 | *******************************************************************************/ | ||
27 | |||
28 | #ifndef _E1000_VF_H_ | ||
29 | #define _E1000_VF_H_ | ||
30 | |||
31 | #include <linux/pci.h> | ||
32 | #include <linux/delay.h> | ||
33 | #include <linux/interrupt.h> | ||
34 | #include <linux/if_ether.h> | ||
35 | |||
36 | #include "regs.h" | ||
37 | #include "defines.h" | ||
38 | |||
39 | struct e1000_hw; | ||
40 | |||
41 | #define E1000_DEV_ID_82576_VF 0x10CA | ||
42 | #define E1000_REVISION_0 0 | ||
43 | #define E1000_REVISION_1 1 | ||
44 | #define E1000_REVISION_2 2 | ||
45 | #define E1000_REVISION_3 3 | ||
46 | #define E1000_REVISION_4 4 | ||
47 | |||
48 | #define E1000_FUNC_0 0 | ||
49 | #define E1000_FUNC_1 1 | ||
50 | |||
51 | /* | ||
52 | * Receive Address Register Count | ||
53 | * Number of high/low register pairs in the RAR. The RAR (Receive Address | ||
54 | * Registers) holds the directed and multicast addresses that we monitor. | ||
55 | * These entries are also used for MAC-based filtering. | ||
56 | */ | ||
57 | #define E1000_RAR_ENTRIES_VF 1 | ||
58 | |||
59 | /* Receive Descriptor - Advanced */ | ||
60 | union e1000_adv_rx_desc { | ||
61 | struct { | ||
62 | u64 pkt_addr; /* Packet buffer address */ | ||
63 | u64 hdr_addr; /* Header buffer address */ | ||
64 | } read; | ||
65 | struct { | ||
66 | struct { | ||
67 | union { | ||
68 | u32 data; | ||
69 | struct { | ||
70 | u16 pkt_info; /* RSS/Packet type */ | ||
71 | u16 hdr_info; /* Split Header, | ||
72 | * hdr buffer length */ | ||
73 | } hs_rss; | ||
74 | } lo_dword; | ||
75 | union { | ||
76 | u32 rss; /* RSS Hash */ | ||
77 | struct { | ||
78 | u16 ip_id; /* IP id */ | ||
79 | u16 csum; /* Packet Checksum */ | ||
80 | } csum_ip; | ||
81 | } hi_dword; | ||
82 | } lower; | ||
83 | struct { | ||
84 | u32 status_error; /* ext status/error */ | ||
85 | u16 length; /* Packet length */ | ||
86 | u16 vlan; /* VLAN tag */ | ||
87 | } upper; | ||
88 | } wb; /* writeback */ | ||
89 | }; | ||
90 | |||
91 | #define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0 | ||
92 | #define E1000_RXDADV_HDRBUFLEN_SHIFT 5 | ||
93 | |||
94 | /* Transmit Descriptor - Advanced */ | ||
95 | union e1000_adv_tx_desc { | ||
96 | struct { | ||
97 | u64 buffer_addr; /* Address of descriptor's data buf */ | ||
98 | u32 cmd_type_len; | ||
99 | u32 olinfo_status; | ||
100 | } read; | ||
101 | struct { | ||
102 | u64 rsvd; /* Reserved */ | ||
103 | u32 nxtseq_seed; | ||
104 | u32 status; | ||
105 | } wb; | ||
106 | }; | ||
107 | |||
108 | /* Adv Transmit Descriptor Config Masks */ | ||
109 | #define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ | ||
110 | #define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ | ||
111 | #define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ | ||
112 | #define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ | ||
113 | #define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */ | ||
114 | #define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ | ||
115 | #define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ | ||
116 | #define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ | ||
117 | #define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ | ||
118 | |||
119 | /* Context descriptors */ | ||
120 | struct e1000_adv_tx_context_desc { | ||
121 | u32 vlan_macip_lens; | ||
122 | u32 seqnum_seed; | ||
123 | u32 type_tucmd_mlhl; | ||
124 | u32 mss_l4len_idx; | ||
125 | }; | ||
126 | |||
127 | #define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ | ||
128 | #define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ | ||
129 | #define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ | ||
130 | #define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ | ||
131 | #define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ | ||
132 | |||
133 | enum e1000_mac_type { | ||
134 | e1000_undefined = 0, | ||
135 | e1000_vfadapt, | ||
136 | e1000_num_macs /* List is 1-based, so subtract 1 for true count. */ | ||
137 | }; | ||
138 | |||
139 | struct e1000_vf_stats { | ||
140 | u64 base_gprc; | ||
141 | u64 base_gptc; | ||
142 | u64 base_gorc; | ||
143 | u64 base_gotc; | ||
144 | u64 base_mprc; | ||
145 | u64 base_gotlbc; | ||
146 | u64 base_gptlbc; | ||
147 | u64 base_gorlbc; | ||
148 | u64 base_gprlbc; | ||
149 | |||
150 | u32 last_gprc; | ||
151 | u32 last_gptc; | ||
152 | u32 last_gorc; | ||
153 | u32 last_gotc; | ||
154 | u32 last_mprc; | ||
155 | u32 last_gotlbc; | ||
156 | u32 last_gptlbc; | ||
157 | u32 last_gorlbc; | ||
158 | u32 last_gprlbc; | ||
159 | |||
160 | u64 gprc; | ||
161 | u64 gptc; | ||
162 | u64 gorc; | ||
163 | u64 gotc; | ||
164 | u64 mprc; | ||
165 | u64 gotlbc; | ||
166 | u64 gptlbc; | ||
167 | u64 gorlbc; | ||
168 | u64 gprlbc; | ||
169 | }; | ||
170 | |||
171 | #include "mbx.h" | ||
172 | |||
173 | struct e1000_mac_operations { | ||
174 | /* Function pointers for the MAC. */ | ||
175 | s32 (*init_params)(struct e1000_hw *); | ||
176 | s32 (*check_for_link)(struct e1000_hw *); | ||
177 | void (*clear_vfta)(struct e1000_hw *); | ||
178 | s32 (*get_bus_info)(struct e1000_hw *); | ||
179 | s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *); | ||
180 | void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32, u32, u32); | ||
181 | s32 (*reset_hw)(struct e1000_hw *); | ||
182 | s32 (*init_hw)(struct e1000_hw *); | ||
183 | s32 (*setup_link)(struct e1000_hw *); | ||
184 | void (*write_vfta)(struct e1000_hw *, u32, u32); | ||
185 | void (*mta_set)(struct e1000_hw *, u32); | ||
186 | void (*rar_set)(struct e1000_hw *, u8*, u32); | ||
187 | s32 (*read_mac_addr)(struct e1000_hw *); | ||
188 | s32 (*set_vfta)(struct e1000_hw *, u16, bool); | ||
189 | }; | ||
190 | |||
191 | struct e1000_mac_info { | ||
192 | struct e1000_mac_operations ops; | ||
193 | u8 addr[6]; | ||
194 | u8 perm_addr[6]; | ||
195 | |||
196 | enum e1000_mac_type type; | ||
197 | |||
198 | u16 mta_reg_count; | ||
199 | u16 rar_entry_count; | ||
200 | |||
201 | bool get_link_status; | ||
202 | }; | ||
203 | |||
204 | struct e1000_mbx_operations { | ||
205 | s32 (*init_params)(struct e1000_hw *hw); | ||
206 | s32 (*read)(struct e1000_hw *, u32 *, u16); | ||
207 | s32 (*write)(struct e1000_hw *, u32 *, u16); | ||
208 | s32 (*read_posted)(struct e1000_hw *, u32 *, u16); | ||
209 | s32 (*write_posted)(struct e1000_hw *, u32 *, u16); | ||
210 | s32 (*check_for_msg)(struct e1000_hw *); | ||
211 | s32 (*check_for_ack)(struct e1000_hw *); | ||
212 | s32 (*check_for_rst)(struct e1000_hw *); | ||
213 | }; | ||
214 | |||
215 | struct e1000_mbx_stats { | ||
216 | u32 msgs_tx; | ||
217 | u32 msgs_rx; | ||
218 | |||
219 | u32 acks; | ||
220 | u32 reqs; | ||
221 | u32 rsts; | ||
222 | }; | ||
223 | |||
224 | struct e1000_mbx_info { | ||
225 | struct e1000_mbx_operations ops; | ||
226 | struct e1000_mbx_stats stats; | ||
227 | u32 timeout; | ||
228 | u32 usec_delay; | ||
229 | u16 size; | ||
230 | }; | ||
231 | |||
232 | struct e1000_dev_spec_vf { | ||
233 | u32 vf_number; | ||
234 | u32 v2p_mailbox; | ||
235 | }; | ||
236 | |||
237 | struct e1000_hw { | ||
238 | void *back; | ||
239 | |||
240 | u8 __iomem *hw_addr; | ||
241 | u8 __iomem *flash_address; | ||
242 | unsigned long io_base; | ||
243 | |||
244 | struct e1000_mac_info mac; | ||
245 | struct e1000_mbx_info mbx; | ||
246 | |||
247 | union { | ||
248 | struct e1000_dev_spec_vf vf; | ||
249 | } dev_spec; | ||
250 | |||
251 | u16 device_id; | ||
252 | u16 subsystem_vendor_id; | ||
253 | u16 subsystem_device_id; | ||
254 | u16 vendor_id; | ||
255 | |||
256 | u8 revision_id; | ||
257 | }; | ||
258 | |||
259 | /* These functions must be implemented by drivers */ | ||
260 | void e1000_rlpml_set_vf(struct e1000_hw *, u16); | ||
261 | void e1000_init_function_pointers_vf(struct e1000_hw *hw); | ||
262 | |||
263 | |||
264 | #endif /* _E1000_VF_H_ */ | ||
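As a rough sketch of how the structures in this header are expected to be wired together by the driver: the actual sequencing lives in igbvf's setup path, which is outside this hunk, so treat the ordering below as an assumption rather than patch code:

    /* Hypothetical bring-up sequence, not taken verbatim from this patch. */
    struct e1000_hw *hw = &adapter->hw;

    e1000_init_function_pointers_vf(hw);   /* install mac/mbx init_params         */
    hw->mac.ops.init_params(hw);           /* fills in reset_hw, rar_set, ...      */
    hw->mbx.ops.init_params(hw);           /* fills in mailbox read/write ops      */
    hw->mac.ops.reset_hw(hw);              /* VF reset + perm MAC addr via the PF  */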
diff --git a/drivers/net/ioc3-eth.c b/drivers/net/ioc3-eth.c index cbc63ff13add..c5593f4665a4 100644 --- a/drivers/net/ioc3-eth.c +++ b/drivers/net/ioc3-eth.c | |||
@@ -1214,6 +1214,19 @@ static void __devinit ioc3_serial_probe(struct pci_dev *pdev, struct ioc3 *ioc3) | |||
1214 | } | 1214 | } |
1215 | #endif | 1215 | #endif |
1216 | 1216 | ||
1217 | static const struct net_device_ops ioc3_netdev_ops = { | ||
1218 | .ndo_open = ioc3_open, | ||
1219 | .ndo_stop = ioc3_close, | ||
1220 | .ndo_start_xmit = ioc3_start_xmit, | ||
1221 | .ndo_tx_timeout = ioc3_timeout, | ||
1222 | .ndo_get_stats = ioc3_get_stats, | ||
1223 | .ndo_set_multicast_list = ioc3_set_multicast_list, | ||
1224 | .ndo_do_ioctl = ioc3_ioctl, | ||
1225 | .ndo_validate_addr = eth_validate_addr, | ||
1226 | .ndo_set_mac_address = ioc3_set_mac_address, | ||
1227 | .ndo_change_mtu = eth_change_mtu, | ||
1228 | }; | ||
1229 | |||
1217 | static int __devinit ioc3_probe(struct pci_dev *pdev, | 1230 | static int __devinit ioc3_probe(struct pci_dev *pdev, |
1218 | const struct pci_device_id *ent) | 1231 | const struct pci_device_id *ent) |
1219 | { | 1232 | { |
@@ -1310,15 +1323,8 @@ static int __devinit ioc3_probe(struct pci_dev *pdev, | |||
1310 | ioc3_get_eaddr(ip); | 1323 | ioc3_get_eaddr(ip); |
1311 | 1324 | ||
1312 | /* The IOC3-specific entries in the device structure. */ | 1325 | /* The IOC3-specific entries in the device structure. */ |
1313 | dev->open = ioc3_open; | ||
1314 | dev->hard_start_xmit = ioc3_start_xmit; | ||
1315 | dev->tx_timeout = ioc3_timeout; | ||
1316 | dev->watchdog_timeo = 5 * HZ; | 1326 | dev->watchdog_timeo = 5 * HZ; |
1317 | dev->stop = ioc3_close; | 1327 | dev->netdev_ops = &ioc3_netdev_ops; |
1318 | dev->get_stats = ioc3_get_stats; | ||
1319 | dev->do_ioctl = ioc3_ioctl; | ||
1320 | dev->set_multicast_list = ioc3_set_multicast_list; | ||
1321 | dev->set_mac_address = ioc3_set_mac_address; | ||
1322 | dev->ethtool_ops = &ioc3_ethtool_ops; | 1328 | dev->ethtool_ops = &ioc3_ethtool_ops; |
1323 | dev->features = NETIF_F_IP_CSUM; | 1329 | dev->features = NETIF_F_IP_CSUM; |
1324 | 1330 | ||
diff --git a/drivers/net/isa-skeleton.c b/drivers/net/isa-skeleton.c index 3126678bdd3c..73585fd8f29f 100644 --- a/drivers/net/isa-skeleton.c +++ b/drivers/net/isa-skeleton.c | |||
@@ -181,6 +181,18 @@ out: | |||
181 | } | 181 | } |
182 | #endif | 182 | #endif |
183 | 183 | ||
184 | static const struct net_device_ops netcard_netdev_ops = { | ||
185 | .ndo_open = net_open, | ||
186 | .ndo_stop = net_close, | ||
187 | .ndo_start_xmit = net_send_packet, | ||
188 | .ndo_get_stats = net_get_stats, | ||
189 | .ndo_set_multicast_list = set_multicast_list, | ||
190 | .ndo_tx_timeout = net_tx_timeout, | ||
191 | .ndo_validate_addr = eth_validate_addr, | ||
192 | .ndo_set_mac_address = eth_mac_addr, | ||
193 | .ndo_change_mtu = eth_change_mtu, | ||
194 | }; | ||
195 | |||
184 | /* | 196 | /* |
185 | * This is the real probe routine. Linux has a history of friendly device | 197 | * This is the real probe routine. Linux has a history of friendly device |
186 | * probes on the ISA bus. A good device probes avoids doing writes, and | 198 | * probes on the ISA bus. A good device probes avoids doing writes, and |
@@ -303,13 +315,7 @@ static int __init netcard_probe1(struct net_device *dev, int ioaddr) | |||
303 | np = netdev_priv(dev); | 315 | np = netdev_priv(dev); |
304 | spin_lock_init(&np->lock); | 316 | spin_lock_init(&np->lock); |
305 | 317 | ||
306 | dev->open = net_open; | 318 | dev->netdev_ops = &netcard_netdev_ops; |
307 | dev->stop = net_close; | ||
308 | dev->hard_start_xmit = net_send_packet; | ||
309 | dev->get_stats = net_get_stats; | ||
310 | dev->set_multicast_list = &set_multicast_list; | ||
311 | |||
312 | dev->tx_timeout = &net_tx_timeout; | ||
313 | dev->watchdog_timeo = MY_TX_TIMEOUT; | 319 | dev->watchdog_timeo = MY_TX_TIMEOUT; |
314 | 320 | ||
315 | err = register_netdev(dev); | 321 | err = register_netdev(dev); |
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c index de4db0dc7879..4791238c3f6e 100644 --- a/drivers/net/ixgbe/ixgbe_82598.c +++ b/drivers/net/ixgbe/ixgbe_82598.c | |||
@@ -885,61 +885,6 @@ static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw) | |||
885 | } | 885 | } |
886 | 886 | ||
887 | /** | 887 | /** |
888 | * ixgbe_blink_led_start_82598 - Blink LED based on index. | ||
889 | * @hw: pointer to hardware structure | ||
890 | * @index: led number to blink | ||
891 | **/ | ||
892 | static s32 ixgbe_blink_led_start_82598(struct ixgbe_hw *hw, u32 index) | ||
893 | { | ||
894 | ixgbe_link_speed speed = 0; | ||
895 | bool link_up = 0; | ||
896 | u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); | ||
897 | u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); | ||
898 | |||
899 | /* | ||
900 | * Link must be up to auto-blink the LEDs on the 82598EB MAC; | ||
901 | * force it if link is down. | ||
902 | */ | ||
903 | hw->mac.ops.check_link(hw, &speed, &link_up, false); | ||
904 | |||
905 | if (!link_up) { | ||
906 | autoc_reg |= IXGBE_AUTOC_FLU; | ||
907 | IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); | ||
908 | msleep(10); | ||
909 | } | ||
910 | |||
911 | led_reg &= ~IXGBE_LED_MODE_MASK(index); | ||
912 | led_reg |= IXGBE_LED_BLINK(index); | ||
913 | IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); | ||
914 | IXGBE_WRITE_FLUSH(hw); | ||
915 | |||
916 | return 0; | ||
917 | } | ||
918 | |||
919 | /** | ||
920 | * ixgbe_blink_led_stop_82598 - Stop blinking LED based on index. | ||
921 | * @hw: pointer to hardware structure | ||
922 | * @index: led number to stop blinking | ||
923 | **/ | ||
924 | static s32 ixgbe_blink_led_stop_82598(struct ixgbe_hw *hw, u32 index) | ||
925 | { | ||
926 | u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); | ||
927 | u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); | ||
928 | |||
929 | autoc_reg &= ~IXGBE_AUTOC_FLU; | ||
930 | autoc_reg |= IXGBE_AUTOC_AN_RESTART; | ||
931 | IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); | ||
932 | |||
933 | led_reg &= ~IXGBE_LED_MODE_MASK(index); | ||
934 | led_reg &= ~IXGBE_LED_BLINK(index); | ||
935 | led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index); | ||
936 | IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); | ||
937 | IXGBE_WRITE_FLUSH(hw); | ||
938 | |||
939 | return 0; | ||
940 | } | ||
941 | |||
942 | /** | ||
943 | * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register | 888 | * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register |
944 | * @hw: pointer to hardware structure | 889 | * @hw: pointer to hardware structure |
945 | * @reg: analog register to read | 890 | * @reg: analog register to read |
@@ -1128,8 +1073,8 @@ static struct ixgbe_mac_operations mac_ops_82598 = { | |||
1128 | .get_link_capabilities = &ixgbe_get_link_capabilities_82598, | 1073 | .get_link_capabilities = &ixgbe_get_link_capabilities_82598, |
1129 | .led_on = &ixgbe_led_on_generic, | 1074 | .led_on = &ixgbe_led_on_generic, |
1130 | .led_off = &ixgbe_led_off_generic, | 1075 | .led_off = &ixgbe_led_off_generic, |
1131 | .blink_led_start = &ixgbe_blink_led_start_82598, | 1076 | .blink_led_start = &ixgbe_blink_led_start_generic, |
1132 | .blink_led_stop = &ixgbe_blink_led_stop_82598, | 1077 | .blink_led_stop = &ixgbe_blink_led_stop_generic, |
1133 | .set_rar = &ixgbe_set_rar_generic, | 1078 | .set_rar = &ixgbe_set_rar_generic, |
1134 | .clear_rar = &ixgbe_clear_rar_generic, | 1079 | .clear_rar = &ixgbe_clear_rar_generic, |
1135 | .set_vmdq = &ixgbe_set_vmdq_82598, | 1080 | .set_vmdq = &ixgbe_set_vmdq_82598, |
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c index beae7e012609..29771fbaa42d 100644 --- a/drivers/net/ixgbe/ixgbe_82599.c +++ b/drivers/net/ixgbe/ixgbe_82599.c | |||
@@ -68,8 +68,6 @@ s32 ixgbe_clear_vmdq_82599(struct ixgbe_hw *hw, u32 rar, u32 vmdq); | |||
68 | s32 ixgbe_set_vfta_82599(struct ixgbe_hw *hw, u32 vlan, | 68 | s32 ixgbe_set_vfta_82599(struct ixgbe_hw *hw, u32 vlan, |
69 | u32 vind, bool vlan_on); | 69 | u32 vind, bool vlan_on); |
70 | s32 ixgbe_clear_vfta_82599(struct ixgbe_hw *hw); | 70 | s32 ixgbe_clear_vfta_82599(struct ixgbe_hw *hw); |
71 | s32 ixgbe_blink_led_stop_82599(struct ixgbe_hw *hw, u32 index); | ||
72 | s32 ixgbe_blink_led_start_82599(struct ixgbe_hw *hw, u32 index); | ||
73 | s32 ixgbe_init_uta_tables_82599(struct ixgbe_hw *hw); | 71 | s32 ixgbe_init_uta_tables_82599(struct ixgbe_hw *hw); |
74 | s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val); | 72 | s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val); |
75 | s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val); | 73 | s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val); |
@@ -991,40 +989,6 @@ s32 ixgbe_clear_vfta_82599(struct ixgbe_hw *hw) | |||
991 | } | 989 | } |
992 | 990 | ||
993 | /** | 991 | /** |
994 | * ixgbe_blink_led_start_82599 - Blink LED based on index. | ||
995 | * @hw: pointer to hardware structure | ||
996 | * @index: led number to blink | ||
997 | **/ | ||
998 | s32 ixgbe_blink_led_start_82599(struct ixgbe_hw *hw, u32 index) | ||
999 | { | ||
1000 | u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); | ||
1001 | |||
1002 | led_reg &= ~IXGBE_LED_MODE_MASK(index); | ||
1003 | led_reg |= IXGBE_LED_BLINK(index); | ||
1004 | IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); | ||
1005 | IXGBE_WRITE_FLUSH(hw); | ||
1006 | |||
1007 | return 0; | ||
1008 | } | ||
1009 | |||
1010 | /** | ||
1011 | * ixgbe_blink_led_stop_82599 - Stop blinking LED based on index. | ||
1012 | * @hw: pointer to hardware structure | ||
1013 | * @index: led number to stop blinking | ||
1014 | **/ | ||
1015 | s32 ixgbe_blink_led_stop_82599(struct ixgbe_hw *hw, u32 index) | ||
1016 | { | ||
1017 | u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); | ||
1018 | |||
1019 | led_reg &= ~IXGBE_LED_MODE_MASK(index); | ||
1020 | led_reg &= ~IXGBE_LED_BLINK(index); | ||
1021 | IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); | ||
1022 | IXGBE_WRITE_FLUSH(hw); | ||
1023 | |||
1024 | return 0; | ||
1025 | } | ||
1026 | |||
1027 | /** | ||
1028 | * ixgbe_init_uta_tables_82599 - Initialize the Unicast Table Array | 992 | * ixgbe_init_uta_tables_82599 - Initialize the Unicast Table Array |
1029 | * @hw: pointer to hardware structure | 993 | * @hw: pointer to hardware structure |
1030 | **/ | 994 | **/ |
@@ -1243,8 +1207,8 @@ static struct ixgbe_mac_operations mac_ops_82599 = { | |||
1243 | .get_link_capabilities = &ixgbe_get_link_capabilities_82599, | 1207 | .get_link_capabilities = &ixgbe_get_link_capabilities_82599, |
1244 | .led_on = &ixgbe_led_on_generic, | 1208 | .led_on = &ixgbe_led_on_generic, |
1245 | .led_off = &ixgbe_led_off_generic, | 1209 | .led_off = &ixgbe_led_off_generic, |
1246 | .blink_led_start = &ixgbe_blink_led_start_82599, | 1210 | .blink_led_start = &ixgbe_blink_led_start_generic, |
1247 | .blink_led_stop = &ixgbe_blink_led_stop_82599, | 1211 | .blink_led_stop = &ixgbe_blink_led_stop_generic, |
1248 | .set_rar = &ixgbe_set_rar_generic, | 1212 | .set_rar = &ixgbe_set_rar_generic, |
1249 | .clear_rar = &ixgbe_clear_rar_generic, | 1213 | .clear_rar = &ixgbe_clear_rar_generic, |
1250 | .set_vmdq = &ixgbe_set_vmdq_82599, | 1214 | .set_vmdq = &ixgbe_set_vmdq_82599, |
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c index 63ab6671d08e..5567519676d5 100644 --- a/drivers/net/ixgbe/ixgbe_common.c +++ b/drivers/net/ixgbe/ixgbe_common.c | |||
@@ -2071,3 +2071,58 @@ s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval) | |||
2071 | 2071 | ||
2072 | return 0; | 2072 | return 0; |
2073 | } | 2073 | } |
2074 | |||
2075 | /** | ||
2076 | * ixgbe_blink_led_start_generic - Blink LED based on index. | ||
2077 | * @hw: pointer to hardware structure | ||
2078 | * @index: led number to blink | ||
2079 | **/ | ||
2080 | s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index) | ||
2081 | { | ||
2082 | ixgbe_link_speed speed = 0; | ||
2083 | bool link_up = 0; | ||
2084 | u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); | ||
2085 | u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); | ||
2086 | |||
2087 | /* | ||
2088 | * Link must be up to auto-blink the LEDs; | ||
2089 | * Force it if link is down. | ||
2090 | */ | ||
2091 | hw->mac.ops.check_link(hw, &speed, &link_up, false); | ||
2092 | |||
2093 | if (!link_up) { | ||
2094 | autoc_reg |= IXGBE_AUTOC_FLU; | ||
2095 | IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); | ||
2096 | msleep(10); | ||
2097 | } | ||
2098 | |||
2099 | led_reg &= ~IXGBE_LED_MODE_MASK(index); | ||
2100 | led_reg |= IXGBE_LED_BLINK(index); | ||
2101 | IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); | ||
2102 | IXGBE_WRITE_FLUSH(hw); | ||
2103 | |||
2104 | return 0; | ||
2105 | } | ||
2106 | |||
2107 | /** | ||
2108 | * ixgbe_blink_led_stop_generic - Stop blinking LED based on index. | ||
2109 | * @hw: pointer to hardware structure | ||
2110 | * @index: led number to stop blinking | ||
2111 | **/ | ||
2112 | s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index) | ||
2113 | { | ||
2114 | u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); | ||
2115 | u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); | ||
2116 | |||
2117 | autoc_reg &= ~IXGBE_AUTOC_FLU; | ||
2118 | autoc_reg |= IXGBE_AUTOC_AN_RESTART; | ||
2119 | IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); | ||
2120 | |||
2121 | led_reg &= ~IXGBE_LED_MODE_MASK(index); | ||
2122 | led_reg &= ~IXGBE_LED_BLINK(index); | ||
2123 | led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index); | ||
2124 | IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); | ||
2125 | IXGBE_WRITE_FLUSH(hw); | ||
2126 | |||
2127 | return 0; | ||
2128 | } | ||
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h index 24f73e719c3f..dd260890ad0a 100644 --- a/drivers/net/ixgbe/ixgbe_common.h +++ b/drivers/net/ixgbe/ixgbe_common.h | |||
@@ -76,6 +76,9 @@ s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw); | |||
76 | s32 ixgbe_read_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 *val); | 76 | s32 ixgbe_read_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 *val); |
77 | s32 ixgbe_write_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 val); | 77 | s32 ixgbe_write_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 val); |
78 | 78 | ||
79 | s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index); | ||
80 | s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index); | ||
81 | |||
79 | #define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg))) | 82 | #define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg))) |
80 | 83 | ||
81 | #ifndef writeq | 84 | #ifndef writeq |
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index aafc120f164e..f0a20facc650 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c | |||
@@ -943,6 +943,24 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, | |||
943 | } | 943 | } |
944 | 944 | ||
945 | 945 | ||
946 | static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter, | ||
947 | struct ethtool_wolinfo *wol) | ||
948 | { | ||
949 | struct ixgbe_hw *hw = &adapter->hw; | ||
950 | int retval = 1; | ||
951 | |||
952 | switch(hw->device_id) { | ||
953 | case IXGBE_DEV_ID_82599_KX4: | ||
954 | retval = 0; | ||
955 | break; | ||
956 | default: | ||
957 | wol->supported = 0; | ||
958 | retval = 0; | ||
959 | } | ||
960 | |||
961 | return retval; | ||
962 | } | ||
963 | |||
946 | static void ixgbe_get_wol(struct net_device *netdev, | 964 | static void ixgbe_get_wol(struct net_device *netdev, |
947 | struct ethtool_wolinfo *wol) | 965 | struct ethtool_wolinfo *wol) |
948 | { | 966 | { |
@@ -952,7 +970,8 @@ static void ixgbe_get_wol(struct net_device *netdev, | |||
952 | WAKE_BCAST | WAKE_MAGIC; | 970 | WAKE_BCAST | WAKE_MAGIC; |
953 | wol->wolopts = 0; | 971 | wol->wolopts = 0; |
954 | 972 | ||
955 | if (!device_can_wakeup(&adapter->pdev->dev)) | 973 | if (ixgbe_wol_exclusion(adapter, wol) || |
974 | !device_can_wakeup(&adapter->pdev->dev)) | ||
956 | return; | 975 | return; |
957 | 976 | ||
958 | if (adapter->wol & IXGBE_WUFC_EX) | 977 | if (adapter->wol & IXGBE_WUFC_EX) |
@@ -974,6 +993,9 @@ static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) | |||
974 | if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) | 993 | if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) |
975 | return -EOPNOTSUPP; | 994 | return -EOPNOTSUPP; |
976 | 995 | ||
996 | if (ixgbe_wol_exclusion(adapter, wol)) | ||
997 | return wol->wolopts ? -EOPNOTSUPP : 0; | ||
998 | |||
977 | adapter->wol = 0; | 999 | adapter->wol = 0; |
978 | 1000 | ||
979 | if (wol->wolopts & WAKE_UCAST) | 1001 | if (wol->wolopts & WAKE_UCAST) |
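The two ixgbe_ethtool.c hunks above gate Wake-on-LAN on ixgbe_wol_exclusion(): only IXGBE_DEV_ID_82599_KX4 keeps wol->supported populated, every other device ID has it cleared and get_wol/set_wol bail out early. A condensed sketch of how the handler pair fits together is below; the adapter fields, the exclusion helper and the IXGBE_WUFC_MAG flag follow the driver, but the two functions themselves are an illustration, not the complete ethtool code.

    /* Sketch: WoL reporting and configuration behind an exclusion check. */
    static void sketch_get_wol(struct net_device *netdev,
                               struct ethtool_wolinfo *wol)
    {
            struct ixgbe_adapter *adapter = netdev_priv(netdev);

            wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC;
            wol->wolopts = 0;

            /* Excluded device or non-wakeup-capable platform: report nothing. */
            if (ixgbe_wol_exclusion(adapter, wol) ||
                !device_can_wakeup(&adapter->pdev->dev))
                    return;

            if (adapter->wol & IXGBE_WUFC_MAG)
                    wol->wolopts |= WAKE_MAGIC;
    }

    static int sketch_set_wol(struct net_device *netdev,
                              struct ethtool_wolinfo *wol)
    {
            struct ixgbe_adapter *adapter = netdev_priv(netdev);

            /* Refusing is only an error if the user actually asked for a wake
             * option; "disable WoL" on an excluded device succeeds quietly. */
            if (ixgbe_wol_exclusion(adapter, wol))
                    return wol->wolopts ? -EOPNOTSUPP : 0;

            adapter->wol = 0;
            if (wol->wolopts & WAKE_MAGIC)
                    adapter->wol |= IXGBE_WUFC_MAG;

            device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
            return 0;
    }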
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 9ef128ae6458..febde45cf9fa 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c | |||
@@ -2723,17 +2723,21 @@ static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) | |||
2723 | **/ | 2723 | **/ |
2724 | static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter) | 2724 | static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter) |
2725 | { | 2725 | { |
2726 | /* Start with base case */ | ||
2727 | adapter->num_rx_queues = 1; | ||
2728 | adapter->num_tx_queues = 1; | ||
2729 | |||
2730 | #ifdef CONFIG_IXGBE_DCB | 2726 | #ifdef CONFIG_IXGBE_DCB |
2731 | if (ixgbe_set_dcb_queues(adapter)) | 2727 | if (ixgbe_set_dcb_queues(adapter)) |
2732 | return; | 2728 | goto done; |
2733 | 2729 | ||
2734 | #endif | 2730 | #endif |
2735 | if (ixgbe_set_rss_queues(adapter)) | 2731 | if (ixgbe_set_rss_queues(adapter)) |
2736 | return; | 2732 | goto done; |
2733 | |||
2734 | /* fallback to base case */ | ||
2735 | adapter->num_rx_queues = 1; | ||
2736 | adapter->num_tx_queues = 1; | ||
2737 | |||
2738 | done: | ||
2739 | /* Notify the stack of the (possibly) reduced Tx Queue count. */ | ||
2740 | adapter->netdev->real_num_tx_queues = adapter->num_tx_queues; | ||
2737 | } | 2741 | } |
2738 | 2742 | ||
2739 | static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, | 2743 | static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, |
@@ -2992,9 +2996,6 @@ try_msi: | |||
2992 | } | 2996 | } |
2993 | 2997 | ||
2994 | out: | 2998 | out: |
2995 | /* Notify the stack of the (possibly) reduced Tx Queue count. */ | ||
2996 | adapter->netdev->real_num_tx_queues = adapter->num_tx_queues; | ||
2997 | |||
2998 | return err; | 2999 | return err; |
2999 | } | 3000 | } |
3000 | 3001 | ||
@@ -3611,9 +3612,9 @@ static int ixgbe_resume(struct pci_dev *pdev) | |||
3611 | 3612 | ||
3612 | return 0; | 3613 | return 0; |
3613 | } | 3614 | } |
3614 | |||
3615 | #endif /* CONFIG_PM */ | 3615 | #endif /* CONFIG_PM */ |
3616 | static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state) | 3616 | |
3617 | static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) | ||
3617 | { | 3618 | { |
3618 | struct net_device *netdev = pci_get_drvdata(pdev); | 3619 | struct net_device *netdev = pci_get_drvdata(pdev); |
3619 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | 3620 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
@@ -3672,18 +3673,46 @@ static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state) | |||
3672 | pci_enable_wake(pdev, PCI_D3cold, 0); | 3673 | pci_enable_wake(pdev, PCI_D3cold, 0); |
3673 | } | 3674 | } |
3674 | 3675 | ||
3676 | *enable_wake = !!wufc; | ||
3677 | |||
3675 | ixgbe_release_hw_control(adapter); | 3678 | ixgbe_release_hw_control(adapter); |
3676 | 3679 | ||
3677 | pci_disable_device(pdev); | 3680 | pci_disable_device(pdev); |
3678 | 3681 | ||
3679 | pci_set_power_state(pdev, pci_choose_state(pdev, state)); | 3682 | return 0; |
3683 | } | ||
3684 | |||
3685 | #ifdef CONFIG_PM | ||
3686 | static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state) | ||
3687 | { | ||
3688 | int retval; | ||
3689 | bool wake; | ||
3690 | |||
3691 | retval = __ixgbe_shutdown(pdev, &wake); | ||
3692 | if (retval) | ||
3693 | return retval; | ||
3694 | |||
3695 | if (wake) { | ||
3696 | pci_prepare_to_sleep(pdev); | ||
3697 | } else { | ||
3698 | pci_wake_from_d3(pdev, false); | ||
3699 | pci_set_power_state(pdev, PCI_D3hot); | ||
3700 | } | ||
3680 | 3701 | ||
3681 | return 0; | 3702 | return 0; |
3682 | } | 3703 | } |
3704 | #endif /* CONFIG_PM */ | ||
3683 | 3705 | ||
3684 | static void ixgbe_shutdown(struct pci_dev *pdev) | 3706 | static void ixgbe_shutdown(struct pci_dev *pdev) |
3685 | { | 3707 | { |
3686 | ixgbe_suspend(pdev, PMSG_SUSPEND); | 3708 | bool wake; |
3709 | |||
3710 | __ixgbe_shutdown(pdev, &wake); | ||
3711 | |||
3712 | if (system_state == SYSTEM_POWER_OFF) { | ||
3713 | pci_wake_from_d3(pdev, wake); | ||
3714 | pci_set_power_state(pdev, PCI_D3hot); | ||
3715 | } | ||
3687 | } | 3716 | } |
3688 | 3717 | ||
3689 | /** | 3718 | /** |
@@ -4342,7 +4371,7 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
4342 | int count = 0; | 4371 | int count = 0; |
4343 | unsigned int f; | 4372 | unsigned int f; |
4344 | 4373 | ||
4345 | r_idx = (adapter->num_tx_queues - 1) & skb->queue_mapping; | 4374 | r_idx = skb->queue_mapping; |
4346 | tx_ring = &adapter->tx_ring[r_idx]; | 4375 | tx_ring = &adapter->tx_ring[r_idx]; |
4347 | 4376 | ||
4348 | if (adapter->vlgrp && vlan_tx_tag_present(skb)) { | 4377 | if (adapter->vlgrp && vlan_tx_tag_present(skb)) { |
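The last ixgbe_main.c hunk drops the `(num_tx_queues - 1) & skb->queue_mapping` clamp from ixgbe_xmit_frame(). With the earlier hunk keeping netdev->real_num_tx_queues in step with adapter->num_tx_queues, the stack should only ever hand the driver an in-range queue index, and the mask was never a correct clamp in the first place unless the queue count happened to be a power of two. A standalone illustration of that last point (hypothetical queue count, not driver code):

    #include <stdio.h>

    int main(void)
    {
            unsigned int num_tx_queues = 3;   /* hypothetical non-power-of-two count */
            unsigned int mapping;

            for (mapping = 0; mapping < num_tx_queues; mapping++)
                    printf("mapping %u -> masked %u, modulo %u\n",
                           mapping,
                           (num_tx_queues - 1) & mapping,   /* old-style clamp */
                           mapping % num_tx_queues);

            /* With three queues the mask is 0b10, so mapping 1 collapses onto
             * queue 0 and queue 1 is never used; a true modulo keeps all three. */
            return 0;
    }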
diff --git a/drivers/net/jme.c b/drivers/net/jme.c index ece35040288c..621a7c0c46ba 100644 --- a/drivers/net/jme.c +++ b/drivers/net/jme.c | |||
@@ -2591,13 +2591,13 @@ static int | |||
2591 | jme_pci_dma64(struct pci_dev *pdev) | 2591 | jme_pci_dma64(struct pci_dev *pdev) |
2592 | { | 2592 | { |
2593 | if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 && | 2593 | if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 && |
2594 | !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) | 2594 | !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) |
2595 | if (!pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) | 2595 | if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) |
2596 | return 1; | 2596 | return 1; |
2597 | 2597 | ||
2598 | if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 && | 2598 | if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 && |
2599 | !pci_set_dma_mask(pdev, DMA_40BIT_MASK)) | 2599 | !pci_set_dma_mask(pdev, DMA_BIT_MASK(40))) |
2600 | if (!pci_set_consistent_dma_mask(pdev, DMA_40BIT_MASK)) | 2600 | if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40))) |
2601 | return 1; | 2601 | return 1; |
2602 | 2602 | ||
2603 | if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) | 2603 | if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) |
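This jme.c hunk (like the ath9k and p54pci hunks later in the diff) replaces the old DMA_64BIT_MASK/DMA_40BIT_MASK constants with DMA_BIT_MASK(), which derives the mask from the bit width. The #define below mirrors the kernel's linux/dma-mapping.h definition; the surrounding program is only a throwaway harness to show the values it produces:

    #include <stdio.h>

    /* Same shape as the kernel macro: the (n) == 64 case avoids the undefined
     * behaviour of shifting a 64-bit value by 64 bits. */
    #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

    int main(void)
    {
            printf("DMA_BIT_MASK(32) = 0x%016llx\n", DMA_BIT_MASK(32));
            printf("DMA_BIT_MASK(40) = 0x%016llx\n", DMA_BIT_MASK(40));
            printf("DMA_BIT_MASK(64) = 0x%016llx\n", DMA_BIT_MASK(64));
            return 0;
    }

The probe-time pattern stays the same as in jme_pci_dma64() above: try the widest mask first and fall back to narrower ones until pci_set_dma_mask() accepts.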
diff --git a/drivers/net/mac89x0.c b/drivers/net/mac89x0.c index 380a1a54d530..384e072de2e7 100644 --- a/drivers/net/mac89x0.c +++ b/drivers/net/mac89x0.c | |||
@@ -168,6 +168,17 @@ writereg(struct net_device *dev, int portno, int value) | |||
168 | nubus_writew(swab16(value), dev->mem_start + portno); | 168 | nubus_writew(swab16(value), dev->mem_start + portno); |
169 | } | 169 | } |
170 | 170 | ||
171 | static const struct net_device_ops mac89x0_netdev_ops = { | ||
172 | .ndo_open = net_open, | ||
173 | .ndo_stop = net_close, | ||
174 | .ndo_start_xmit = net_send_packet, | ||
175 | .ndo_get_stats = net_get_stats, | ||
176 | .ndo_set_multicast_list = set_multicast_list, | ||
177 | .ndo_set_mac_address = set_mac_address, | ||
178 | .ndo_validate_addr = eth_validate_addr, | ||
179 | .ndo_change_mtu = eth_change_mtu, | ||
180 | }; | ||
181 | |||
171 | /* Probe for the CS8900 card in slot E. We won't bother looking | 182 | /* Probe for the CS8900 card in slot E. We won't bother looking |
172 | anywhere else until we have a really good reason to do so. */ | 183 | anywhere else until we have a really good reason to do so. */ |
173 | struct net_device * __init mac89x0_probe(int unit) | 184 | struct net_device * __init mac89x0_probe(int unit) |
@@ -280,12 +291,7 @@ struct net_device * __init mac89x0_probe(int unit) | |||
280 | 291 | ||
281 | printk(" IRQ %d ADDR %pM\n", dev->irq, dev->dev_addr); | 292 | printk(" IRQ %d ADDR %pM\n", dev->irq, dev->dev_addr); |
282 | 293 | ||
283 | dev->open = net_open; | 294 | dev->netdev_ops = &mac89x0_netdev_ops; |
284 | dev->stop = net_close; | ||
285 | dev->hard_start_xmit = net_send_packet; | ||
286 | dev->get_stats = net_get_stats; | ||
287 | dev->set_multicast_list = &set_multicast_list; | ||
288 | dev->set_mac_address = &set_mac_address; | ||
289 | 295 | ||
290 | err = register_netdev(dev); | 296 | err = register_netdev(dev); |
291 | if (err) | 297 | if (err) |
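This mac89x0 hunk is the first of several identical conversions in the diff (macb, macsonic, sh_eth, sun3_82586, tc35815, tsi108_eth and xtsonic follow the same shape): the per-device function pointers that used to be assigned one by one into struct net_device move into a const struct net_device_ops, and probe sets a single dev->netdev_ops pointer. A minimal sketch of the pattern with hypothetical foo_* handlers (only eth_validate_addr, eth_mac_addr and eth_change_mtu are real helpers):

    /* One shared, read-only ops table per driver instead of writable
     * per-netdev function pointers. */
    static const struct net_device_ops foo_netdev_ops = {
            .ndo_open               = foo_open,
            .ndo_stop               = foo_close,
            .ndo_start_xmit         = foo_start_xmit,
            .ndo_get_stats          = foo_get_stats,
            .ndo_set_multicast_list = foo_set_rx_mode,
            .ndo_validate_addr      = eth_validate_addr,
            .ndo_set_mac_address    = eth_mac_addr,
            .ndo_change_mtu         = eth_change_mtu,
    };

    static int foo_probe(struct net_device *dev)
    {
            /* Replaces the old dev->open = ..., dev->stop = ... assignments. */
            dev->netdev_ops = &foo_netdev_ops;
            dev->watchdog_timeo = 5 * HZ;
            return register_netdev(dev);
    }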
diff --git a/drivers/net/macb.c b/drivers/net/macb.c index f50501013b1c..46073de290cf 100644 --- a/drivers/net/macb.c +++ b/drivers/net/macb.c | |||
@@ -1100,6 +1100,18 @@ static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |||
1100 | return phy_mii_ioctl(phydev, if_mii(rq), cmd); | 1100 | return phy_mii_ioctl(phydev, if_mii(rq), cmd); |
1101 | } | 1101 | } |
1102 | 1102 | ||
1103 | static const struct net_device_ops macb_netdev_ops = { | ||
1104 | .ndo_open = macb_open, | ||
1105 | .ndo_stop = macb_close, | ||
1106 | .ndo_start_xmit = macb_start_xmit, | ||
1107 | .ndo_set_multicast_list = macb_set_rx_mode, | ||
1108 | .ndo_get_stats = macb_get_stats, | ||
1109 | .ndo_do_ioctl = macb_ioctl, | ||
1110 | .ndo_validate_addr = eth_validate_addr, | ||
1111 | .ndo_change_mtu = eth_change_mtu, | ||
1112 | .ndo_set_mac_address = eth_mac_addr, | ||
1113 | }; | ||
1114 | |||
1103 | static int __init macb_probe(struct platform_device *pdev) | 1115 | static int __init macb_probe(struct platform_device *pdev) |
1104 | { | 1116 | { |
1105 | struct eth_platform_data *pdata; | 1117 | struct eth_platform_data *pdata; |
@@ -1175,12 +1187,7 @@ static int __init macb_probe(struct platform_device *pdev) | |||
1175 | goto err_out_iounmap; | 1187 | goto err_out_iounmap; |
1176 | } | 1188 | } |
1177 | 1189 | ||
1178 | dev->open = macb_open; | 1190 | dev->netdev_ops = &macb_netdev_ops; |
1179 | dev->stop = macb_close; | ||
1180 | dev->hard_start_xmit = macb_start_xmit; | ||
1181 | dev->get_stats = macb_get_stats; | ||
1182 | dev->set_multicast_list = macb_set_rx_mode; | ||
1183 | dev->do_ioctl = macb_ioctl; | ||
1184 | netif_napi_add(dev, &bp->napi, macb_poll, 64); | 1191 | netif_napi_add(dev, &bp->napi, macb_poll, 64); |
1185 | dev->ethtool_ops = &macb_ethtool_ops; | 1192 | dev->ethtool_ops = &macb_ethtool_ops; |
1186 | 1193 | ||
diff --git a/drivers/net/macsonic.c b/drivers/net/macsonic.c index 527166e35d56..acd143da161d 100644 --- a/drivers/net/macsonic.c +++ b/drivers/net/macsonic.c | |||
@@ -167,6 +167,18 @@ static int macsonic_close(struct net_device* dev) | |||
167 | return err; | 167 | return err; |
168 | } | 168 | } |
169 | 169 | ||
170 | static const struct net_device_ops macsonic_netdev_ops = { | ||
171 | .ndo_open = macsonic_open, | ||
172 | .ndo_stop = macsonic_close, | ||
173 | .ndo_start_xmit = sonic_send_packet, | ||
174 | .ndo_set_multicast_list = sonic_multicast_list, | ||
175 | .ndo_tx_timeout = sonic_tx_timeout, | ||
176 | .ndo_get_stats = sonic_get_stats, | ||
177 | .ndo_validate_addr = eth_validate_addr, | ||
178 | .ndo_change_mtu = eth_change_mtu, | ||
179 | .ndo_set_mac_address = eth_mac_addr, | ||
180 | }; | ||
181 | |||
170 | static int __init macsonic_init(struct net_device *dev) | 182 | static int __init macsonic_init(struct net_device *dev) |
171 | { | 183 | { |
172 | struct sonic_local* lp = netdev_priv(dev); | 184 | struct sonic_local* lp = netdev_priv(dev); |
@@ -198,12 +210,7 @@ static int __init macsonic_init(struct net_device *dev) | |||
198 | lp->rra_laddr = lp->rda_laddr + (SIZEOF_SONIC_RD * SONIC_NUM_RDS | 210 | lp->rra_laddr = lp->rda_laddr + (SIZEOF_SONIC_RD * SONIC_NUM_RDS |
199 | * SONIC_BUS_SCALE(lp->dma_bitmode)); | 211 | * SONIC_BUS_SCALE(lp->dma_bitmode)); |
200 | 212 | ||
201 | dev->open = macsonic_open; | 213 | dev->netdev_ops = &macsonic_netdev_ops; |
202 | dev->stop = macsonic_close; | ||
203 | dev->hard_start_xmit = sonic_send_packet; | ||
204 | dev->get_stats = sonic_get_stats; | ||
205 | dev->set_multicast_list = &sonic_multicast_list; | ||
206 | dev->tx_timeout = sonic_tx_timeout; | ||
207 | dev->watchdog_timeo = TX_TIMEOUT; | 214 | dev->watchdog_timeo = TX_TIMEOUT; |
208 | 215 | ||
209 | /* | 216 | /* |
diff --git a/drivers/net/mlx4/port.c b/drivers/net/mlx4/port.c index 7cce3342ef8c..606aa58afdea 100644 --- a/drivers/net/mlx4/port.c +++ b/drivers/net/mlx4/port.c | |||
@@ -299,13 +299,14 @@ int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port) | |||
299 | struct mlx4_cmd_mailbox *mailbox; | 299 | struct mlx4_cmd_mailbox *mailbox; |
300 | int err; | 300 | int err; |
301 | 301 | ||
302 | if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) | ||
303 | return 0; | ||
304 | |||
302 | mailbox = mlx4_alloc_cmd_mailbox(dev); | 305 | mailbox = mlx4_alloc_cmd_mailbox(dev); |
303 | if (IS_ERR(mailbox)) | 306 | if (IS_ERR(mailbox)) |
304 | return PTR_ERR(mailbox); | 307 | return PTR_ERR(mailbox); |
305 | 308 | ||
306 | memset(mailbox->buf, 0, 256); | 309 | memset(mailbox->buf, 0, 256); |
307 | if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) | ||
308 | return 0; | ||
309 | 310 | ||
310 | ((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port]; | 311 | ((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port]; |
311 | err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT, | 312 | err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT, |
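Moving the MLX4_PORT_TYPE_ETH check above mlx4_alloc_cmd_mailbox() matters because the old placement returned 0 after the mailbox had been allocated but before any path freed it, leaking a command mailbox on every Ethernet port. The general shape of the fix, with hypothetical example_* names standing in for the mlx4 helpers (only IS_ERR and PTR_ERR are the real kernel helpers):

    /* Test cheap preconditions before allocating; once the resource exists,
     * every return path has to release it. */
    static int example_set_port(struct example_dev *dev, u8 port)
    {
            struct example_mailbox *mailbox;
            int err;

            if (dev->port_type[port] == EXAMPLE_PORT_ETH)
                    return 0;                     /* nothing allocated yet */

            mailbox = example_alloc_mailbox(dev);
            if (IS_ERR(mailbox))
                    return PTR_ERR(mailbox);

            err = example_issue_cmd(dev, mailbox, port);

            example_free_mailbox(dev, mailbox);   /* on success and on failure */
            return err;
    }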
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c index a56d9d2df73f..b3185bf2c158 100644 --- a/drivers/net/mv643xx_eth.c +++ b/drivers/net/mv643xx_eth.c | |||
@@ -2274,8 +2274,6 @@ static void port_start(struct mv643xx_eth_private *mp) | |||
2274 | pscr |= FORCE_LINK_PASS; | 2274 | pscr |= FORCE_LINK_PASS; |
2275 | wrlp(mp, PORT_SERIAL_CONTROL, pscr); | 2275 | wrlp(mp, PORT_SERIAL_CONTROL, pscr); |
2276 | 2276 | ||
2277 | wrlp(mp, SDMA_CONFIG, PORT_SDMA_CONFIG_DEFAULT_VALUE); | ||
2278 | |||
2279 | /* | 2277 | /* |
2280 | * Configure TX path and queues. | 2278 | * Configure TX path and queues. |
2281 | */ | 2279 | */ |
@@ -2957,6 +2955,8 @@ static int mv643xx_eth_probe(struct platform_device *pdev) | |||
2957 | 2955 | ||
2958 | netif_carrier_off(dev); | 2956 | netif_carrier_off(dev); |
2959 | 2957 | ||
2958 | wrlp(mp, SDMA_CONFIG, PORT_SDMA_CONFIG_DEFAULT_VALUE); | ||
2959 | |||
2960 | set_rx_coal(mp, 250); | 2960 | set_rx_coal(mp, 250); |
2961 | set_tx_coal(mp, 0); | 2961 | set_tx_coal(mp, 0); |
2962 | 2962 | ||
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index 9eed126a82f0..f2c4a665e93f 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c | |||
@@ -2447,6 +2447,7 @@ static int myri10ge_open(struct net_device *dev) | |||
2447 | lro_mgr->lro_arr = ss->rx_done.lro_desc; | 2447 | lro_mgr->lro_arr = ss->rx_done.lro_desc; |
2448 | lro_mgr->get_frag_header = myri10ge_get_frag_header; | 2448 | lro_mgr->get_frag_header = myri10ge_get_frag_header; |
2449 | lro_mgr->max_aggr = myri10ge_lro_max_pkts; | 2449 | lro_mgr->max_aggr = myri10ge_lro_max_pkts; |
2450 | lro_mgr->frag_align_pad = 2; | ||
2450 | if (lro_mgr->max_aggr > MAX_SKB_FRAGS) | 2451 | if (lro_mgr->max_aggr > MAX_SKB_FRAGS) |
2451 | lro_mgr->max_aggr = MAX_SKB_FRAGS; | 2452 | lro_mgr->max_aggr = MAX_SKB_FRAGS; |
2452 | 2453 | ||
diff --git a/drivers/net/niu.c b/drivers/net/niu.c index 73cac6c78cb6..2b1745328cf7 100644 --- a/drivers/net/niu.c +++ b/drivers/net/niu.c | |||
@@ -4834,6 +4834,7 @@ static int niu_compute_rbr_cfig_b(struct rx_ring_info *rp, u64 *ret) | |||
4834 | { | 4834 | { |
4835 | u64 val = 0; | 4835 | u64 val = 0; |
4836 | 4836 | ||
4837 | *ret = 0; | ||
4837 | switch (rp->rbr_block_size) { | 4838 | switch (rp->rbr_block_size) { |
4838 | case 4 * 1024: | 4839 | case 4 * 1024: |
4839 | val |= (RBR_BLKSIZE_4K << RBR_CFIG_B_BLKSIZE_SHIFT); | 4840 | val |= (RBR_BLKSIZE_4K << RBR_CFIG_B_BLKSIZE_SHIFT); |
@@ -9542,7 +9543,7 @@ static struct niu_parent * __devinit niu_new_parent(struct niu *np, | |||
9542 | 9543 | ||
9543 | plat_dev = platform_device_register_simple("niu", niu_parent_index, | 9544 | plat_dev = platform_device_register_simple("niu", niu_parent_index, |
9544 | NULL, 0); | 9545 | NULL, 0); |
9545 | if (!plat_dev) | 9546 | if (IS_ERR(plat_dev)) |
9546 | return NULL; | 9547 | return NULL; |
9547 | 9548 | ||
9548 | for (i = 0; attr_name(niu_parent_attributes[i]); i++) { | 9549 | for (i = 0; attr_name(niu_parent_attributes[i]); i++) { |
diff --git a/drivers/net/phy/fixed.c b/drivers/net/phy/fixed.c index cf24cc34debe..e7070515d2e3 100644 --- a/drivers/net/phy/fixed.c +++ b/drivers/net/phy/fixed.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/mii.h> | 19 | #include <linux/mii.h> |
20 | #include <linux/phy.h> | 20 | #include <linux/phy.h> |
21 | #include <linux/phy_fixed.h> | 21 | #include <linux/phy_fixed.h> |
22 | #include <linux/err.h> | ||
22 | 23 | ||
23 | #define MII_REGS_NUM 29 | 24 | #define MII_REGS_NUM 29 |
24 | 25 | ||
@@ -207,8 +208,8 @@ static int __init fixed_mdio_bus_init(void) | |||
207 | int ret; | 208 | int ret; |
208 | 209 | ||
209 | pdev = platform_device_register_simple("Fixed MDIO bus", 0, NULL, 0); | 210 | pdev = platform_device_register_simple("Fixed MDIO bus", 0, NULL, 0); |
210 | if (!pdev) { | 211 | if (IS_ERR(pdev)) { |
211 | ret = -ENOMEM; | 212 | ret = PTR_ERR(pdev); |
212 | goto err_pdev; | 213 | goto err_pdev; |
213 | } | 214 | } |
214 | 215 | ||
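Both the niu.c hunk earlier and this fixed.c hunk fix the same class of bug: platform_device_register_simple() reports failure through ERR_PTR()-encoded pointers, never NULL, so an `if (!pdev)` test can never see the error. The minimal correct call pattern (platform_device_register_simple, IS_ERR and PTR_ERR are the real APIs; the bus name is made up):

    #include <linux/platform_device.h>
    #include <linux/err.h>

    static int __init example_bus_init(void)
    {
            struct platform_device *pdev;

            pdev = platform_device_register_simple("example bus", 0, NULL, 0);
            if (IS_ERR(pdev))
                    return PTR_ERR(pdev);   /* the encoded errno, e.g. -ENOMEM */

            return 0;
    }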
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index eb6411c4694f..7a3ec9d39a9a 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c | |||
@@ -69,6 +69,11 @@ | |||
69 | #define MII_M1111_COPPER 0 | 69 | #define MII_M1111_COPPER 0 |
70 | #define MII_M1111_FIBER 1 | 70 | #define MII_M1111_FIBER 1 |
71 | 71 | ||
72 | #define MII_88E1121_PHY_LED_CTRL 16 | ||
73 | #define MII_88E1121_PHY_LED_PAGE 3 | ||
74 | #define MII_88E1121_PHY_LED_DEF 0x0030 | ||
75 | #define MII_88E1121_PHY_PAGE 22 | ||
76 | |||
72 | #define MII_M1011_PHY_STATUS 0x11 | 77 | #define MII_M1011_PHY_STATUS 0x11 |
73 | #define MII_M1011_PHY_STATUS_1000 0x8000 | 78 | #define MII_M1011_PHY_STATUS_1000 0x8000 |
74 | #define MII_M1011_PHY_STATUS_100 0x4000 | 79 | #define MII_M1011_PHY_STATUS_100 0x4000 |
@@ -154,6 +159,30 @@ static int marvell_config_aneg(struct phy_device *phydev) | |||
154 | return err; | 159 | return err; |
155 | } | 160 | } |
156 | 161 | ||
162 | static int m88e1121_config_aneg(struct phy_device *phydev) | ||
163 | { | ||
164 | int err, temp; | ||
165 | |||
166 | err = phy_write(phydev, MII_BMCR, BMCR_RESET); | ||
167 | if (err < 0) | ||
168 | return err; | ||
169 | |||
170 | err = phy_write(phydev, MII_M1011_PHY_SCR, | ||
171 | MII_M1011_PHY_SCR_AUTO_CROSS); | ||
172 | if (err < 0) | ||
173 | return err; | ||
174 | |||
175 | temp = phy_read(phydev, MII_88E1121_PHY_PAGE); | ||
176 | |||
177 | phy_write(phydev, MII_88E1121_PHY_PAGE, MII_88E1121_PHY_LED_PAGE); | ||
178 | phy_write(phydev, MII_88E1121_PHY_LED_CTRL, MII_88E1121_PHY_LED_DEF); | ||
179 | phy_write(phydev, MII_88E1121_PHY_PAGE, temp); | ||
180 | |||
181 | err = genphy_config_aneg(phydev); | ||
182 | |||
183 | return err; | ||
184 | } | ||
185 | |||
157 | static int m88e1111_config_init(struct phy_device *phydev) | 186 | static int m88e1111_config_init(struct phy_device *phydev) |
158 | { | 187 | { |
159 | int err; | 188 | int err; |
@@ -429,6 +458,18 @@ static int marvell_read_status(struct phy_device *phydev) | |||
429 | return 0; | 458 | return 0; |
430 | } | 459 | } |
431 | 460 | ||
461 | static int m88e1121_did_interrupt(struct phy_device *phydev) | ||
462 | { | ||
463 | int imask; | ||
464 | |||
465 | imask = phy_read(phydev, MII_M1011_IEVENT); | ||
466 | |||
467 | if (imask & MII_M1011_IMASK_INIT) | ||
468 | return 1; | ||
469 | |||
470 | return 0; | ||
471 | } | ||
472 | |||
432 | static struct phy_driver marvell_drivers[] = { | 473 | static struct phy_driver marvell_drivers[] = { |
433 | { | 474 | { |
434 | .phy_id = 0x01410c60, | 475 | .phy_id = 0x01410c60, |
@@ -482,6 +523,19 @@ static struct phy_driver marvell_drivers[] = { | |||
482 | .driver = {.owner = THIS_MODULE,}, | 523 | .driver = {.owner = THIS_MODULE,}, |
483 | }, | 524 | }, |
484 | { | 525 | { |
526 | .phy_id = 0x01410cb0, | ||
527 | .phy_id_mask = 0xfffffff0, | ||
528 | .name = "Marvell 88E1121R", | ||
529 | .features = PHY_GBIT_FEATURES, | ||
530 | .flags = PHY_HAS_INTERRUPT, | ||
531 | .config_aneg = &m88e1121_config_aneg, | ||
532 | .read_status = &marvell_read_status, | ||
533 | .ack_interrupt = &marvell_ack_interrupt, | ||
534 | .config_intr = &marvell_config_intr, | ||
535 | .did_interrupt = &m88e1121_did_interrupt, | ||
536 | .driver = { .owner = THIS_MODULE }, | ||
537 | }, | ||
538 | { | ||
485 | .phy_id = 0x01410cd0, | 539 | .phy_id = 0x01410cd0, |
486 | .phy_id_mask = 0xfffffff0, | 540 | .phy_id_mask = 0xfffffff0, |
487 | .name = "Marvell 88E1145", | 541 | .name = "Marvell 88E1145", |
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 3ff1f425f1bb..61755cbd978e 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c | |||
@@ -434,7 +434,7 @@ void phy_start_machine(struct phy_device *phydev, | |||
434 | phydev->adjust_state = handler; | 434 | phydev->adjust_state = handler; |
435 | 435 | ||
436 | INIT_DELAYED_WORK(&phydev->state_queue, phy_state_machine); | 436 | INIT_DELAYED_WORK(&phydev->state_queue, phy_state_machine); |
437 | schedule_delayed_work(&phydev->state_queue, jiffies + HZ); | 437 | schedule_delayed_work(&phydev->state_queue, HZ); |
438 | } | 438 | } |
439 | 439 | ||
440 | /** | 440 | /** |
@@ -655,6 +655,10 @@ static void phy_change(struct work_struct *work) | |||
655 | struct phy_device *phydev = | 655 | struct phy_device *phydev = |
656 | container_of(work, struct phy_device, phy_queue); | 656 | container_of(work, struct phy_device, phy_queue); |
657 | 657 | ||
658 | if (phydev->drv->did_interrupt && | ||
659 | !phydev->drv->did_interrupt(phydev)) | ||
660 | goto ignore; | ||
661 | |||
658 | err = phy_disable_interrupts(phydev); | 662 | err = phy_disable_interrupts(phydev); |
659 | 663 | ||
660 | if (err) | 664 | if (err) |
@@ -681,6 +685,11 @@ static void phy_change(struct work_struct *work) | |||
681 | 685 | ||
682 | return; | 686 | return; |
683 | 687 | ||
688 | ignore: | ||
689 | atomic_dec(&phydev->irq_disable); | ||
690 | enable_irq(phydev->irq); | ||
691 | return; | ||
692 | |||
684 | irq_enable_err: | 693 | irq_enable_err: |
685 | disable_irq(phydev->irq); | 694 | disable_irq(phydev->irq); |
686 | atomic_inc(&phydev->irq_disable); | 695 | atomic_inc(&phydev->irq_disable); |
@@ -937,6 +946,5 @@ static void phy_state_machine(struct work_struct *work) | |||
937 | if (err < 0) | 946 | if (err < 0) |
938 | phy_error(phydev); | 947 | phy_error(phydev); |
939 | 948 | ||
940 | schedule_delayed_work(&phydev->state_queue, | 949 | schedule_delayed_work(&phydev->state_queue, PHY_STATE_TIME * HZ); |
941 | jiffies + PHY_STATE_TIME * HZ); | ||
942 | } | 950 | } |
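The two schedule_delayed_work() changes in phy.c are the same fix: the second argument is a delay relative to now, measured in jiffies, so passing `jiffies + HZ` asked for a delay of roughly the current jiffies counter value instead of one second. A minimal sketch of the intended usage (the workqueue API is real, the work item and handler are made up):

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    static void example_state_machine(struct work_struct *work);
    static DECLARE_DELAYED_WORK(example_work, example_state_machine);

    static void example_state_machine(struct work_struct *work)
    {
            /* ... poll link state ... */

            /* Re-arm one second from now: the delay is relative, so it is
             * plain HZ, not jiffies + HZ. */
            schedule_delayed_work(&example_work, HZ);
    }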
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c index 5e8540b6ffa1..6f97b47d74a6 100644 --- a/drivers/net/r6040.c +++ b/drivers/net/r6040.c | |||
@@ -160,6 +160,7 @@ MODULE_AUTHOR("Sten Wang <sten.wang@rdc.com.tw>," | |||
160 | "Florian Fainelli <florian@openwrt.org>"); | 160 | "Florian Fainelli <florian@openwrt.org>"); |
161 | MODULE_LICENSE("GPL"); | 161 | MODULE_LICENSE("GPL"); |
162 | MODULE_DESCRIPTION("RDC R6040 NAPI PCI FastEthernet driver"); | 162 | MODULE_DESCRIPTION("RDC R6040 NAPI PCI FastEthernet driver"); |
163 | MODULE_VERSION(DRV_VERSION " " DRV_RELDATE); | ||
163 | 164 | ||
164 | /* RX and TX interrupts that we handle */ | 165 | /* RX and TX interrupts that we handle */ |
165 | #define RX_INTS (RX_FIFO_FULL | RX_NO_DESC | RX_FINISH) | 166 | #define RX_INTS (RX_FIFO_FULL | RX_NO_DESC | RX_FINISH) |
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c index dee23b159df2..7269a426051c 100644 --- a/drivers/net/sfc/efx.c +++ b/drivers/net/sfc/efx.c | |||
@@ -448,9 +448,6 @@ static void efx_init_channels(struct efx_nic *efx) | |||
448 | 448 | ||
449 | WARN_ON(channel->rx_pkt != NULL); | 449 | WARN_ON(channel->rx_pkt != NULL); |
450 | efx_rx_strategy(channel); | 450 | efx_rx_strategy(channel); |
451 | |||
452 | netif_napi_add(channel->napi_dev, &channel->napi_str, | ||
453 | efx_poll, napi_weight); | ||
454 | } | 451 | } |
455 | } | 452 | } |
456 | 453 | ||
@@ -1321,6 +1318,8 @@ static int efx_init_napi(struct efx_nic *efx) | |||
1321 | 1318 | ||
1322 | efx_for_each_channel(channel, efx) { | 1319 | efx_for_each_channel(channel, efx) { |
1323 | channel->napi_dev = efx->net_dev; | 1320 | channel->napi_dev = efx->net_dev; |
1321 | netif_napi_add(channel->napi_dev, &channel->napi_str, | ||
1322 | efx_poll, napi_weight); | ||
1324 | } | 1323 | } |
1325 | return 0; | 1324 | return 0; |
1326 | } | 1325 | } |
@@ -1330,6 +1329,8 @@ static void efx_fini_napi(struct efx_nic *efx) | |||
1330 | struct efx_channel *channel; | 1329 | struct efx_channel *channel; |
1331 | 1330 | ||
1332 | efx_for_each_channel(channel, efx) { | 1331 | efx_for_each_channel(channel, efx) { |
1332 | if (channel->napi_dev) | ||
1333 | netif_napi_del(&channel->napi_str); | ||
1333 | channel->napi_dev = NULL; | 1334 | channel->napi_dev = NULL; |
1334 | } | 1335 | } |
1335 | } | 1336 | } |
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c index d4629ab2c614..466a8abb0053 100644 --- a/drivers/net/sfc/falcon.c +++ b/drivers/net/sfc/falcon.c | |||
@@ -1176,9 +1176,9 @@ void falcon_sim_phy_event(struct efx_nic *efx) | |||
1176 | 1176 | ||
1177 | EFX_POPULATE_QWORD_1(phy_event, EV_CODE, GLOBAL_EV_DECODE); | 1177 | EFX_POPULATE_QWORD_1(phy_event, EV_CODE, GLOBAL_EV_DECODE); |
1178 | if (EFX_IS10G(efx)) | 1178 | if (EFX_IS10G(efx)) |
1179 | EFX_SET_OWORD_FIELD(phy_event, XG_PHY_INTR, 1); | 1179 | EFX_SET_QWORD_FIELD(phy_event, XG_PHY_INTR, 1); |
1180 | else | 1180 | else |
1181 | EFX_SET_OWORD_FIELD(phy_event, G_PHY0_INTR, 1); | 1181 | EFX_SET_QWORD_FIELD(phy_event, G_PHY0_INTR, 1); |
1182 | 1182 | ||
1183 | falcon_generate_event(&efx->channel[0], &phy_event); | 1183 | falcon_generate_event(&efx->channel[0], &phy_event); |
1184 | } | 1184 | } |
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c index 7b1882765a0c..3ab28bb00c12 100644 --- a/drivers/net/sh_eth.c +++ b/drivers/net/sh_eth.c | |||
@@ -1188,6 +1188,19 @@ out: | |||
1188 | return ret; | 1188 | return ret; |
1189 | } | 1189 | } |
1190 | 1190 | ||
1191 | static const struct net_device_ops sh_eth_netdev_ops = { | ||
1192 | .ndo_open = sh_eth_open, | ||
1193 | .ndo_stop = sh_eth_close, | ||
1194 | .ndo_start_xmit = sh_eth_start_xmit, | ||
1195 | .ndo_get_stats = sh_eth_get_stats, | ||
1196 | .ndo_set_multicast_list = sh_eth_set_multicast_list, | ||
1197 | .ndo_tx_timeout = sh_eth_tx_timeout, | ||
1198 | .ndo_do_ioctl = sh_eth_do_ioctl, | ||
1199 | .ndo_validate_addr = eth_validate_addr, | ||
1200 | .ndo_set_mac_address = eth_mac_addr, | ||
1201 | .ndo_change_mtu = eth_change_mtu, | ||
1202 | }; | ||
1203 | |||
1191 | static int sh_eth_drv_probe(struct platform_device *pdev) | 1204 | static int sh_eth_drv_probe(struct platform_device *pdev) |
1192 | { | 1205 | { |
1193 | int ret, i, devno = 0; | 1206 | int ret, i, devno = 0; |
@@ -1240,13 +1253,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev) | |||
1240 | mdp->edmac_endian = pd->edmac_endian; | 1253 | mdp->edmac_endian = pd->edmac_endian; |
1241 | 1254 | ||
1242 | /* set function */ | 1255 | /* set function */ |
1243 | ndev->open = sh_eth_open; | 1256 | ndev->netdev_ops = &sh_eth_netdev_ops; |
1244 | ndev->hard_start_xmit = sh_eth_start_xmit; | ||
1245 | ndev->stop = sh_eth_close; | ||
1246 | ndev->get_stats = sh_eth_get_stats; | ||
1247 | ndev->set_multicast_list = sh_eth_set_multicast_list; | ||
1248 | ndev->do_ioctl = sh_eth_do_ioctl; | ||
1249 | ndev->tx_timeout = sh_eth_tx_timeout; | ||
1250 | ndev->watchdog_timeo = TX_TIMEOUT; | 1257 | ndev->watchdog_timeo = TX_TIMEOUT; |
1251 | 1258 | ||
1252 | mdp->post_rx = POST_RX >> (devno << 1); | 1259 | mdp->post_rx = POST_RX >> (devno << 1); |
diff --git a/drivers/net/skge.c b/drivers/net/skge.c index b8978d4af1b7..c11cdd08ec57 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c | |||
@@ -2674,7 +2674,7 @@ static int skge_down(struct net_device *dev) | |||
2674 | if (netif_msg_ifdown(skge)) | 2674 | if (netif_msg_ifdown(skge)) |
2675 | printk(KERN_INFO PFX "%s: disabling interface\n", dev->name); | 2675 | printk(KERN_INFO PFX "%s: disabling interface\n", dev->name); |
2676 | 2676 | ||
2677 | netif_stop_queue(dev); | 2677 | netif_tx_disable(dev); |
2678 | 2678 | ||
2679 | if (hw->chip_id == CHIP_ID_GENESIS && hw->phy_type == SK_PHY_XMAC) | 2679 | if (hw->chip_id == CHIP_ID_GENESIS && hw->phy_type == SK_PHY_XMAC) |
2680 | del_timer_sync(&skge->link_timer); | 2680 | del_timer_sync(&skge->link_timer); |
@@ -2881,7 +2881,6 @@ static void skge_tx_clean(struct net_device *dev) | |||
2881 | } | 2881 | } |
2882 | 2882 | ||
2883 | skge->tx_ring.to_clean = e; | 2883 | skge->tx_ring.to_clean = e; |
2884 | netif_wake_queue(dev); | ||
2885 | } | 2884 | } |
2886 | 2885 | ||
2887 | static void skge_tx_timeout(struct net_device *dev) | 2886 | static void skge_tx_timeout(struct net_device *dev) |
@@ -2893,6 +2892,7 @@ static void skge_tx_timeout(struct net_device *dev) | |||
2893 | 2892 | ||
2894 | skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_STOP); | 2893 | skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_STOP); |
2895 | skge_tx_clean(dev); | 2894 | skge_tx_clean(dev); |
2895 | netif_wake_queue(dev); | ||
2896 | } | 2896 | } |
2897 | 2897 | ||
2898 | static int skge_change_mtu(struct net_device *dev, int new_mtu) | 2898 | static int skge_change_mtu(struct net_device *dev, int new_mtu) |
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h index 912308eec865..329f890e2903 100644 --- a/drivers/net/smc91x.h +++ b/drivers/net/smc91x.h | |||
@@ -369,7 +369,7 @@ static inline void LPD7_SMC_outsw (unsigned char* a, int r, | |||
369 | * MN10300/AM33 configuration | 369 | * MN10300/AM33 configuration |
370 | */ | 370 | */ |
371 | 371 | ||
372 | #include <asm/unit/smc91111.h> | 372 | #include <unit/smc91111.h> |
373 | 373 | ||
374 | #else | 374 | #else |
375 | 375 | ||
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c index 6da678129828..eb7db032a780 100644 --- a/drivers/net/smsc911x.c +++ b/drivers/net/smsc911x.c | |||
@@ -317,7 +317,7 @@ static int smsc911x_mii_read(struct mii_bus *bus, int phyaddr, int regidx) | |||
317 | goto out; | 317 | goto out; |
318 | } | 318 | } |
319 | 319 | ||
320 | SMSC_WARNING(HW, "Timed out waiting for MII write to finish"); | 320 | SMSC_WARNING(HW, "Timed out waiting for MII read to finish"); |
321 | reg = -EIO; | 321 | reg = -EIO; |
322 | 322 | ||
323 | out: | 323 | out: |
diff --git a/drivers/net/sun3_82586.c b/drivers/net/sun3_82586.c index e0d84772771c..a39c0b9ba8b6 100644 --- a/drivers/net/sun3_82586.c +++ b/drivers/net/sun3_82586.c | |||
@@ -331,6 +331,18 @@ out: | |||
331 | return ERR_PTR(err); | 331 | return ERR_PTR(err); |
332 | } | 332 | } |
333 | 333 | ||
334 | static const struct net_device_ops sun3_82586_netdev_ops = { | ||
335 | .ndo_open = sun3_82586_open, | ||
336 | .ndo_stop = sun3_82586_close, | ||
337 | .ndo_start_xmit = sun3_82586_send_packet, | ||
338 | .ndo_set_multicast_list = set_multicast_list, | ||
339 | .ndo_tx_timeout = sun3_82586_timeout, | ||
340 | .ndo_get_stats = sun3_82586_get_stats, | ||
341 | .ndo_validate_addr = eth_validate_addr, | ||
342 | .ndo_set_mac_address = eth_mac_addr, | ||
343 | .ndo_change_mtu = eth_change_mtu, | ||
344 | }; | ||
345 | |||
334 | static int __init sun3_82586_probe1(struct net_device *dev,int ioaddr) | 346 | static int __init sun3_82586_probe1(struct net_device *dev,int ioaddr) |
335 | { | 347 | { |
336 | int i, size, retval; | 348 | int i, size, retval; |
@@ -381,13 +393,8 @@ static int __init sun3_82586_probe1(struct net_device *dev,int ioaddr) | |||
381 | 393 | ||
382 | printk("Memaddr: 0x%lx, Memsize: %d, IRQ %d\n",dev->mem_start,size, dev->irq); | 394 | printk("Memaddr: 0x%lx, Memsize: %d, IRQ %d\n",dev->mem_start,size, dev->irq); |
383 | 395 | ||
384 | dev->open = sun3_82586_open; | 396 | dev->netdev_ops = &sun3_82586_netdev_ops; |
385 | dev->stop = sun3_82586_close; | ||
386 | dev->get_stats = sun3_82586_get_stats; | ||
387 | dev->tx_timeout = sun3_82586_timeout; | ||
388 | dev->watchdog_timeo = HZ/20; | 397 | dev->watchdog_timeo = HZ/20; |
389 | dev->hard_start_xmit = sun3_82586_send_packet; | ||
390 | dev->set_multicast_list = set_multicast_list; | ||
391 | 398 | ||
392 | dev->if_port = 0; | 399 | dev->if_port = 0; |
393 | return 0; | 400 | return 0; |
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c index d91e95b237b7..0ce2db6ce2bf 100644 --- a/drivers/net/tc35815.c +++ b/drivers/net/tc35815.c | |||
@@ -862,6 +862,22 @@ static int __devinit tc35815_init_dev_addr(struct net_device *dev) | |||
862 | return 0; | 862 | return 0; |
863 | } | 863 | } |
864 | 864 | ||
865 | static const struct net_device_ops tc35815_netdev_ops = { | ||
866 | .ndo_open = tc35815_open, | ||
867 | .ndo_stop = tc35815_close, | ||
868 | .ndo_start_xmit = tc35815_send_packet, | ||
869 | .ndo_get_stats = tc35815_get_stats, | ||
870 | .ndo_set_multicast_list = tc35815_set_multicast_list, | ||
871 | .ndo_tx_timeout = tc35815_tx_timeout, | ||
872 | .ndo_do_ioctl = tc35815_ioctl, | ||
873 | .ndo_validate_addr = eth_validate_addr, | ||
874 | .ndo_change_mtu = eth_change_mtu, | ||
875 | .ndo_set_mac_address = eth_mac_addr, | ||
876 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
877 | .ndo_poll_controller = tc35815_poll_controller, | ||
878 | #endif | ||
879 | }; | ||
880 | |||
865 | static int __devinit tc35815_init_one(struct pci_dev *pdev, | 881 | static int __devinit tc35815_init_one(struct pci_dev *pdev, |
866 | const struct pci_device_id *ent) | 882 | const struct pci_device_id *ent) |
867 | { | 883 | { |
@@ -904,21 +920,12 @@ static int __devinit tc35815_init_one(struct pci_dev *pdev, | |||
904 | ioaddr = pcim_iomap_table(pdev)[1]; | 920 | ioaddr = pcim_iomap_table(pdev)[1]; |
905 | 921 | ||
906 | /* Initialize the device structure. */ | 922 | /* Initialize the device structure. */ |
907 | dev->open = tc35815_open; | 923 | dev->netdev_ops = &tc35815_netdev_ops; |
908 | dev->hard_start_xmit = tc35815_send_packet; | ||
909 | dev->stop = tc35815_close; | ||
910 | dev->get_stats = tc35815_get_stats; | ||
911 | dev->set_multicast_list = tc35815_set_multicast_list; | ||
912 | dev->do_ioctl = tc35815_ioctl; | ||
913 | dev->ethtool_ops = &tc35815_ethtool_ops; | 924 | dev->ethtool_ops = &tc35815_ethtool_ops; |
914 | dev->tx_timeout = tc35815_tx_timeout; | ||
915 | dev->watchdog_timeo = TC35815_TX_TIMEOUT; | 925 | dev->watchdog_timeo = TC35815_TX_TIMEOUT; |
916 | #ifdef TC35815_NAPI | 926 | #ifdef TC35815_NAPI |
917 | netif_napi_add(dev, &lp->napi, tc35815_poll, NAPI_WEIGHT); | 927 | netif_napi_add(dev, &lp->napi, tc35815_poll, NAPI_WEIGHT); |
918 | #endif | 928 | #endif |
919 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
920 | dev->poll_controller = tc35815_poll_controller; | ||
921 | #endif | ||
922 | 929 | ||
923 | dev->irq = pdev->irq; | 930 | dev->irq = pdev->irq; |
924 | dev->base_addr = (unsigned long)ioaddr; | 931 | dev->base_addr = (unsigned long)ioaddr; |
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 6a736dda3ee2..7a837c465960 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c | |||
@@ -12443,8 +12443,13 @@ static int __devinit tg3_get_device_address(struct tg3 *tp) | |||
12443 | /* Next, try NVRAM. */ | 12443 | /* Next, try NVRAM. */ |
12444 | if (!tg3_nvram_read_be32(tp, mac_offset + 0, &hi) && | 12444 | if (!tg3_nvram_read_be32(tp, mac_offset + 0, &hi) && |
12445 | !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) { | 12445 | !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) { |
12446 | memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2); | 12446 | dev->dev_addr[0] = ((hi >> 16) & 0xff); |
12447 | memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo)); | 12447 | dev->dev_addr[1] = ((hi >> 24) & 0xff); |
12448 | dev->dev_addr[2] = ((lo >> 0) & 0xff); | ||
12449 | dev->dev_addr[3] = ((lo >> 8) & 0xff); | ||
12450 | dev->dev_addr[4] = ((lo >> 16) & 0xff); | ||
12451 | dev->dev_addr[5] = ((lo >> 24) & 0xff); | ||
12452 | |||
12448 | } | 12453 | } |
12449 | /* Finally just fetch it out of the MAC control regs. */ | 12454 | /* Finally just fetch it out of the MAC control regs. */ |
12450 | else { | 12455 | else { |
diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c index bb43e7fb2a50..0f78f99f9b20 100644 --- a/drivers/net/tsi108_eth.c +++ b/drivers/net/tsi108_eth.c | |||
@@ -1561,6 +1561,18 @@ static const struct ethtool_ops tsi108_ethtool_ops = { | |||
1561 | .set_settings = tsi108_set_settings, | 1561 | .set_settings = tsi108_set_settings, |
1562 | }; | 1562 | }; |
1563 | 1563 | ||
1564 | static const struct net_device_ops tsi108_netdev_ops = { | ||
1565 | .ndo_open = tsi108_open, | ||
1566 | .ndo_stop = tsi108_close, | ||
1567 | .ndo_start_xmit = tsi108_send_packet, | ||
1568 | .ndo_set_multicast_list = tsi108_set_rx_mode, | ||
1569 | .ndo_get_stats = tsi108_get_stats, | ||
1570 | .ndo_do_ioctl = tsi108_do_ioctl, | ||
1571 | .ndo_set_mac_address = tsi108_set_mac, | ||
1572 | .ndo_validate_addr = eth_validate_addr, | ||
1573 | .ndo_change_mtu = eth_change_mtu, | ||
1574 | }; | ||
1575 | |||
1564 | static int | 1576 | static int |
1565 | tsi108_init_one(struct platform_device *pdev) | 1577 | tsi108_init_one(struct platform_device *pdev) |
1566 | { | 1578 | { |
@@ -1616,14 +1628,8 @@ tsi108_init_one(struct platform_device *pdev) | |||
1616 | data->phy_type = einfo->phy_type; | 1628 | data->phy_type = einfo->phy_type; |
1617 | data->irq_num = einfo->irq_num; | 1629 | data->irq_num = einfo->irq_num; |
1618 | data->id = pdev->id; | 1630 | data->id = pdev->id; |
1619 | dev->open = tsi108_open; | ||
1620 | dev->stop = tsi108_close; | ||
1621 | dev->hard_start_xmit = tsi108_send_packet; | ||
1622 | dev->set_mac_address = tsi108_set_mac; | ||
1623 | dev->set_multicast_list = tsi108_set_rx_mode; | ||
1624 | dev->get_stats = tsi108_get_stats; | ||
1625 | netif_napi_add(dev, &data->napi, tsi108_poll, 64); | 1631 | netif_napi_add(dev, &data->napi, tsi108_poll, 64); |
1626 | dev->do_ioctl = tsi108_do_ioctl; | 1632 | dev->netdev_ops = &tsi108_netdev_ops; |
1627 | dev->ethtool_ops = &tsi108_ethtool_ops; | 1633 | dev->ethtool_ops = &tsi108_ethtool_ops; |
1628 | 1634 | ||
1629 | /* Apparently, the Linux networking code won't use scatter-gather | 1635 | /* Apparently, the Linux networking code won't use scatter-gather |
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index a1b0697340ba..16716aef184c 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
@@ -518,7 +518,7 @@ static inline struct sk_buff *tun_alloc_skb(struct tun_struct *tun, | |||
518 | int err; | 518 | int err; |
519 | 519 | ||
520 | /* Under a page? Don't bother with paged skb. */ | 520 | /* Under a page? Don't bother with paged skb. */ |
521 | if (prepad + len < PAGE_SIZE) | 521 | if (prepad + len < PAGE_SIZE || !linear) |
522 | linear = len; | 522 | linear = len; |
523 | 523 | ||
524 | skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, | 524 | skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, |
@@ -565,7 +565,8 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun, | |||
565 | 565 | ||
566 | if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) { | 566 | if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) { |
567 | align = NET_IP_ALIGN; | 567 | align = NET_IP_ALIGN; |
568 | if (unlikely(len < ETH_HLEN)) | 568 | if (unlikely(len < ETH_HLEN || |
569 | (gso.hdr_len && gso.hdr_len < ETH_HLEN))) | ||
569 | return -EINVAL; | 570 | return -EINVAL; |
570 | } | 571 | } |
571 | 572 | ||
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c index fb53ef872df3..754a4b182c1d 100644 --- a/drivers/net/via-velocity.c +++ b/drivers/net/via-velocity.c | |||
@@ -377,7 +377,7 @@ static void velocity_print_info(struct velocity_info *vptr); | |||
377 | static int velocity_open(struct net_device *dev); | 377 | static int velocity_open(struct net_device *dev); |
378 | static int velocity_change_mtu(struct net_device *dev, int mtu); | 378 | static int velocity_change_mtu(struct net_device *dev, int mtu); |
379 | static int velocity_xmit(struct sk_buff *skb, struct net_device *dev); | 379 | static int velocity_xmit(struct sk_buff *skb, struct net_device *dev); |
380 | static int velocity_intr(int irq, void *dev_instance); | 380 | static irqreturn_t velocity_intr(int irq, void *dev_instance); |
381 | static void velocity_set_multi(struct net_device *dev); | 381 | static void velocity_set_multi(struct net_device *dev); |
382 | static struct net_device_stats *velocity_get_stats(struct net_device *dev); | 382 | static struct net_device_stats *velocity_get_stats(struct net_device *dev); |
383 | static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); | 383 | static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); |
@@ -2215,7 +2215,7 @@ out: | |||
2215 | * efficiently as possible. | 2215 | * efficiently as possible. |
2216 | */ | 2216 | */ |
2217 | 2217 | ||
2218 | static int velocity_intr(int irq, void *dev_instance) | 2218 | static irqreturn_t velocity_intr(int irq, void *dev_instance) |
2219 | { | 2219 | { |
2220 | struct net_device *dev = dev_instance; | 2220 | struct net_device *dev = dev_instance; |
2221 | struct velocity_info *vptr = netdev_priv(dev); | 2221 | struct velocity_info *vptr = netdev_priv(dev); |
diff --git a/drivers/net/wireless/ath9k/pci.c b/drivers/net/wireless/ath9k/pci.c index 6dbc58580abb..168411d322a2 100644 --- a/drivers/net/wireless/ath9k/pci.c +++ b/drivers/net/wireless/ath9k/pci.c | |||
@@ -93,14 +93,14 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
93 | if (pci_enable_device(pdev)) | 93 | if (pci_enable_device(pdev)) |
94 | return -EIO; | 94 | return -EIO; |
95 | 95 | ||
96 | ret = pci_set_dma_mask(pdev, DMA_32BIT_MASK); | 96 | ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); |
97 | 97 | ||
98 | if (ret) { | 98 | if (ret) { |
99 | printk(KERN_ERR "ath9k: 32-bit DMA not available\n"); | 99 | printk(KERN_ERR "ath9k: 32-bit DMA not available\n"); |
100 | goto bad; | 100 | goto bad; |
101 | } | 101 | } |
102 | 102 | ||
103 | ret = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); | 103 | ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); |
104 | 104 | ||
105 | if (ret) { | 105 | if (ret) { |
106 | printk(KERN_ERR "ath9k: 32-bit DMA consistent " | 106 | printk(KERN_ERR "ath9k: 32-bit DMA consistent " |
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c index e3569a0a952d..b1610ea4bb3d 100644 --- a/drivers/net/wireless/p54/p54pci.c +++ b/drivers/net/wireless/p54/p54pci.c | |||
@@ -492,8 +492,8 @@ static int __devinit p54p_probe(struct pci_dev *pdev, | |||
492 | goto err_disable_dev; | 492 | goto err_disable_dev; |
493 | } | 493 | } |
494 | 494 | ||
495 | if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) || | 495 | if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) || |
496 | pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) { | 496 | pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) { |
497 | dev_err(&pdev->dev, "No suitable DMA available\n"); | 497 | dev_err(&pdev->dev, "No suitable DMA available\n"); |
498 | goto err_free_reg; | 498 | goto err_free_reg; |
499 | } | 499 | } |
diff --git a/drivers/net/xtsonic.c b/drivers/net/xtsonic.c index a12a7211c982..5a4ad156f63e 100644 --- a/drivers/net/xtsonic.c +++ b/drivers/net/xtsonic.c | |||
@@ -108,6 +108,18 @@ static int xtsonic_close(struct net_device *dev) | |||
108 | return err; | 108 | return err; |
109 | } | 109 | } |
110 | 110 | ||
111 | static const struct net_device_ops xtsonic_netdev_ops = { | ||
112 | .ndo_open = xtsonic_open, | ||
113 | .ndo_stop = xtsonic_close, | ||
114 | .ndo_start_xmit = sonic_send_packet, | ||
115 | .ndo_get_stats = sonic_get_stats, | ||
116 | .ndo_set_multicast_list = sonic_multicast_list, | ||
117 | .ndo_tx_timeout = sonic_tx_timeout, | ||
118 | .ndo_validate_addr = eth_validate_addr, | ||
119 | .ndo_change_mtu = eth_change_mtu, | ||
120 | .ndo_set_mac_address = eth_mac_addr, | ||
121 | }; | ||
122 | |||
111 | static int __init sonic_probe1(struct net_device *dev) | 123 | static int __init sonic_probe1(struct net_device *dev) |
112 | { | 124 | { |
113 | static unsigned version_printed = 0; | 125 | static unsigned version_printed = 0; |
@@ -205,12 +217,7 @@ static int __init sonic_probe1(struct net_device *dev) | |||
205 | lp->rra_laddr = lp->rda_laddr + (SIZEOF_SONIC_RD * SONIC_NUM_RDS | 217 | lp->rra_laddr = lp->rda_laddr + (SIZEOF_SONIC_RD * SONIC_NUM_RDS |
206 | * SONIC_BUS_SCALE(lp->dma_bitmode)); | 218 | * SONIC_BUS_SCALE(lp->dma_bitmode)); |
207 | 219 | ||
208 | dev->open = xtsonic_open; | 220 | dev->netdev_ops = &xtsonic_netdev_ops; |
209 | dev->stop = xtsonic_close; | ||
210 | dev->hard_start_xmit = sonic_send_packet; | ||
211 | dev->get_stats = sonic_get_stats; | ||
212 | dev->set_multicast_list = &sonic_multicast_list; | ||
213 | dev->tx_timeout = sonic_tx_timeout; | ||
214 | dev->watchdog_timeo = TX_TIMEOUT; | 221 | dev->watchdog_timeo = TX_TIMEOUT; |
215 | 222 | ||
216 | /* | 223 | /* |
diff --git a/drivers/parisc/superio.c b/drivers/parisc/superio.c index 4fa3bb2ddfe4..33e5ade774ca 100644 --- a/drivers/parisc/superio.c +++ b/drivers/parisc/superio.c | |||
@@ -434,7 +434,8 @@ static void __init superio_parport_init(void) | |||
434 | 0 /*base_hi*/, | 434 | 0 /*base_hi*/, |
435 | PAR_IRQ, | 435 | PAR_IRQ, |
436 | PARPORT_DMA_NONE /* dma */, | 436 | PARPORT_DMA_NONE /* dma */, |
437 | NULL /*struct pci_dev* */) ) | 437 | NULL /*struct pci_dev* */), |
438 | 0 /* shared irq flags */ ) | ||
438 | 439 | ||
439 | printk(KERN_WARNING PFX "Probing parallel port failed.\n"); | 440 | printk(KERN_WARNING PFX "Probing parallel port failed.\n"); |
440 | #endif /* CONFIG_PARPORT_PC */ | 441 | #endif /* CONFIG_PARPORT_PC */ |
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c index 25a00ce4f24d..fa3a11365ec3 100644 --- a/drivers/pci/dmar.c +++ b/drivers/pci/dmar.c | |||
@@ -173,12 +173,21 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header) | |||
173 | struct dmar_drhd_unit *dmaru; | 173 | struct dmar_drhd_unit *dmaru; |
174 | int ret = 0; | 174 | int ret = 0; |
175 | 175 | ||
176 | drhd = (struct acpi_dmar_hardware_unit *)header; | ||
177 | if (!drhd->address) { | ||
178 | /* Promote an attitude of violence to a BIOS engineer today */ | ||
179 | WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n" | ||
180 | "BIOS vendor: %s; Ver: %s; Product Version: %s\n", | ||
181 | dmi_get_system_info(DMI_BIOS_VENDOR), | ||
182 | dmi_get_system_info(DMI_BIOS_VERSION), | ||
183 | dmi_get_system_info(DMI_PRODUCT_VERSION)); | ||
184 | return -ENODEV; | ||
185 | } | ||
176 | dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL); | 186 | dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL); |
177 | if (!dmaru) | 187 | if (!dmaru) |
178 | return -ENOMEM; | 188 | return -ENOMEM; |
179 | 189 | ||
180 | dmaru->hdr = header; | 190 | dmaru->hdr = header; |
181 | drhd = (struct acpi_dmar_hardware_unit *)header; | ||
182 | dmaru->reg_base_addr = drhd->address; | 191 | dmaru->reg_base_addr = drhd->address; |
183 | dmaru->segment = drhd->segment; | 192 | dmaru->segment = drhd->segment; |
184 | dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */ | 193 | dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */ |
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index fb3a3f3fca7a..001b328adf80 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c | |||
@@ -733,8 +733,8 @@ static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end) | |||
733 | start &= (((u64)1) << addr_width) - 1; | 733 | start &= (((u64)1) << addr_width) - 1; |
734 | end &= (((u64)1) << addr_width) - 1; | 734 | end &= (((u64)1) << addr_width) - 1; |
735 | /* in case it's partial page */ | 735 | /* in case it's partial page */ |
736 | start = PAGE_ALIGN(start); | 736 | start &= PAGE_MASK; |
737 | end &= PAGE_MASK; | 737 | end = PAGE_ALIGN(end); |
738 | npages = (end - start) / VTD_PAGE_SIZE; | 738 | npages = (end - start) / VTD_PAGE_SIZE; |
739 | 739 | ||
740 | /* we don't need lock here, nobody else touches the iova range */ | 740 | /* we don't need lock here, nobody else touches the iova range */ |
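The dma_pte_clear_range() fix swaps the rounding direction: to clear every page-table entry that overlaps [start, end), start has to be rounded down to a page boundary and end rounded up, which is what `start &= PAGE_MASK` and `end = PAGE_ALIGN(end)` do; the old code rounded the other way and could leave the partial pages at either edge mapped. A standalone check of the arithmetic with a 4 KiB page (the two macros mirror the kernel definitions):

    #include <stdio.h>

    #define PAGE_SIZE     4096UL
    #define PAGE_MASK     (~(PAGE_SIZE - 1))
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

    int main(void)
    {
            unsigned long start = 0x1234, end = 0x5678;  /* partial pages at both ends */
            unsigned long down  = start & PAGE_MASK;     /* 0x1000: head page included */
            unsigned long up    = PAGE_ALIGN(end);       /* 0x6000: tail page included */

            printf("clear [%#lx, %#lx) -> [%#lx, %#lx), %lu pages\n",
                   start, end, down, up, (up - down) / PAGE_SIZE);
            return 0;
    }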
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c index 45940f31fe9e..218b9a16ac3f 100644 --- a/drivers/platform/x86/fujitsu-laptop.c +++ b/drivers/platform/x86/fujitsu-laptop.c | |||
@@ -174,8 +174,7 @@ struct fujitsu_hotkey_t { | |||
174 | 174 | ||
175 | static struct fujitsu_hotkey_t *fujitsu_hotkey; | 175 | static struct fujitsu_hotkey_t *fujitsu_hotkey; |
176 | 176 | ||
177 | static void acpi_fujitsu_hotkey_notify(acpi_handle handle, u32 event, | 177 | static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event); |
178 | void *data); | ||
179 | 178 | ||
180 | #ifdef CONFIG_LEDS_CLASS | 179 | #ifdef CONFIG_LEDS_CLASS |
181 | static enum led_brightness logolamp_get(struct led_classdev *cdev); | 180 | static enum led_brightness logolamp_get(struct led_classdev *cdev); |
@@ -203,7 +202,7 @@ struct led_classdev kblamps_led = { | |||
203 | static u32 dbg_level = 0x03; | 202 | static u32 dbg_level = 0x03; |
204 | #endif | 203 | #endif |
205 | 204 | ||
206 | static void acpi_fujitsu_notify(acpi_handle handle, u32 event, void *data); | 205 | static void acpi_fujitsu_notify(struct acpi_device *device, u32 event); |
207 | 206 | ||
208 | /* Fujitsu ACPI interface function */ | 207 | /* Fujitsu ACPI interface function */ |
209 | 208 | ||
@@ -658,7 +657,6 @@ static struct dmi_system_id fujitsu_dmi_table[] = { | |||
658 | 657 | ||
659 | static int acpi_fujitsu_add(struct acpi_device *device) | 658 | static int acpi_fujitsu_add(struct acpi_device *device) |
660 | { | 659 | { |
661 | acpi_status status; | ||
662 | acpi_handle handle; | 660 | acpi_handle handle; |
663 | int result = 0; | 661 | int result = 0; |
664 | int state = 0; | 662 | int state = 0; |
@@ -673,20 +671,10 @@ static int acpi_fujitsu_add(struct acpi_device *device) | |||
673 | sprintf(acpi_device_class(device), "%s", ACPI_FUJITSU_CLASS); | 671 | sprintf(acpi_device_class(device), "%s", ACPI_FUJITSU_CLASS); |
674 | device->driver_data = fujitsu; | 672 | device->driver_data = fujitsu; |
675 | 673 | ||
676 | status = acpi_install_notify_handler(device->handle, | ||
677 | ACPI_DEVICE_NOTIFY, | ||
678 | acpi_fujitsu_notify, fujitsu); | ||
679 | |||
680 | if (ACPI_FAILURE(status)) { | ||
681 | printk(KERN_ERR "Error installing notify handler\n"); | ||
682 | error = -ENODEV; | ||
683 | goto err_stop; | ||
684 | } | ||
685 | |||
686 | fujitsu->input = input = input_allocate_device(); | 674 | fujitsu->input = input = input_allocate_device(); |
687 | if (!input) { | 675 | if (!input) { |
688 | error = -ENOMEM; | 676 | error = -ENOMEM; |
689 | goto err_uninstall_notify; | 677 | goto err_stop; |
690 | } | 678 | } |
691 | 679 | ||
692 | snprintf(fujitsu->phys, sizeof(fujitsu->phys), | 680 | snprintf(fujitsu->phys, sizeof(fujitsu->phys), |
@@ -743,9 +731,6 @@ static int acpi_fujitsu_add(struct acpi_device *device) | |||
743 | end: | 731 | end: |
744 | err_free_input_dev: | 732 | err_free_input_dev: |
745 | input_free_device(input); | 733 | input_free_device(input); |
746 | err_uninstall_notify: | ||
747 | acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY, | ||
748 | acpi_fujitsu_notify); | ||
749 | err_stop: | 734 | err_stop: |
750 | 735 | ||
751 | return result; | 736 | return result; |
@@ -753,7 +738,6 @@ err_stop: | |||
753 | 738 | ||
754 | static int acpi_fujitsu_remove(struct acpi_device *device, int type) | 739 | static int acpi_fujitsu_remove(struct acpi_device *device, int type) |
755 | { | 740 | { |
756 | acpi_status status; | ||
757 | struct fujitsu_t *fujitsu = NULL; | 741 | struct fujitsu_t *fujitsu = NULL; |
758 | 742 | ||
759 | if (!device || !acpi_driver_data(device)) | 743 | if (!device || !acpi_driver_data(device)) |
@@ -761,10 +745,6 @@ static int acpi_fujitsu_remove(struct acpi_device *device, int type) | |||
761 | 745 | ||
762 | fujitsu = acpi_driver_data(device); | 746 | fujitsu = acpi_driver_data(device); |
763 | 747 | ||
764 | status = acpi_remove_notify_handler(fujitsu->acpi_handle, | ||
765 | ACPI_DEVICE_NOTIFY, | ||
766 | acpi_fujitsu_notify); | ||
767 | |||
768 | if (!device || !acpi_driver_data(device)) | 748 | if (!device || !acpi_driver_data(device)) |
769 | return -EINVAL; | 749 | return -EINVAL; |
770 | 750 | ||
@@ -775,7 +755,7 @@ static int acpi_fujitsu_remove(struct acpi_device *device, int type) | |||
775 | 755 | ||
776 | /* Brightness notify */ | 756 | /* Brightness notify */ |
777 | 757 | ||
778 | static void acpi_fujitsu_notify(acpi_handle handle, u32 event, void *data) | 758 | static void acpi_fujitsu_notify(struct acpi_device *device, u32 event) |
779 | { | 759 | { |
780 | struct input_dev *input; | 760 | struct input_dev *input; |
781 | int keycode; | 761 | int keycode; |
@@ -829,15 +809,12 @@ static void acpi_fujitsu_notify(acpi_handle handle, u32 event, void *data) | |||
829 | input_report_key(input, keycode, 0); | 809 | input_report_key(input, keycode, 0); |
830 | input_sync(input); | 810 | input_sync(input); |
831 | } | 811 | } |
832 | |||
833 | return; | ||
834 | } | 812 | } |
835 | 813 | ||
836 | /* ACPI device for hotkey handling */ | 814 | /* ACPI device for hotkey handling */ |
837 | 815 | ||
838 | static int acpi_fujitsu_hotkey_add(struct acpi_device *device) | 816 | static int acpi_fujitsu_hotkey_add(struct acpi_device *device) |
839 | { | 817 | { |
840 | acpi_status status; | ||
841 | acpi_handle handle; | 818 | acpi_handle handle; |
842 | int result = 0; | 819 | int result = 0; |
843 | int state = 0; | 820 | int state = 0; |
@@ -854,17 +831,6 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device) | |||
854 | sprintf(acpi_device_class(device), "%s", ACPI_FUJITSU_CLASS); | 831 | sprintf(acpi_device_class(device), "%s", ACPI_FUJITSU_CLASS); |
855 | device->driver_data = fujitsu_hotkey; | 832 | device->driver_data = fujitsu_hotkey; |
856 | 833 | ||
857 | status = acpi_install_notify_handler(device->handle, | ||
858 | ACPI_DEVICE_NOTIFY, | ||
859 | acpi_fujitsu_hotkey_notify, | ||
860 | fujitsu_hotkey); | ||
861 | |||
862 | if (ACPI_FAILURE(status)) { | ||
863 | printk(KERN_ERR "Error installing notify handler\n"); | ||
864 | error = -ENODEV; | ||
865 | goto err_stop; | ||
866 | } | ||
867 | |||
868 | /* kfifo */ | 834 | /* kfifo */ |
869 | spin_lock_init(&fujitsu_hotkey->fifo_lock); | 835 | spin_lock_init(&fujitsu_hotkey->fifo_lock); |
870 | fujitsu_hotkey->fifo = | 836 | fujitsu_hotkey->fifo = |
@@ -879,7 +845,7 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device) | |||
879 | fujitsu_hotkey->input = input = input_allocate_device(); | 845 | fujitsu_hotkey->input = input = input_allocate_device(); |
880 | if (!input) { | 846 | if (!input) { |
881 | error = -ENOMEM; | 847 | error = -ENOMEM; |
882 | goto err_uninstall_notify; | 848 | goto err_free_fifo; |
883 | } | 849 | } |
884 | 850 | ||
885 | snprintf(fujitsu_hotkey->phys, sizeof(fujitsu_hotkey->phys), | 851 | snprintf(fujitsu_hotkey->phys, sizeof(fujitsu_hotkey->phys), |
@@ -975,9 +941,7 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device) | |||
975 | end: | 941 | end: |
976 | err_free_input_dev: | 942 | err_free_input_dev: |
977 | input_free_device(input); | 943 | input_free_device(input); |
978 | err_uninstall_notify: | 944 | err_free_fifo: |
979 | acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY, | ||
980 | acpi_fujitsu_hotkey_notify); | ||
981 | kfifo_free(fujitsu_hotkey->fifo); | 945 | kfifo_free(fujitsu_hotkey->fifo); |
982 | err_stop: | 946 | err_stop: |
983 | 947 | ||
@@ -986,7 +950,6 @@ err_stop: | |||
986 | 950 | ||
987 | static int acpi_fujitsu_hotkey_remove(struct acpi_device *device, int type) | 951 | static int acpi_fujitsu_hotkey_remove(struct acpi_device *device, int type) |
988 | { | 952 | { |
989 | acpi_status status; | ||
990 | struct fujitsu_hotkey_t *fujitsu_hotkey = NULL; | 953 | struct fujitsu_hotkey_t *fujitsu_hotkey = NULL; |
991 | 954 | ||
992 | if (!device || !acpi_driver_data(device)) | 955 | if (!device || !acpi_driver_data(device)) |
@@ -994,10 +957,6 @@ static int acpi_fujitsu_hotkey_remove(struct acpi_device *device, int type) | |||
994 | 957 | ||
995 | fujitsu_hotkey = acpi_driver_data(device); | 958 | fujitsu_hotkey = acpi_driver_data(device); |
996 | 959 | ||
997 | status = acpi_remove_notify_handler(fujitsu_hotkey->acpi_handle, | ||
998 | ACPI_DEVICE_NOTIFY, | ||
999 | acpi_fujitsu_hotkey_notify); | ||
1000 | |||
1001 | fujitsu_hotkey->acpi_handle = NULL; | 960 | fujitsu_hotkey->acpi_handle = NULL; |
1002 | 961 | ||
1003 | kfifo_free(fujitsu_hotkey->fifo); | 962 | kfifo_free(fujitsu_hotkey->fifo); |
@@ -1005,8 +964,7 @@ static int acpi_fujitsu_hotkey_remove(struct acpi_device *device, int type) | |||
1005 | return 0; | 964 | return 0; |
1006 | } | 965 | } |
1007 | 966 | ||
1008 | static void acpi_fujitsu_hotkey_notify(acpi_handle handle, u32 event, | 967 | static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event) |
1009 | void *data) | ||
1010 | { | 968 | { |
1011 | struct input_dev *input; | 969 | struct input_dev *input; |
1012 | int keycode, keycode_r; | 970 | int keycode, keycode_r; |
@@ -1089,8 +1047,6 @@ static void acpi_fujitsu_hotkey_notify(acpi_handle handle, u32 event, | |||
1089 | input_sync(input); | 1047 | input_sync(input); |
1090 | break; | 1048 | break; |
1091 | } | 1049 | } |
1092 | |||
1093 | return; | ||
1094 | } | 1050 | } |
1095 | 1051 | ||
1096 | /* Initialization */ | 1052 | /* Initialization */ |
@@ -1107,6 +1063,7 @@ static struct acpi_driver acpi_fujitsu_driver = { | |||
1107 | .ops = { | 1063 | .ops = { |
1108 | .add = acpi_fujitsu_add, | 1064 | .add = acpi_fujitsu_add, |
1109 | .remove = acpi_fujitsu_remove, | 1065 | .remove = acpi_fujitsu_remove, |
1066 | .notify = acpi_fujitsu_notify, | ||
1110 | }, | 1067 | }, |
1111 | }; | 1068 | }; |
1112 | 1069 | ||
@@ -1122,6 +1079,7 @@ static struct acpi_driver acpi_fujitsu_hotkey_driver = { | |||
1122 | .ops = { | 1079 | .ops = { |
1123 | .add = acpi_fujitsu_hotkey_add, | 1080 | .add = acpi_fujitsu_hotkey_add, |
1124 | .remove = acpi_fujitsu_hotkey_remove, | 1081 | .remove = acpi_fujitsu_hotkey_remove, |
1082 | .notify = acpi_fujitsu_hotkey_notify, | ||
1125 | }, | 1083 | }, |
1126 | }; | 1084 | }; |
1127 | 1085 | ||
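The fujitsu-laptop hunks above, and the panasonic, sony and wmi hunks that follow, all make the same conversion: instead of each driver installing its own handler with acpi_install_notify_handler() and tearing it down again in every error path, the ACPI core now dispatches device notifications through a .notify callback in the driver's ops. Below is a minimal sketch of the resulting driver shape; all "exdev" names and the placeholder ID are hypothetical, only the .add/.remove/.notify plumbing mirrors the patch.

/* Illustrative sketch only: "exdev" names are hypothetical; the .notify
 * wiring mirrors the conversion done in the hunks above. */
#include <linux/module.h>
#include <linux/slab.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>

struct exdev_priv {
	unsigned int event_count;
};

static int exdev_add(struct acpi_device *device)
{
	struct exdev_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	if (!priv)
		return -ENOMEM;
	device->driver_data = priv;	/* fetched again in .notify/.remove */
	return 0;
}

static int exdev_remove(struct acpi_device *device, int type)
{
	kfree(acpi_driver_data(device));
	return 0;
}

static void exdev_notify(struct acpi_device *device, u32 event)
{
	struct exdev_priv *priv = acpi_driver_data(device);

	priv->event_count++;		/* the old void *data argument is gone */
}

static const struct acpi_device_id exdev_ids[] = {
	{"XYZ0001", 0},			/* placeholder ID */
	{"", 0},
};

static struct acpi_driver exdev_driver = {
	.name = "exdev",
	.class = "exdev",
	.ids = exdev_ids,
	.ops = {
		.add = exdev_add,	/* no acpi_install_notify_handler() */
		.remove = exdev_remove,
		.notify = exdev_notify,	/* installed/removed by the ACPI core */
	},
};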
diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c index a5ce4bc202e3..fe7cf0188acc 100644 --- a/drivers/platform/x86/panasonic-laptop.c +++ b/drivers/platform/x86/panasonic-laptop.c | |||
@@ -176,6 +176,7 @@ enum SINF_BITS { SINF_NUM_BATTERIES = 0, | |||
176 | static int acpi_pcc_hotkey_add(struct acpi_device *device); | 176 | static int acpi_pcc_hotkey_add(struct acpi_device *device); |
177 | static int acpi_pcc_hotkey_remove(struct acpi_device *device, int type); | 177 | static int acpi_pcc_hotkey_remove(struct acpi_device *device, int type); |
178 | static int acpi_pcc_hotkey_resume(struct acpi_device *device); | 178 | static int acpi_pcc_hotkey_resume(struct acpi_device *device); |
179 | static void acpi_pcc_hotkey_notify(struct acpi_device *device, u32 event); | ||
179 | 180 | ||
180 | static const struct acpi_device_id pcc_device_ids[] = { | 181 | static const struct acpi_device_id pcc_device_ids[] = { |
181 | { "MAT0012", 0}, | 182 | { "MAT0012", 0}, |
@@ -194,6 +195,7 @@ static struct acpi_driver acpi_pcc_driver = { | |||
194 | .add = acpi_pcc_hotkey_add, | 195 | .add = acpi_pcc_hotkey_add, |
195 | .remove = acpi_pcc_hotkey_remove, | 196 | .remove = acpi_pcc_hotkey_remove, |
196 | .resume = acpi_pcc_hotkey_resume, | 197 | .resume = acpi_pcc_hotkey_resume, |
198 | .notify = acpi_pcc_hotkey_notify, | ||
197 | }, | 199 | }, |
198 | }; | 200 | }; |
199 | 201 | ||
@@ -271,7 +273,7 @@ static int acpi_pcc_retrieve_biosdata(struct pcc_acpi *pcc, u32 *sinf) | |||
271 | union acpi_object *hkey = NULL; | 273 | union acpi_object *hkey = NULL; |
272 | int i; | 274 | int i; |
273 | 275 | ||
274 | status = acpi_evaluate_object(pcc->handle, METHOD_HKEY_SINF, 0, | 276 | status = acpi_evaluate_object(pcc->handle, METHOD_HKEY_SINF, NULL, |
275 | &buffer); | 277 | &buffer); |
276 | if (ACPI_FAILURE(status)) { | 278 | if (ACPI_FAILURE(status)) { |
277 | ACPI_DEBUG_PRINT((ACPI_DB_ERROR, | 279 | ACPI_DEBUG_PRINT((ACPI_DB_ERROR, |
@@ -527,9 +529,9 @@ static void acpi_pcc_generate_keyinput(struct pcc_acpi *pcc) | |||
527 | return; | 529 | return; |
528 | } | 530 | } |
529 | 531 | ||
530 | static void acpi_pcc_hotkey_notify(acpi_handle handle, u32 event, void *data) | 532 | static void acpi_pcc_hotkey_notify(struct acpi_device *device, u32 event) |
531 | { | 533 | { |
532 | struct pcc_acpi *pcc = (struct pcc_acpi *) data; | 534 | struct pcc_acpi *pcc = acpi_driver_data(device); |
533 | 535 | ||
534 | switch (event) { | 536 | switch (event) { |
535 | case HKEY_NOTIFY: | 537 | case HKEY_NOTIFY: |
@@ -599,7 +601,6 @@ static int acpi_pcc_hotkey_resume(struct acpi_device *device) | |||
599 | 601 | ||
600 | static int acpi_pcc_hotkey_add(struct acpi_device *device) | 602 | static int acpi_pcc_hotkey_add(struct acpi_device *device) |
601 | { | 603 | { |
602 | acpi_status status; | ||
603 | struct pcc_acpi *pcc; | 604 | struct pcc_acpi *pcc; |
604 | int num_sifr, result; | 605 | int num_sifr, result; |
605 | 606 | ||
@@ -640,22 +641,11 @@ static int acpi_pcc_hotkey_add(struct acpi_device *device) | |||
640 | goto out_sinf; | 641 | goto out_sinf; |
641 | } | 642 | } |
642 | 643 | ||
643 | /* initialize hotkey input device */ | ||
644 | status = acpi_install_notify_handler(pcc->handle, ACPI_DEVICE_NOTIFY, | ||
645 | acpi_pcc_hotkey_notify, pcc); | ||
646 | |||
647 | if (ACPI_FAILURE(status)) { | ||
648 | ACPI_DEBUG_PRINT((ACPI_DB_ERROR, | ||
649 | "Error installing notify handler\n")); | ||
650 | result = -ENODEV; | ||
651 | goto out_input; | ||
652 | } | ||
653 | |||
654 | /* initialize backlight */ | 644 | /* initialize backlight */ |
655 | pcc->backlight = backlight_device_register("panasonic", NULL, pcc, | 645 | pcc->backlight = backlight_device_register("panasonic", NULL, pcc, |
656 | &pcc_backlight_ops); | 646 | &pcc_backlight_ops); |
657 | if (IS_ERR(pcc->backlight)) | 647 | if (IS_ERR(pcc->backlight)) |
658 | goto out_notify; | 648 | goto out_input; |
659 | 649 | ||
660 | if (!acpi_pcc_retrieve_biosdata(pcc, pcc->sinf)) { | 650 | if (!acpi_pcc_retrieve_biosdata(pcc, pcc->sinf)) { |
661 | ACPI_DEBUG_PRINT((ACPI_DB_ERROR, | 651 | ACPI_DEBUG_PRINT((ACPI_DB_ERROR, |
@@ -680,9 +670,6 @@ static int acpi_pcc_hotkey_add(struct acpi_device *device) | |||
680 | 670 | ||
681 | out_backlight: | 671 | out_backlight: |
682 | backlight_device_unregister(pcc->backlight); | 672 | backlight_device_unregister(pcc->backlight); |
683 | out_notify: | ||
684 | acpi_remove_notify_handler(pcc->handle, ACPI_DEVICE_NOTIFY, | ||
685 | acpi_pcc_hotkey_notify); | ||
686 | out_input: | 673 | out_input: |
687 | input_unregister_device(pcc->input_dev); | 674 | input_unregister_device(pcc->input_dev); |
688 | /* no need to input_free_device() since core input API refcount and | 675 | /* no need to input_free_device() since core input API refcount and |
@@ -723,9 +710,6 @@ static int acpi_pcc_hotkey_remove(struct acpi_device *device, int type) | |||
723 | 710 | ||
724 | backlight_device_unregister(pcc->backlight); | 711 | backlight_device_unregister(pcc->backlight); |
725 | 712 | ||
726 | acpi_remove_notify_handler(pcc->handle, ACPI_DEVICE_NOTIFY, | ||
727 | acpi_pcc_hotkey_notify); | ||
728 | |||
729 | input_unregister_device(pcc->input_dev); | 713 | input_unregister_device(pcc->input_dev); |
730 | /* no need to input_free_device() since core input API refcount and | 714 | /* no need to input_free_device() since core input API refcount and |
731 | * free()s the device */ | 715 | * free()s the device */ |
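One further detail in the panasonic hunk above is the acpi_evaluate_object() fix: the third parameter is a struct acpi_object_list pointer, so a method call that passes no arguments should hand in NULL rather than a bare 0. A hedged sketch of the call convention follows; the "SINF" method name and the surrounding function are illustrative (the real driver goes through its METHOD_HKEY_SINF macro).

/* Sketch of the acpi_evaluate_object() calling convention; the method
 * name and helper are illustrative only. */
#include <linux/slab.h>
#include <acpi/acpi_bus.h>

static acpi_status exdev_read_method(acpi_handle handle)
{
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *obj;
	acpi_status status;

	/* no input arguments: pass NULL for the acpi_object_list pointer */
	status = acpi_evaluate_object(handle, "SINF", NULL, &buffer);
	if (ACPI_FAILURE(status))
		return status;

	obj = buffer.pointer;
	if (obj && obj->type == ACPI_TYPE_PACKAGE) {
		/* ... walk obj->package.count / obj->package.elements ... */
	}
	kfree(buffer.pointer);		/* ACPI_ALLOCATE_BUFFER output */
	return AE_OK;
}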
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c index a90ec5cb2f20..d3c92d777bde 100644 --- a/drivers/platform/x86/sony-laptop.c +++ b/drivers/platform/x86/sony-laptop.c | |||
@@ -914,7 +914,7 @@ static struct sony_nc_event sony_127_events[] = { | |||
914 | /* | 914 | /* |
915 | * ACPI callbacks | 915 | * ACPI callbacks |
916 | */ | 916 | */ |
917 | static void sony_acpi_notify(acpi_handle handle, u32 event, void *data) | 917 | static void sony_nc_notify(struct acpi_device *device, u32 event) |
918 | { | 918 | { |
919 | u32 ev = event; | 919 | u32 ev = event; |
920 | 920 | ||
@@ -933,7 +933,7 @@ static void sony_acpi_notify(acpi_handle handle, u32 event, void *data) | |||
933 | struct sony_nc_event *key_event; | 933 | struct sony_nc_event *key_event; |
934 | 934 | ||
935 | if (sony_call_snc_handle(key_handle, 0x200, &result)) { | 935 | if (sony_call_snc_handle(key_handle, 0x200, &result)) { |
936 | dprintk("sony_acpi_notify, unable to decode" | 936 | dprintk("sony_nc_notify, unable to decode" |
937 | " event 0x%.2x 0x%.2x\n", key_handle, | 937 | " event 0x%.2x 0x%.2x\n", key_handle, |
938 | ev); | 938 | ev); |
939 | /* restore the original event */ | 939 | /* restore the original event */ |
@@ -968,7 +968,7 @@ static void sony_acpi_notify(acpi_handle handle, u32 event, void *data) | |||
968 | } else | 968 | } else |
969 | sony_laptop_report_input_event(ev); | 969 | sony_laptop_report_input_event(ev); |
970 | 970 | ||
971 | dprintk("sony_acpi_notify, event: 0x%.2x\n", ev); | 971 | dprintk("sony_nc_notify, event: 0x%.2x\n", ev); |
972 | acpi_bus_generate_proc_event(sony_nc_acpi_device, 1, ev); | 972 | acpi_bus_generate_proc_event(sony_nc_acpi_device, 1, ev); |
973 | } | 973 | } |
974 | 974 | ||
@@ -1276,15 +1276,6 @@ static int sony_nc_add(struct acpi_device *device) | |||
1276 | goto outwalk; | 1276 | goto outwalk; |
1277 | } | 1277 | } |
1278 | 1278 | ||
1279 | status = acpi_install_notify_handler(sony_nc_acpi_handle, | ||
1280 | ACPI_DEVICE_NOTIFY, | ||
1281 | sony_acpi_notify, NULL); | ||
1282 | if (ACPI_FAILURE(status)) { | ||
1283 | printk(KERN_WARNING DRV_PFX "unable to install notify handler (%u)\n", status); | ||
1284 | result = -ENODEV; | ||
1285 | goto outinput; | ||
1286 | } | ||
1287 | |||
1288 | if (acpi_video_backlight_support()) { | 1279 | if (acpi_video_backlight_support()) { |
1289 | printk(KERN_INFO DRV_PFX "brightness ignored, must be " | 1280 | printk(KERN_INFO DRV_PFX "brightness ignored, must be " |
1290 | "controlled by ACPI video driver\n"); | 1281 | "controlled by ACPI video driver\n"); |
@@ -1362,13 +1353,6 @@ static int sony_nc_add(struct acpi_device *device) | |||
1362 | if (sony_backlight_device) | 1353 | if (sony_backlight_device) |
1363 | backlight_device_unregister(sony_backlight_device); | 1354 | backlight_device_unregister(sony_backlight_device); |
1364 | 1355 | ||
1365 | status = acpi_remove_notify_handler(sony_nc_acpi_handle, | ||
1366 | ACPI_DEVICE_NOTIFY, | ||
1367 | sony_acpi_notify); | ||
1368 | if (ACPI_FAILURE(status)) | ||
1369 | printk(KERN_WARNING DRV_PFX "unable to remove notify handler\n"); | ||
1370 | |||
1371 | outinput: | ||
1372 | sony_laptop_remove_input(); | 1356 | sony_laptop_remove_input(); |
1373 | 1357 | ||
1374 | outwalk: | 1358 | outwalk: |
@@ -1378,7 +1362,6 @@ static int sony_nc_add(struct acpi_device *device) | |||
1378 | 1362 | ||
1379 | static int sony_nc_remove(struct acpi_device *device, int type) | 1363 | static int sony_nc_remove(struct acpi_device *device, int type) |
1380 | { | 1364 | { |
1381 | acpi_status status; | ||
1382 | struct sony_nc_value *item; | 1365 | struct sony_nc_value *item; |
1383 | 1366 | ||
1384 | if (sony_backlight_device) | 1367 | if (sony_backlight_device) |
@@ -1386,12 +1369,6 @@ static int sony_nc_remove(struct acpi_device *device, int type) | |||
1386 | 1369 | ||
1387 | sony_nc_acpi_device = NULL; | 1370 | sony_nc_acpi_device = NULL; |
1388 | 1371 | ||
1389 | status = acpi_remove_notify_handler(sony_nc_acpi_handle, | ||
1390 | ACPI_DEVICE_NOTIFY, | ||
1391 | sony_acpi_notify); | ||
1392 | if (ACPI_FAILURE(status)) | ||
1393 | printk(KERN_WARNING DRV_PFX "unable to remove notify handler\n"); | ||
1394 | |||
1395 | for (item = sony_nc_values; item->name; ++item) { | 1372 | for (item = sony_nc_values; item->name; ++item) { |
1396 | device_remove_file(&sony_pf_device->dev, &item->devattr); | 1373 | device_remove_file(&sony_pf_device->dev, &item->devattr); |
1397 | } | 1374 | } |
@@ -1425,6 +1402,7 @@ static struct acpi_driver sony_nc_driver = { | |||
1425 | .add = sony_nc_add, | 1402 | .add = sony_nc_add, |
1426 | .remove = sony_nc_remove, | 1403 | .remove = sony_nc_remove, |
1427 | .resume = sony_nc_resume, | 1404 | .resume = sony_nc_resume, |
1405 | .notify = sony_nc_notify, | ||
1428 | }, | 1406 | }, |
1429 | }; | 1407 | }; |
1430 | 1408 | ||
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c index 2f269e117b8f..043b208d971d 100644 --- a/drivers/platform/x86/wmi.c +++ b/drivers/platform/x86/wmi.c | |||
@@ -81,6 +81,7 @@ static struct wmi_block wmi_blocks; | |||
81 | 81 | ||
82 | static int acpi_wmi_remove(struct acpi_device *device, int type); | 82 | static int acpi_wmi_remove(struct acpi_device *device, int type); |
83 | static int acpi_wmi_add(struct acpi_device *device); | 83 | static int acpi_wmi_add(struct acpi_device *device); |
84 | static void acpi_wmi_notify(struct acpi_device *device, u32 event); | ||
84 | 85 | ||
85 | static const struct acpi_device_id wmi_device_ids[] = { | 86 | static const struct acpi_device_id wmi_device_ids[] = { |
86 | {"PNP0C14", 0}, | 87 | {"PNP0C14", 0}, |
@@ -96,6 +97,7 @@ static struct acpi_driver acpi_wmi_driver = { | |||
96 | .ops = { | 97 | .ops = { |
97 | .add = acpi_wmi_add, | 98 | .add = acpi_wmi_add, |
98 | .remove = acpi_wmi_remove, | 99 | .remove = acpi_wmi_remove, |
100 | .notify = acpi_wmi_notify, | ||
99 | }, | 101 | }, |
100 | }; | 102 | }; |
101 | 103 | ||
@@ -643,12 +645,11 @@ acpi_wmi_ec_space_handler(u32 function, acpi_physical_address address, | |||
643 | } | 645 | } |
644 | } | 646 | } |
645 | 647 | ||
646 | static void acpi_wmi_notify(acpi_handle handle, u32 event, void *data) | 648 | static void acpi_wmi_notify(struct acpi_device *device, u32 event) |
647 | { | 649 | { |
648 | struct guid_block *block; | 650 | struct guid_block *block; |
649 | struct wmi_block *wblock; | 651 | struct wmi_block *wblock; |
650 | struct list_head *p; | 652 | struct list_head *p; |
651 | struct acpi_device *device = data; | ||
652 | 653 | ||
653 | list_for_each(p, &wmi_blocks.list) { | 654 | list_for_each(p, &wmi_blocks.list) { |
654 | wblock = list_entry(p, struct wmi_block, list); | 655 | wblock = list_entry(p, struct wmi_block, list); |
@@ -669,9 +670,6 @@ static void acpi_wmi_notify(acpi_handle handle, u32 event, void *data) | |||
669 | 670 | ||
670 | static int acpi_wmi_remove(struct acpi_device *device, int type) | 671 | static int acpi_wmi_remove(struct acpi_device *device, int type) |
671 | { | 672 | { |
672 | acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY, | ||
673 | acpi_wmi_notify); | ||
674 | |||
675 | acpi_remove_address_space_handler(device->handle, | 673 | acpi_remove_address_space_handler(device->handle, |
676 | ACPI_ADR_SPACE_EC, &acpi_wmi_ec_space_handler); | 674 | ACPI_ADR_SPACE_EC, &acpi_wmi_ec_space_handler); |
677 | 675 | ||
@@ -683,13 +681,6 @@ static int __init acpi_wmi_add(struct acpi_device *device) | |||
683 | acpi_status status; | 681 | acpi_status status; |
684 | int result = 0; | 682 | int result = 0; |
685 | 683 | ||
686 | status = acpi_install_notify_handler(device->handle, ACPI_DEVICE_NOTIFY, | ||
687 | acpi_wmi_notify, device); | ||
688 | if (ACPI_FAILURE(status)) { | ||
689 | printk(KERN_ERR PREFIX "Error installing notify handler\n"); | ||
690 | return -ENODEV; | ||
691 | } | ||
692 | |||
693 | status = acpi_install_address_space_handler(device->handle, | 684 | status = acpi_install_address_space_handler(device->handle, |
694 | ACPI_ADR_SPACE_EC, | 685 | ACPI_ADR_SPACE_EC, |
695 | &acpi_wmi_ec_space_handler, | 686 | &acpi_wmi_ec_space_handler, |
diff --git a/drivers/power/pcf50633-charger.c b/drivers/power/pcf50633-charger.c index 41aec2acbb91..e8b278f71781 100644 --- a/drivers/power/pcf50633-charger.c +++ b/drivers/power/pcf50633-charger.c | |||
@@ -36,6 +36,8 @@ struct pcf50633_mbc { | |||
36 | 36 | ||
37 | struct power_supply usb; | 37 | struct power_supply usb; |
38 | struct power_supply adapter; | 38 | struct power_supply adapter; |
39 | |||
40 | struct delayed_work charging_restart_work; | ||
39 | }; | 41 | }; |
40 | 42 | ||
41 | int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma) | 43 | int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma) |
@@ -43,6 +45,8 @@ int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma) | |||
43 | struct pcf50633_mbc *mbc = platform_get_drvdata(pcf->mbc_pdev); | 45 | struct pcf50633_mbc *mbc = platform_get_drvdata(pcf->mbc_pdev); |
44 | int ret = 0; | 46 | int ret = 0; |
45 | u8 bits; | 47 | u8 bits; |
48 | int charging_start = 1; | ||
49 | u8 mbcs2, chgmod; | ||
46 | 50 | ||
47 | if (ma >= 1000) | 51 | if (ma >= 1000) |
48 | bits = PCF50633_MBCC7_USB_1000mA; | 52 | bits = PCF50633_MBCC7_USB_1000mA; |
@@ -50,8 +54,10 @@ int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma) | |||
50 | bits = PCF50633_MBCC7_USB_500mA; | 54 | bits = PCF50633_MBCC7_USB_500mA; |
51 | else if (ma >= 100) | 55 | else if (ma >= 100) |
52 | bits = PCF50633_MBCC7_USB_100mA; | 56 | bits = PCF50633_MBCC7_USB_100mA; |
53 | else | 57 | else { |
54 | bits = PCF50633_MBCC7_USB_SUSPEND; | 58 | bits = PCF50633_MBCC7_USB_SUSPEND; |
59 | charging_start = 0; | ||
60 | } | ||
55 | 61 | ||
56 | ret = pcf50633_reg_set_bit_mask(pcf, PCF50633_REG_MBCC7, | 62 | ret = pcf50633_reg_set_bit_mask(pcf, PCF50633_REG_MBCC7, |
57 | PCF50633_MBCC7_USB_MASK, bits); | 63 | PCF50633_MBCC7_USB_MASK, bits); |
@@ -60,6 +66,22 @@ int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma) | |||
60 | else | 66 | else |
61 | dev_info(pcf->dev, "usb curlim to %d mA\n", ma); | 67 | dev_info(pcf->dev, "usb curlim to %d mA\n", ma); |
62 | 68 | ||
69 | /* Manual charging start */ | ||
70 | mbcs2 = pcf50633_reg_read(pcf, PCF50633_REG_MBCS2); | ||
71 | chgmod = (mbcs2 & PCF50633_MBCS2_MBC_MASK); | ||
72 | |||
73 | /* If chgmod == BATFULL, setting chgena has no effect. | ||
74 | * We need to set resume instead. | ||
75 | */ | ||
76 | if (chgmod != PCF50633_MBCS2_MBC_BAT_FULL) | ||
77 | pcf50633_reg_set_bit_mask(pcf, PCF50633_REG_MBCC1, | ||
78 | PCF50633_MBCC1_CHGENA, PCF50633_MBCC1_CHGENA); | ||
79 | else | ||
80 | pcf50633_reg_set_bit_mask(pcf, PCF50633_REG_MBCC1, | ||
81 | PCF50633_MBCC1_RESUME, PCF50633_MBCC1_RESUME); | ||
82 | |||
83 | mbc->usb_active = charging_start; | ||
84 | |||
63 | power_supply_changed(&mbc->usb); | 85 | power_supply_changed(&mbc->usb); |
64 | 86 | ||
65 | return ret; | 87 | return ret; |
@@ -84,21 +106,6 @@ int pcf50633_mbc_get_status(struct pcf50633 *pcf) | |||
84 | } | 106 | } |
85 | EXPORT_SYMBOL_GPL(pcf50633_mbc_get_status); | 107 | EXPORT_SYMBOL_GPL(pcf50633_mbc_get_status); |
86 | 108 | ||
87 | void pcf50633_mbc_set_status(struct pcf50633 *pcf, int what, int status) | ||
88 | { | ||
89 | struct pcf50633_mbc *mbc = platform_get_drvdata(pcf->mbc_pdev); | ||
90 | |||
91 | if (what & PCF50633_MBC_USB_ONLINE) | ||
92 | mbc->usb_online = !!status; | ||
93 | if (what & PCF50633_MBC_USB_ACTIVE) | ||
94 | mbc->usb_active = !!status; | ||
95 | if (what & PCF50633_MBC_ADAPTER_ONLINE) | ||
96 | mbc->adapter_online = !!status; | ||
97 | if (what & PCF50633_MBC_ADAPTER_ACTIVE) | ||
98 | mbc->adapter_active = !!status; | ||
99 | } | ||
100 | EXPORT_SYMBOL_GPL(pcf50633_mbc_set_status); | ||
101 | |||
102 | static ssize_t | 109 | static ssize_t |
103 | show_chgmode(struct device *dev, struct device_attribute *attr, char *buf) | 110 | show_chgmode(struct device *dev, struct device_attribute *attr, char *buf) |
104 | { | 111 | { |
@@ -160,10 +167,44 @@ static struct attribute_group mbc_attr_group = { | |||
160 | .attrs = pcf50633_mbc_sysfs_entries, | 167 | .attrs = pcf50633_mbc_sysfs_entries, |
161 | }; | 168 | }; |
162 | 169 | ||
170 | /* MBC state machine switches into charging mode when the battery voltage | ||
171 | * falls below 96% of a battery float voltage. But the voltage drop in Li-ion | ||
172 | * batteries is marginal(1~2 %) till about 80% of its capacity - which means, | ||
173 | * after a BATFULL, charging won't be restarted until 80%. | ||
174 | * | ||
175 | * This work_struct function restarts charging at regular intervals to make | ||
176 | * sure we don't discharge too much | ||
177 | */ | ||
178 | |||
179 | static void pcf50633_mbc_charging_restart(struct work_struct *work) | ||
180 | { | ||
181 | struct pcf50633_mbc *mbc; | ||
182 | u8 mbcs2, chgmod; | ||
183 | |||
184 | mbc = container_of(work, struct pcf50633_mbc, | ||
185 | charging_restart_work.work); | ||
186 | |||
187 | mbcs2 = pcf50633_reg_read(mbc->pcf, PCF50633_REG_MBCS2); | ||
188 | chgmod = (mbcs2 & PCF50633_MBCS2_MBC_MASK); | ||
189 | |||
190 | if (chgmod != PCF50633_MBCS2_MBC_BAT_FULL) | ||
191 | return; | ||
192 | |||
193 | /* Restart charging */ | ||
194 | pcf50633_reg_set_bit_mask(mbc->pcf, PCF50633_REG_MBCC1, | ||
195 | PCF50633_MBCC1_RESUME, PCF50633_MBCC1_RESUME); | ||
196 | mbc->usb_active = 1; | ||
197 | power_supply_changed(&mbc->usb); | ||
198 | |||
199 | dev_info(mbc->pcf->dev, "Charging restarted\n"); | ||
200 | } | ||
201 | |||
163 | static void | 202 | static void |
164 | pcf50633_mbc_irq_handler(int irq, void *data) | 203 | pcf50633_mbc_irq_handler(int irq, void *data) |
165 | { | 204 | { |
166 | struct pcf50633_mbc *mbc = data; | 205 | struct pcf50633_mbc *mbc = data; |
206 | int chg_restart_interval = | ||
207 | mbc->pcf->pdata->charging_restart_interval; | ||
167 | 208 | ||
168 | /* USB */ | 209 | /* USB */ |
169 | if (irq == PCF50633_IRQ_USBINS) { | 210 | if (irq == PCF50633_IRQ_USBINS) { |
@@ -172,6 +213,7 @@ pcf50633_mbc_irq_handler(int irq, void *data) | |||
172 | mbc->usb_online = 0; | 213 | mbc->usb_online = 0; |
173 | mbc->usb_active = 0; | 214 | mbc->usb_active = 0; |
174 | pcf50633_mbc_usb_curlim_set(mbc->pcf, 0); | 215 | pcf50633_mbc_usb_curlim_set(mbc->pcf, 0); |
216 | cancel_delayed_work_sync(&mbc->charging_restart_work); | ||
175 | } | 217 | } |
176 | 218 | ||
177 | /* Adapter */ | 219 | /* Adapter */ |
@@ -186,7 +228,14 @@ pcf50633_mbc_irq_handler(int irq, void *data) | |||
186 | if (irq == PCF50633_IRQ_BATFULL) { | 228 | if (irq == PCF50633_IRQ_BATFULL) { |
187 | mbc->usb_active = 0; | 229 | mbc->usb_active = 0; |
188 | mbc->adapter_active = 0; | 230 | mbc->adapter_active = 0; |
189 | } | 231 | |
232 | if (chg_restart_interval > 0) | ||
233 | schedule_delayed_work(&mbc->charging_restart_work, | ||
234 | chg_restart_interval); | ||
235 | } else if (irq == PCF50633_IRQ_USBLIMON) | ||
236 | mbc->usb_active = 0; | ||
237 | else if (irq == PCF50633_IRQ_USBLIMOFF) | ||
238 | mbc->usb_active = 1; | ||
190 | 239 | ||
191 | power_supply_changed(&mbc->usb); | 240 | power_supply_changed(&mbc->usb); |
192 | power_supply_changed(&mbc->adapter); | 241 | power_supply_changed(&mbc->adapter); |
@@ -303,6 +352,9 @@ static int __devinit pcf50633_mbc_probe(struct platform_device *pdev) | |||
303 | return ret; | 352 | return ret; |
304 | } | 353 | } |
305 | 354 | ||
355 | INIT_DELAYED_WORK(&mbc->charging_restart_work, | ||
356 | pcf50633_mbc_charging_restart); | ||
357 | |||
306 | ret = sysfs_create_group(&pdev->dev.kobj, &mbc_attr_group); | 358 | ret = sysfs_create_group(&pdev->dev.kobj, &mbc_attr_group); |
307 | if (ret) | 359 | if (ret) |
308 | dev_err(mbc->pcf->dev, "failed to create sysfs entries\n"); | 360 | dev_err(mbc->pcf->dev, "failed to create sysfs entries\n"); |
@@ -328,6 +380,8 @@ static int __devexit pcf50633_mbc_remove(struct platform_device *pdev) | |||
328 | power_supply_unregister(&mbc->usb); | 380 | power_supply_unregister(&mbc->usb); |
329 | power_supply_unregister(&mbc->adapter); | 381 | power_supply_unregister(&mbc->adapter); |
330 | 382 | ||
383 | cancel_delayed_work_sync(&mbc->charging_restart_work); | ||
384 | |||
331 | kfree(mbc); | 385 | kfree(mbc); |
332 | 386 | ||
333 | return 0; | 387 | return 0; |
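The charging-restart addition above is built on the standard delayed-work API: INIT_DELAYED_WORK() at probe time, schedule_delayed_work() with a jiffies delay from the BATFULL interrupt, and cancel_delayed_work_sync() on USB removal and in the remove path. A self-contained hedged sketch of that lifecycle, with hypothetical "demo" names:

/* Sketch of the delayed-work lifecycle used for the charging restart;
 * all "demo" identifiers are hypothetical. */
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct demo_charger {
	struct delayed_work restart_work;
	int charging;
};

static void demo_restart_fn(struct work_struct *work)
{
	struct demo_charger *chg =
		container_of(work, struct demo_charger, restart_work.work);

	chg->charging = 1;	/* re-enable charging, as the patch does via MBCC1 */
}

static void demo_charger_init(struct demo_charger *chg)
{
	INIT_DELAYED_WORK(&chg->restart_work, demo_restart_fn);
}

static void demo_charger_on_batfull(struct demo_charger *chg,
				    unsigned long restart_interval)
{
	/* interval is in jiffies; 0 disables the restart behaviour */
	if (restart_interval > 0)
		schedule_delayed_work(&chg->restart_work, restart_interval);
}

static void demo_charger_teardown(struct demo_charger *chg)
{
	/* waits for a running work item before returning */
	cancel_delayed_work_sync(&chg->restart_work);
}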
diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c index b56a704409d2..a232de6a5703 100644 --- a/drivers/power/pda_power.c +++ b/drivers/power/pda_power.c | |||
@@ -12,11 +12,14 @@ | |||
12 | 12 | ||
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <linux/platform_device.h> | 14 | #include <linux/platform_device.h> |
15 | #include <linux/err.h> | ||
15 | #include <linux/interrupt.h> | 16 | #include <linux/interrupt.h> |
16 | #include <linux/power_supply.h> | 17 | #include <linux/power_supply.h> |
17 | #include <linux/pda_power.h> | 18 | #include <linux/pda_power.h> |
19 | #include <linux/regulator/consumer.h> | ||
18 | #include <linux/timer.h> | 20 | #include <linux/timer.h> |
19 | #include <linux/jiffies.h> | 21 | #include <linux/jiffies.h> |
22 | #include <linux/usb/otg.h> | ||
20 | 23 | ||
21 | static inline unsigned int get_irq_flags(struct resource *res) | 24 | static inline unsigned int get_irq_flags(struct resource *res) |
22 | { | 25 | { |
@@ -35,6 +38,11 @@ static struct timer_list supply_timer; | |||
35 | static struct timer_list polling_timer; | 38 | static struct timer_list polling_timer; |
36 | static int polling; | 39 | static int polling; |
37 | 40 | ||
41 | #ifdef CONFIG_USB_OTG_UTILS | ||
42 | static struct otg_transceiver *transceiver; | ||
43 | #endif | ||
44 | static struct regulator *ac_draw; | ||
45 | |||
38 | enum { | 46 | enum { |
39 | PDA_PSY_OFFLINE = 0, | 47 | PDA_PSY_OFFLINE = 0, |
40 | PDA_PSY_ONLINE = 1, | 48 | PDA_PSY_ONLINE = 1, |
@@ -104,18 +112,35 @@ static void update_status(void) | |||
104 | 112 | ||
105 | static void update_charger(void) | 113 | static void update_charger(void) |
106 | { | 114 | { |
107 | if (!pdata->set_charge) | 115 | static int regulator_enabled; |
108 | return; | 116 | int max_uA = pdata->ac_max_uA; |
109 | 117 | ||
110 | if (new_ac_status > 0) { | 118 | if (pdata->set_charge) { |
111 | dev_dbg(dev, "charger on (AC)\n"); | 119 | if (new_ac_status > 0) { |
112 | pdata->set_charge(PDA_POWER_CHARGE_AC); | 120 | dev_dbg(dev, "charger on (AC)\n"); |
113 | } else if (new_usb_status > 0) { | 121 | pdata->set_charge(PDA_POWER_CHARGE_AC); |
114 | dev_dbg(dev, "charger on (USB)\n"); | 122 | } else if (new_usb_status > 0) { |
115 | pdata->set_charge(PDA_POWER_CHARGE_USB); | 123 | dev_dbg(dev, "charger on (USB)\n"); |
116 | } else { | 124 | pdata->set_charge(PDA_POWER_CHARGE_USB); |
117 | dev_dbg(dev, "charger off\n"); | 125 | } else { |
118 | pdata->set_charge(0); | 126 | dev_dbg(dev, "charger off\n"); |
127 | pdata->set_charge(0); | ||
128 | } | ||
129 | } else if (ac_draw) { | ||
130 | if (new_ac_status > 0) { | ||
131 | regulator_set_current_limit(ac_draw, max_uA, max_uA); | ||
132 | if (!regulator_enabled) { | ||
133 | dev_dbg(dev, "charger on (AC)\n"); | ||
134 | regulator_enable(ac_draw); | ||
135 | regulator_enabled = 1; | ||
136 | } | ||
137 | } else { | ||
138 | if (regulator_enabled) { | ||
139 | dev_dbg(dev, "charger off\n"); | ||
140 | regulator_disable(ac_draw); | ||
141 | regulator_enabled = 0; | ||
142 | } | ||
143 | } | ||
119 | } | 144 | } |
120 | } | 145 | } |
121 | 146 | ||
@@ -194,6 +219,13 @@ static void polling_timer_func(unsigned long unused) | |||
194 | jiffies + msecs_to_jiffies(pdata->polling_interval)); | 219 | jiffies + msecs_to_jiffies(pdata->polling_interval)); |
195 | } | 220 | } |
196 | 221 | ||
222 | #ifdef CONFIG_USB_OTG_UTILS | ||
223 | static int otg_is_usb_online(void) | ||
224 | { | ||
225 | return (transceiver->state == OTG_STATE_B_PERIPHERAL); | ||
226 | } | ||
227 | #endif | ||
228 | |||
197 | static int pda_power_probe(struct platform_device *pdev) | 229 | static int pda_power_probe(struct platform_device *pdev) |
198 | { | 230 | { |
199 | int ret = 0; | 231 | int ret = 0; |
@@ -227,6 +259,9 @@ static int pda_power_probe(struct platform_device *pdev) | |||
227 | if (!pdata->polling_interval) | 259 | if (!pdata->polling_interval) |
228 | pdata->polling_interval = 2000; | 260 | pdata->polling_interval = 2000; |
229 | 261 | ||
262 | if (!pdata->ac_max_uA) | ||
263 | pdata->ac_max_uA = 500000; | ||
264 | |||
230 | setup_timer(&charger_timer, charger_timer_func, 0); | 265 | setup_timer(&charger_timer, charger_timer_func, 0); |
231 | setup_timer(&supply_timer, supply_timer_func, 0); | 266 | setup_timer(&supply_timer, supply_timer_func, 0); |
232 | 267 | ||
@@ -240,6 +275,13 @@ static int pda_power_probe(struct platform_device *pdev) | |||
240 | pda_psy_usb.num_supplicants = pdata->num_supplicants; | 275 | pda_psy_usb.num_supplicants = pdata->num_supplicants; |
241 | } | 276 | } |
242 | 277 | ||
278 | ac_draw = regulator_get(dev, "ac_draw"); | ||
279 | if (IS_ERR(ac_draw)) { | ||
280 | dev_dbg(dev, "couldn't get ac_draw regulator\n"); | ||
281 | ac_draw = NULL; | ||
282 | ret = PTR_ERR(ac_draw); | ||
283 | } | ||
284 | |||
243 | if (pdata->is_ac_online) { | 285 | if (pdata->is_ac_online) { |
244 | ret = power_supply_register(&pdev->dev, &pda_psy_ac); | 286 | ret = power_supply_register(&pdev->dev, &pda_psy_ac); |
245 | if (ret) { | 287 | if (ret) { |
@@ -261,6 +303,13 @@ static int pda_power_probe(struct platform_device *pdev) | |||
261 | } | 303 | } |
262 | } | 304 | } |
263 | 305 | ||
306 | #ifdef CONFIG_USB_OTG_UTILS | ||
307 | transceiver = otg_get_transceiver(); | ||
308 | if (transceiver && !pdata->is_usb_online) { | ||
309 | pdata->is_usb_online = otg_is_usb_online; | ||
310 | } | ||
311 | #endif | ||
312 | |||
264 | if (pdata->is_usb_online) { | 313 | if (pdata->is_usb_online) { |
265 | ret = power_supply_register(&pdev->dev, &pda_psy_usb); | 314 | ret = power_supply_register(&pdev->dev, &pda_psy_usb); |
266 | if (ret) { | 315 | if (ret) { |
@@ -300,10 +349,18 @@ usb_irq_failed: | |||
300 | usb_supply_failed: | 349 | usb_supply_failed: |
301 | if (pdata->is_ac_online && ac_irq) | 350 | if (pdata->is_ac_online && ac_irq) |
302 | free_irq(ac_irq->start, &pda_psy_ac); | 351 | free_irq(ac_irq->start, &pda_psy_ac); |
352 | #ifdef CONFIG_USB_OTG_UTILS | ||
353 | if (transceiver) | ||
354 | otg_put_transceiver(transceiver); | ||
355 | #endif | ||
303 | ac_irq_failed: | 356 | ac_irq_failed: |
304 | if (pdata->is_ac_online) | 357 | if (pdata->is_ac_online) |
305 | power_supply_unregister(&pda_psy_ac); | 358 | power_supply_unregister(&pda_psy_ac); |
306 | ac_supply_failed: | 359 | ac_supply_failed: |
360 | if (ac_draw) { | ||
361 | regulator_put(ac_draw); | ||
362 | ac_draw = NULL; | ||
363 | } | ||
307 | if (pdata->exit) | 364 | if (pdata->exit) |
308 | pdata->exit(dev); | 365 | pdata->exit(dev); |
309 | init_failed: | 366 | init_failed: |
@@ -327,6 +384,14 @@ static int pda_power_remove(struct platform_device *pdev) | |||
327 | power_supply_unregister(&pda_psy_usb); | 384 | power_supply_unregister(&pda_psy_usb); |
328 | if (pdata->is_ac_online) | 385 | if (pdata->is_ac_online) |
329 | power_supply_unregister(&pda_psy_ac); | 386 | power_supply_unregister(&pda_psy_ac); |
387 | #ifdef CONFIG_USB_OTG_UTILS | ||
388 | if (transceiver) | ||
389 | otg_put_transceiver(transceiver); | ||
390 | #endif | ||
391 | if (ac_draw) { | ||
392 | regulator_put(ac_draw); | ||
393 | ac_draw = NULL; | ||
394 | } | ||
330 | if (pdata->exit) | 395 | if (pdata->exit) |
331 | pdata->exit(dev); | 396 | pdata->exit(dev); |
332 | 397 | ||
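In the pda_power rework above, a board that provides no set_charge() callback can instead hand control of the AC current draw to a regulator named "ac_draw": update_charger() then uses regulator_set_current_limit() plus regulator_enable()/regulator_disable(), while probe and remove bracket it with regulator_get()/regulator_put(). A hedged sketch of those consumer calls; the supply name comes from the patch, everything else is illustrative.

/* Sketch of the regulator-consumer calls added to pda_power; error handling
 * is trimmed to the minimum and "demo" names are hypothetical. */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

static struct regulator *demo_ac_draw;
static int demo_ac_enabled;

static int demo_get_ac_draw(struct device *dev)
{
	demo_ac_draw = regulator_get(dev, "ac_draw");
	if (IS_ERR(demo_ac_draw)) {
		int ret = PTR_ERR(demo_ac_draw);

		demo_ac_draw = NULL;	/* fall back to "no regulator" mode */
		return ret;
	}
	return 0;
}

static void demo_update_ac(int online, int max_uA)
{
	if (!demo_ac_draw)
		return;

	if (online) {
		/* clamp the draw, then switch the supply on once */
		regulator_set_current_limit(demo_ac_draw, max_uA, max_uA);
		if (!demo_ac_enabled && regulator_enable(demo_ac_draw) == 0)
			demo_ac_enabled = 1;
	} else if (demo_ac_enabled) {
		regulator_disable(demo_ac_draw);
		demo_ac_enabled = 0;
	}
}

static void demo_put_ac_draw(void)
{
	if (demo_ac_draw) {
		regulator_put(demo_ac_draw);
		demo_ac_draw = NULL;
	}
}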
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 0570794ccf1c..d1815272c435 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/slab.h> | 20 | #include <linux/slab.h> |
21 | #include <linux/buffer_head.h> | 21 | #include <linux/buffer_head.h> |
22 | #include <linux/hdreg.h> | 22 | #include <linux/hdreg.h> |
23 | #include <linux/async.h> | ||
23 | 24 | ||
24 | #include <asm/ccwdev.h> | 25 | #include <asm/ccwdev.h> |
25 | #include <asm/ebcdic.h> | 26 | #include <asm/ebcdic.h> |
@@ -480,8 +481,10 @@ static void dasd_change_state(struct dasd_device *device) | |||
480 | if (rc && rc != -EAGAIN) | 481 | if (rc && rc != -EAGAIN) |
481 | device->target = device->state; | 482 | device->target = device->state; |
482 | 483 | ||
483 | if (device->state == device->target) | 484 | if (device->state == device->target) { |
484 | wake_up(&dasd_init_waitq); | 485 | wake_up(&dasd_init_waitq); |
486 | dasd_put_device(device); | ||
487 | } | ||
485 | 488 | ||
486 | /* let user-space know that the device status changed */ | 489 | /* let user-space know that the device status changed */ |
487 | kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE); | 490 | kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE); |
@@ -513,12 +516,15 @@ void dasd_kick_device(struct dasd_device *device) | |||
513 | */ | 516 | */ |
514 | void dasd_set_target_state(struct dasd_device *device, int target) | 517 | void dasd_set_target_state(struct dasd_device *device, int target) |
515 | { | 518 | { |
519 | dasd_get_device(device); | ||
516 | /* If we are in probeonly mode stop at DASD_STATE_READY. */ | 520 | /* If we are in probeonly mode stop at DASD_STATE_READY. */ |
517 | if (dasd_probeonly && target > DASD_STATE_READY) | 521 | if (dasd_probeonly && target > DASD_STATE_READY) |
518 | target = DASD_STATE_READY; | 522 | target = DASD_STATE_READY; |
519 | if (device->target != target) { | 523 | if (device->target != target) { |
520 | if (device->state == target) | 524 | if (device->state == target) { |
521 | wake_up(&dasd_init_waitq); | 525 | wake_up(&dasd_init_waitq); |
526 | dasd_put_device(device); | ||
527 | } | ||
522 | device->target = target; | 528 | device->target = target; |
523 | } | 529 | } |
524 | if (device->state != device->target) | 530 | if (device->state != device->target) |
@@ -2148,6 +2154,22 @@ dasd_exit(void) | |||
2148 | * SECTION: common functions for ccw_driver use | 2154 | * SECTION: common functions for ccw_driver use |
2149 | */ | 2155 | */ |
2150 | 2156 | ||
2157 | static void dasd_generic_auto_online(void *data, async_cookie_t cookie) | ||
2158 | { | ||
2159 | struct ccw_device *cdev = data; | ||
2160 | int ret; | ||
2161 | |||
2162 | ret = ccw_device_set_online(cdev); | ||
2163 | if (ret) | ||
2164 | pr_warning("%s: Setting the DASD online failed with rc=%d\n", | ||
2165 | dev_name(&cdev->dev), ret); | ||
2166 | else { | ||
2167 | struct dasd_device *device = dasd_device_from_cdev(cdev); | ||
2168 | wait_event(dasd_init_waitq, _wait_for_device(device)); | ||
2169 | dasd_put_device(device); | ||
2170 | } | ||
2171 | } | ||
2172 | |||
2151 | /* | 2173 | /* |
2152 | * Initial attempt at a probe function. this can be simplified once | 2174 | * Initial attempt at a probe function. this can be simplified once |
2153 | * the other detection code is gone. | 2175 | * the other detection code is gone. |
@@ -2180,10 +2202,7 @@ int dasd_generic_probe(struct ccw_device *cdev, | |||
2180 | */ | 2202 | */ |
2181 | if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0 ) || | 2203 | if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0 ) || |
2182 | (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0)) | 2204 | (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0)) |
2183 | ret = ccw_device_set_online(cdev); | 2205 | async_schedule(dasd_generic_auto_online, cdev); |
2184 | if (ret) | ||
2185 | pr_warning("%s: Setting the DASD online failed with rc=%d\n", | ||
2186 | dev_name(&cdev->dev), ret); | ||
2187 | return 0; | 2206 | return 0; |
2188 | } | 2207 | } |
2189 | 2208 | ||
@@ -2290,13 +2309,7 @@ int dasd_generic_set_online(struct ccw_device *cdev, | |||
2290 | } else | 2309 | } else |
2291 | pr_debug("dasd_generic device %s found\n", | 2310 | pr_debug("dasd_generic device %s found\n", |
2292 | dev_name(&cdev->dev)); | 2311 | dev_name(&cdev->dev)); |
2293 | |||
2294 | /* FIXME: we have to wait for the root device but we don't want | ||
2295 | * to wait for each single device but for all at once. */ | ||
2296 | wait_event(dasd_init_waitq, _wait_for_device(device)); | ||
2297 | |||
2298 | dasd_put_device(device); | 2312 | dasd_put_device(device); |
2299 | |||
2300 | return rc; | 2313 | return rc; |
2301 | } | 2314 | } |
2302 | 2315 | ||
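The dasd change above moves the slow "set online" step off the probe path: dasd_generic_probe() now only queues dasd_generic_auto_online() with async_schedule(), and the wait for the device to reach its target state happens inside that asynchronous context instead of blocking dasd_generic_set_online(). A hedged minimal sketch of the async_schedule() idiom from <linux/async.h>, with hypothetical names and the online step reduced to a log message:

/* Sketch of the async_schedule() pattern adopted above; "demo" names are
 * hypothetical. */
#include <linux/async.h>
#include <linux/device.h>
#include <linux/kernel.h>

static void demo_auto_online(void *data, async_cookie_t cookie)
{
	struct device *dev = data;

	/* runs later in a worker thread, concurrently with other probes */
	pr_info("%s: bringing device online asynchronously\n", dev_name(dev));
}

static int demo_probe(struct device *dev)
{
	/* probe returns immediately; the expensive part is deferred */
	async_schedule(demo_auto_online, dev);
	return 0;
}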
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 21254793c604..cb52da033f06 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c | |||
@@ -2019,15 +2019,23 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track( | |||
2019 | ccw++; | 2019 | ccw++; |
2020 | recid += count; | 2020 | recid += count; |
2021 | new_track = 0; | 2021 | new_track = 0; |
2022 | /* first idaw for a ccw may start anywhere */ | ||
2023 | if (!idaw_dst) | ||
2024 | idaw_dst = dst; | ||
2022 | } | 2025 | } |
2023 | /* If we start a new idaw, everything is fine and the | 2026 | /* If we start a new idaw, we must make sure that it |
2024 | * start of the new idaw is the start of this segment. | 2027 | * starts on an IDA_BLOCK_SIZE boundary. |
2025 | * If we continue an idaw, we must make sure that the | 2028 | * If we continue an idaw, we must make sure that the |
2026 | * current segment begins where the so far accumulated | 2029 | * current segment begins where the so far accumulated |
2027 | * idaw ends | 2030 | * idaw ends |
2028 | */ | 2031 | */ |
2029 | if (!idaw_dst) | 2032 | if (!idaw_dst) { |
2030 | idaw_dst = dst; | 2033 | if (__pa(dst) & (IDA_BLOCK_SIZE-1)) { |
2034 | dasd_sfree_request(cqr, startdev); | ||
2035 | return ERR_PTR(-ERANGE); | ||
2036 | } else | ||
2037 | idaw_dst = dst; | ||
2038 | } | ||
2031 | if ((idaw_dst + idaw_len) != dst) { | 2039 | if ((idaw_dst + idaw_len) != dst) { |
2032 | dasd_sfree_request(cqr, startdev); | 2040 | dasd_sfree_request(cqr, startdev); |
2033 | return ERR_PTR(-ERANGE); | 2041 | return ERR_PTR(-ERANGE); |
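The dasd_eckd hunk tightens the indirect-data-address word (IDAW) rules: the first IDAW of a CCW may start anywhere, but an IDAW opened later in the same CCW must begin on an IDA_BLOCK_SIZE boundary, which the new code checks by masking the physical address. A hedged sketch of that alignment test; the 4096-byte constant is only a stand-in, the real IDA_BLOCK_SIZE is an architecture constant (from asm/idals.h on s390).

/* Sketch of the power-of-two alignment check used above; DEMO_IDA_BLOCK_SIZE
 * is a stand-in value, not the real architecture constant. */
#include <asm/page.h>

#define DEMO_IDA_BLOCK_SIZE 4096UL

static int demo_idaw_start_ok(void *dst)
{
	/* __pa() gives the physical address; its low bits must be zero
	 * for the buffer to sit on an IDA_BLOCK_SIZE boundary */
	return (__pa(dst) & (DEMO_IDA_BLOCK_SIZE - 1)) == 0;
}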
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index 9e8a2914259b..accd957454e7 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c | |||
@@ -881,42 +881,6 @@ no_handler: | |||
881 | qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED); | 881 | qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED); |
882 | } | 882 | } |
883 | 883 | ||
884 | static void qdio_call_shutdown(struct work_struct *work) | ||
885 | { | ||
886 | struct ccw_device_private *priv; | ||
887 | struct ccw_device *cdev; | ||
888 | |||
889 | priv = container_of(work, struct ccw_device_private, kick_work); | ||
890 | cdev = priv->cdev; | ||
891 | qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); | ||
892 | put_device(&cdev->dev); | ||
893 | } | ||
894 | |||
895 | static void qdio_int_error(struct ccw_device *cdev) | ||
896 | { | ||
897 | struct qdio_irq *irq_ptr = cdev->private->qdio_data; | ||
898 | |||
899 | switch (irq_ptr->state) { | ||
900 | case QDIO_IRQ_STATE_INACTIVE: | ||
901 | case QDIO_IRQ_STATE_CLEANUP: | ||
902 | qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); | ||
903 | break; | ||
904 | case QDIO_IRQ_STATE_ESTABLISHED: | ||
905 | case QDIO_IRQ_STATE_ACTIVE: | ||
906 | qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED); | ||
907 | if (get_device(&cdev->dev)) { | ||
908 | /* Can't call shutdown from interrupt context. */ | ||
909 | PREPARE_WORK(&cdev->private->kick_work, | ||
910 | qdio_call_shutdown); | ||
911 | queue_work(ccw_device_work, &cdev->private->kick_work); | ||
912 | } | ||
913 | break; | ||
914 | default: | ||
915 | WARN_ON(1); | ||
916 | } | ||
917 | wake_up(&cdev->private->wait_q); | ||
918 | } | ||
919 | |||
920 | static int qdio_establish_check_errors(struct ccw_device *cdev, int cstat, | 884 | static int qdio_establish_check_errors(struct ccw_device *cdev, int cstat, |
921 | int dstat) | 885 | int dstat) |
922 | { | 886 | { |
@@ -973,10 +937,8 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, | |||
973 | switch (PTR_ERR(irb)) { | 937 | switch (PTR_ERR(irb)) { |
974 | case -EIO: | 938 | case -EIO: |
975 | DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no); | 939 | DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no); |
976 | return; | 940 | qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); |
977 | case -ETIMEDOUT: | 941 | wake_up(&cdev->private->wait_q); |
978 | DBF_ERROR("%4x IO timeout", irq_ptr->schid.sch_no); | ||
979 | qdio_int_error(cdev); | ||
980 | return; | 942 | return; |
981 | default: | 943 | default: |
982 | WARN_ON(1); | 944 | WARN_ON(1); |
@@ -1001,7 +963,6 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, | |||
1001 | case QDIO_IRQ_STATE_ACTIVE: | 963 | case QDIO_IRQ_STATE_ACTIVE: |
1002 | if (cstat & SCHN_STAT_PCI) { | 964 | if (cstat & SCHN_STAT_PCI) { |
1003 | qdio_int_handler_pci(irq_ptr); | 965 | qdio_int_handler_pci(irq_ptr); |
1004 | /* no state change so no need to wake up wait_q */ | ||
1005 | return; | 966 | return; |
1006 | } | 967 | } |
1007 | if ((cstat & ~SCHN_STAT_PCI) || dstat) { | 968 | if ((cstat & ~SCHN_STAT_PCI) || dstat) { |
diff --git a/drivers/sbus/char/jsflash.c b/drivers/sbus/char/jsflash.c index e6d1fc8c54f1..a85ad05e8548 100644 --- a/drivers/sbus/char/jsflash.c +++ b/drivers/sbus/char/jsflash.c | |||
@@ -383,18 +383,22 @@ static int jsf_ioctl_program(void __user *arg) | |||
383 | return 0; | 383 | return 0; |
384 | } | 384 | } |
385 | 385 | ||
386 | static int jsf_ioctl(struct inode *inode, struct file *f, unsigned int cmd, | 386 | static long jsf_ioctl(struct file *f, unsigned int cmd, unsigned long arg) |
387 | unsigned long arg) | ||
388 | { | 387 | { |
388 | lock_kernel(); | ||
389 | int error = -ENOTTY; | 389 | int error = -ENOTTY; |
390 | void __user *argp = (void __user *)arg; | 390 | void __user *argp = (void __user *)arg; |
391 | 391 | ||
392 | if (!capable(CAP_SYS_ADMIN)) | 392 | if (!capable(CAP_SYS_ADMIN)) { |
393 | unlock_kernel(); | ||
393 | return -EPERM; | 394 | return -EPERM; |
395 | } | ||
394 | switch (cmd) { | 396 | switch (cmd) { |
395 | case JSFLASH_IDENT: | 397 | case JSFLASH_IDENT: |
396 | if (copy_to_user(argp, &jsf0.id, JSFIDSZ)) | 398 | if (copy_to_user(argp, &jsf0.id, JSFIDSZ)) { |
399 | unlock_kernel(); | ||
397 | return -EFAULT; | 400 | return -EFAULT; |
401 | } | ||
398 | break; | 402 | break; |
399 | case JSFLASH_ERASE: | 403 | case JSFLASH_ERASE: |
400 | error = jsf_ioctl_erase(arg); | 404 | error = jsf_ioctl_erase(arg); |
@@ -404,6 +408,7 @@ static int jsf_ioctl(struct inode *inode, struct file *f, unsigned int cmd, | |||
404 | break; | 408 | break; |
405 | } | 409 | } |
406 | 410 | ||
411 | unlock_kernel(); | ||
407 | return error; | 412 | return error; |
408 | } | 413 | } |
409 | 414 | ||
@@ -439,7 +444,7 @@ static const struct file_operations jsf_fops = { | |||
439 | .llseek = jsf_lseek, | 444 | .llseek = jsf_lseek, |
440 | .read = jsf_read, | 445 | .read = jsf_read, |
441 | .write = jsf_write, | 446 | .write = jsf_write, |
442 | .ioctl = jsf_ioctl, | 447 | .unlocked_ioctl = jsf_ioctl, |
443 | .mmap = jsf_mmap, | 448 | .mmap = jsf_mmap, |
444 | .open = jsf_open, | 449 | .open = jsf_open, |
445 | .release = jsf_release, | 450 | .release = jsf_release, |
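The jsflash conversion above (and the uctrl one below) is part of the BKL pushdown: file_operations loses .ioctl in favour of .unlocked_ioctl, whose prototype drops the inode argument and returns long, and a driver that still wants the old serialization takes lock_kernel()/unlock_kernel() itself. A hedged sketch of the converted shape with hypothetical "demo" names; unlike the hunk above, the sketch keeps the local declarations ahead of lock_kernel() to stay clear of the kernel's -Wdeclaration-after-statement warning.

/* Sketch of an .unlocked_ioctl handler in the post-conversion style;
 * the command number and payload are placeholders. */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/uaccess.h>

static long demo_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long error = -ENOTTY;
	int version = 1;			/* placeholder payload */

	lock_kernel();				/* keep the old BKL serialization */
	switch (cmd) {
	case 0x42:				/* placeholder command number */
		error = copy_to_user(argp, &version, sizeof(version)) ?
			-EFAULT : 0;
		break;
	}
	unlock_kernel();
	return error;
}

static const struct file_operations demo_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= demo_ioctl,		/* was .ioctl before the BKL push */
};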
diff --git a/drivers/sbus/char/uctrl.c b/drivers/sbus/char/uctrl.c index 27993c37775d..2c56fd56ec63 100644 --- a/drivers/sbus/char/uctrl.c +++ b/drivers/sbus/char/uctrl.c | |||
@@ -197,9 +197,8 @@ static struct uctrl_driver { | |||
197 | static void uctrl_get_event_status(struct uctrl_driver *); | 197 | static void uctrl_get_event_status(struct uctrl_driver *); |
198 | static void uctrl_get_external_status(struct uctrl_driver *); | 198 | static void uctrl_get_external_status(struct uctrl_driver *); |
199 | 199 | ||
200 | static int | 200 | static long |
201 | uctrl_ioctl(struct inode *inode, struct file *file, | 201 | uctrl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
202 | unsigned int cmd, unsigned long arg) | ||
203 | { | 202 | { |
204 | switch (cmd) { | 203 | switch (cmd) { |
205 | default: | 204 | default: |
@@ -226,7 +225,7 @@ static irqreturn_t uctrl_interrupt(int irq, void *dev_id) | |||
226 | static const struct file_operations uctrl_fops = { | 225 | static const struct file_operations uctrl_fops = { |
227 | .owner = THIS_MODULE, | 226 | .owner = THIS_MODULE, |
228 | .llseek = no_llseek, | 227 | .llseek = no_llseek, |
229 | .ioctl = uctrl_ioctl, | 228 | .unlocked_ioctl = uctrl_ioctl, |
230 | .open = uctrl_open, | 229 | .open = uctrl_open, |
231 | }; | 230 | }; |
232 | 231 | ||
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c index fdb14ec4fd47..8b7983aba8f7 100644 --- a/drivers/scsi/3w-9xxx.c +++ b/drivers/scsi/3w-9xxx.c | |||
@@ -2234,10 +2234,10 @@ static int twa_resume(struct pci_dev *pdev) | |||
2234 | pci_set_master(pdev); | 2234 | pci_set_master(pdev); |
2235 | pci_try_set_mwi(pdev); | 2235 | pci_try_set_mwi(pdev); |
2236 | 2236 | ||
2237 | if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) | 2237 | if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) |
2238 | || pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) | 2238 | || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) |
2239 | if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) | 2239 | if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) |
2240 | || pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) { | 2240 | || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) { |
2241 | TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume"); | 2241 | TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume"); |
2242 | retval = -ENODEV; | 2242 | retval = -ENODEV; |
2243 | goto out_disable_device; | 2243 | goto out_disable_device; |
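The 3w-9xxx hunk above, and the aacraid and mpt2sas hunks below, replace the retired DMA_64BIT_MASK/DMA_32BIT_MASK constants with DMA_BIT_MASK(n) from <linux/dma-mapping.h>, which builds an n-bit all-ones mask and special-cases n == 64 to avoid an undefined 64-bit shift. A hedged sketch of the usual 64-bit-then-32-bit fallback, as in twa_resume() above, with a hypothetical helper name:

/* Sketch of the DMA-mask fallback idiom using DMA_BIT_MASK();
 * demo_set_dma_masks() is a hypothetical helper. */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/pci.h>

static int demo_set_dma_masks(struct pci_dev *pdev)
{
	/* prefer full 64-bit addressing, fall back to 32-bit */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) ||
	    pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) ||
		    pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
			return -ENODEV;
	}
	return 0;
}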
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index 280261c451d6..2a889853a106 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c | |||
@@ -1378,7 +1378,7 @@ int aac_get_adapter_info(struct aac_dev* dev) | |||
1378 | if (dev->nondasd_support && !dev->in_reset) | 1378 | if (dev->nondasd_support && !dev->in_reset) |
1379 | printk(KERN_INFO "%s%d: Non-DASD support enabled.\n",dev->name, dev->id); | 1379 | printk(KERN_INFO "%s%d: Non-DASD support enabled.\n",dev->name, dev->id); |
1380 | 1380 | ||
1381 | if (dma_get_required_mask(&dev->pdev->dev) > DMA_32BIT_MASK) | 1381 | if (dma_get_required_mask(&dev->pdev->dev) > DMA_BIT_MASK(32)) |
1382 | dev->needs_dac = 1; | 1382 | dev->needs_dac = 1; |
1383 | dev->dac_support = 0; | 1383 | dev->dac_support = 0; |
1384 | if ((sizeof(dma_addr_t) > 4) && dev->needs_dac && | 1384 | if ((sizeof(dma_addr_t) > 4) && dev->needs_dac && |
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index b1bd3fc7bae8..36fd2e75da1c 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c | |||
@@ -1394,7 +1394,7 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, | |||
1394 | */ | 1394 | */ |
1395 | cmd->sense_buffer[8] = 0; /* Information */ | 1395 | cmd->sense_buffer[8] = 0; /* Information */ |
1396 | cmd->sense_buffer[9] = 0xa; /* Add. length */ | 1396 | cmd->sense_buffer[9] = 0xa; /* Add. length */ |
1397 | do_div(bghm, cmd->device->sector_size); | 1397 | bghm /= cmd->device->sector_size; |
1398 | 1398 | ||
1399 | failing_sector = scsi_get_lba(cmd); | 1399 | failing_sector = scsi_get_lba(cmd); |
1400 | failing_sector += bghm; | 1400 | failing_sector += bghm; |
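The lpfc hunk above swaps do_div() for a plain division. do_div(n, base) from <asm/div64.h> exists for 64-bit dividends on 32-bit architectures: it divides n in place and evaluates to the remainder, so when the dividend is a native-width integer an ordinary '/' is both correct and simpler. A hedged sketch contrasting the two forms, with illustrative names:

/* Sketch contrasting do_div() with plain division; names are illustrative. */
#include <linux/types.h>
#include <asm/div64.h>

static u32 demo_sectors_64(u64 byte_offset, u32 sector_size)
{
	/* do_div() modifies the 64-bit dividend in place and returns
	 * the remainder; byte_offset holds the quotient afterwards */
	u32 rem = do_div(byte_offset, sector_size);

	(void)rem;			/* remainder unused in this sketch */
	return (u32)byte_offset;
}

static u32 demo_sectors_native(u32 byte_offset, u32 sector_size)
{
	/* with a native-width dividend a plain division is enough,
	 * which is what the lpfc change switches to */
	return byte_offset / sector_size;
}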
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index 52427a8324f5..a91f5143ceac 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c | |||
@@ -855,9 +855,9 @@ _base_config_dma_addressing(struct MPT2SAS_ADAPTER *ioc, struct pci_dev *pdev) | |||
855 | if (sizeof(dma_addr_t) > 4) { | 855 | if (sizeof(dma_addr_t) > 4) { |
856 | const uint64_t required_mask = | 856 | const uint64_t required_mask = |
857 | dma_get_required_mask(&pdev->dev); | 857 | dma_get_required_mask(&pdev->dev); |
858 | if ((required_mask > DMA_32BIT_MASK) && !pci_set_dma_mask(pdev, | 858 | if ((required_mask > DMA_BIT_MASK(32)) && !pci_set_dma_mask(pdev, |
859 | DMA_64BIT_MASK) && !pci_set_consistent_dma_mask(pdev, | 859 | DMA_BIT_MASK(64)) && !pci_set_consistent_dma_mask(pdev, |
860 | DMA_64BIT_MASK)) { | 860 | DMA_BIT_MASK(64))) { |
861 | ioc->base_add_sg_single = &_base_add_sg_single_64; | 861 | ioc->base_add_sg_single = &_base_add_sg_single_64; |
862 | ioc->sge_size = sizeof(Mpi2SGESimple64_t); | 862 | ioc->sge_size = sizeof(Mpi2SGESimple64_t); |
863 | desc = "64"; | 863 | desc = "64"; |
@@ -865,8 +865,8 @@ _base_config_dma_addressing(struct MPT2SAS_ADAPTER *ioc, struct pci_dev *pdev) | |||
865 | } | 865 | } |
866 | } | 866 | } |
867 | 867 | ||
868 | if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK) | 868 | if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) |
869 | && !pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) { | 869 | && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) { |
870 | ioc->base_add_sg_single = &_base_add_sg_single_32; | 870 | ioc->base_add_sg_single = &_base_add_sg_single_32; |
871 | ioc->sge_size = sizeof(Mpi2SGESimple32_t); | 871 | ioc->sge_size = sizeof(Mpi2SGESimple32_t); |
872 | desc = "32"; | 872 | desc = "32"; |
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c index 687dcf2d0154..5defe5ea5eda 100644 --- a/drivers/scsi/qla1280.c +++ b/drivers/scsi/qla1280.c | |||
@@ -1663,7 +1663,7 @@ qla1280_load_firmware_pio(struct scsi_qla_host *ha) | |||
1663 | 1663 | ||
1664 | /* Load RISC code. */ | 1664 | /* Load RISC code. */ |
1665 | risc_address = ha->fwstart; | 1665 | risc_address = ha->fwstart; |
1666 | fw_data = (const __le16 *)&fw->data[4]; | 1666 | fw_data = (const __le16 *)&fw->data[6]; |
1667 | risc_code_size = (fw->size - 6) / 2; | 1667 | risc_code_size = (fw->size - 6) / 2; |
1668 | 1668 | ||
1669 | for (i = 0; i < risc_code_size; i++) { | 1669 | for (i = 0; i < risc_code_size; i++) { |
@@ -1722,7 +1722,7 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha) | |||
1722 | 1722 | ||
1723 | /* Load RISC code. */ | 1723 | /* Load RISC code. */ |
1724 | risc_address = ha->fwstart; | 1724 | risc_address = ha->fwstart; |
1725 | fw_data = (const __le16 *)&fw->data[4]; | 1725 | fw_data = (const __le16 *)&fw->data[6]; |
1726 | risc_code_size = (fw->size - 6) / 2; | 1726 | risc_code_size = (fw->size - 6) / 2; |
1727 | 1727 | ||
1728 | dprintk(1, "%s: DMA RISC code (%i) words\n", | 1728 | dprintk(1, "%s: DMA RISC code (%i) words\n", |
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h index e1850904ff73..fbc83bebdd8e 100644 --- a/drivers/scsi/scsi_priv.h +++ b/drivers/scsi/scsi_priv.h | |||
@@ -38,9 +38,6 @@ static inline void scsi_log_completion(struct scsi_cmnd *cmd, int disposition) | |||
38 | { }; | 38 | { }; |
39 | #endif | 39 | #endif |
40 | 40 | ||
41 | /* scsi_scan.c */ | ||
42 | int scsi_complete_async_scans(void); | ||
43 | |||
44 | /* scsi_devinfo.c */ | 41 | /* scsi_devinfo.c */ |
45 | extern int scsi_get_device_flags(struct scsi_device *sdev, | 42 | extern int scsi_get_device_flags(struct scsi_device *sdev, |
46 | const unsigned char *vendor, | 43 | const unsigned char *vendor, |
diff --git a/drivers/scsi/scsi_wait_scan.c b/drivers/scsi/scsi_wait_scan.c index 8a636103083d..2f21af21269a 100644 --- a/drivers/scsi/scsi_wait_scan.c +++ b/drivers/scsi/scsi_wait_scan.c | |||
@@ -11,7 +11,7 @@ | |||
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include "scsi_priv.h" | 14 | #include <scsi/scsi_scan.h> |
15 | 15 | ||
16 | static int __init wait_scan_init(void) | 16 | static int __init wait_scan_init(void) |
17 | { | 17 | { |
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig index 0328fd4006e5..343e3a35b6a3 100644 --- a/drivers/serial/Kconfig +++ b/drivers/serial/Kconfig | |||
@@ -854,7 +854,7 @@ config SERIAL_IMX_CONSOLE | |||
854 | 854 | ||
855 | config SERIAL_UARTLITE | 855 | config SERIAL_UARTLITE |
856 | tristate "Xilinx uartlite serial port support" | 856 | tristate "Xilinx uartlite serial port support" |
857 | depends on PPC32 | 857 | depends on PPC32 || MICROBLAZE |
858 | select SERIAL_CORE | 858 | select SERIAL_CORE |
859 | help | 859 | help |
860 | Say Y here if you want to use the Xilinx uartlite serial controller. | 860 | Say Y here if you want to use the Xilinx uartlite serial controller. |
@@ -1340,7 +1340,7 @@ config SERIAL_NETX_CONSOLE | |||
1340 | 1340 | ||
1341 | config SERIAL_OF_PLATFORM | 1341 | config SERIAL_OF_PLATFORM |
1342 | tristate "Serial port on Open Firmware platform bus" | 1342 | tristate "Serial port on Open Firmware platform bus" |
1343 | depends on PPC_OF | 1343 | depends on PPC_OF || MICROBLAZE |
1344 | depends on SERIAL_8250 || SERIAL_OF_PLATFORM_NWPSERIAL | 1344 | depends on SERIAL_8250 || SERIAL_OF_PLATFORM_NWPSERIAL |
1345 | help | 1345 | help |
1346 | If you have a PowerPC based system that has serial ports | 1346 | If you have a PowerPC based system that has serial ports |
diff --git a/drivers/serial/max3100.c b/drivers/serial/max3100.c new file mode 100644 index 000000000000..9fd33e5622bd --- /dev/null +++ b/drivers/serial/max3100.c | |||
@@ -0,0 +1,927 @@ | |||
1 | /* | ||
2 | * | ||
3 | * Copyright (C) 2008 Christian Pellegrin <chripell@evolware.org> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; either version 2 of the License, or | ||
8 | * (at your option) any later version. | ||
9 | * | ||
10 | * | ||
11 | * Notes: the MAX3100 doesn't provide an interrupt on CTS so we have | ||
12 | * to use polling for flow control. TX empty IRQ is unusable, since | ||
13 | * writing conf clears FIFO buffer and we cannot have this interrupt | ||
14 | * always asking us for attention. | ||
15 | * | ||
16 | * Example platform data: | ||
17 | |||
18 | static struct plat_max3100 max3100_plat_data = { | ||
19 | .loopback = 0, | ||
20 | .crystal = 0, | ||
21 | .poll_time = 100, | ||
22 | }; | ||
23 | |||
24 | static struct spi_board_info spi_board_info[] = { | ||
25 | { | ||
26 | .modalias = "max3100", | ||
27 | .platform_data = &max3100_plat_data, | ||
28 | .irq = IRQ_EINT12, | ||
29 | .max_speed_hz = 5*1000*1000, | ||
30 | .chip_select = 0, | ||
31 | }, | ||
32 | }; | ||
33 | |||
34 | * The initial minor number is 209 in the low-density serial port range: | ||
35 | * mknod /dev/ttyMAX0 c 204 209 | ||
36 | */ | ||
37 | |||
38 | #define MAX3100_MAJOR 204 | ||
39 | #define MAX3100_MINOR 209 | ||
40 | /* 4 MAX3100s should be enough for everyone */ | ||
41 | #define MAX_MAX3100 4 | ||
42 | |||
43 | #include <linux/delay.h> | ||
44 | #include <linux/device.h> | ||
45 | #include <linux/serial_core.h> | ||
46 | #include <linux/serial.h> | ||
47 | #include <linux/spi/spi.h> | ||
48 | #include <linux/freezer.h> | ||
49 | |||
50 | #include <linux/serial_max3100.h> | ||
51 | |||
52 | #define MAX3100_C (1<<14) | ||
53 | #define MAX3100_D (0<<14) | ||
54 | #define MAX3100_W (1<<15) | ||
55 | #define MAX3100_RX (0<<15) | ||
56 | |||
57 | #define MAX3100_WC (MAX3100_W | MAX3100_C) | ||
58 | #define MAX3100_RC (MAX3100_RX | MAX3100_C) | ||
59 | #define MAX3100_WD (MAX3100_W | MAX3100_D) | ||
60 | #define MAX3100_RD (MAX3100_RX | MAX3100_D) | ||
61 | #define MAX3100_CMD (3 << 14) | ||
62 | |||
63 | #define MAX3100_T (1<<14) | ||
64 | #define MAX3100_R (1<<15) | ||
65 | |||
66 | #define MAX3100_FEN (1<<13) | ||
67 | #define MAX3100_SHDN (1<<12) | ||
68 | #define MAX3100_TM (1<<11) | ||
69 | #define MAX3100_RM (1<<10) | ||
70 | #define MAX3100_PM (1<<9) | ||
71 | #define MAX3100_RAM (1<<8) | ||
72 | #define MAX3100_IR (1<<7) | ||
73 | #define MAX3100_ST (1<<6) | ||
74 | #define MAX3100_PE (1<<5) | ||
75 | #define MAX3100_L (1<<4) | ||
76 | #define MAX3100_BAUD (0xf) | ||
77 | |||
78 | #define MAX3100_TE (1<<10) | ||
79 | #define MAX3100_RAFE (1<<10) | ||
80 | #define MAX3100_RTS (1<<9) | ||
81 | #define MAX3100_CTS (1<<9) | ||
82 | #define MAX3100_PT (1<<8) | ||
83 | #define MAX3100_DATA (0xff) | ||
84 | |||
85 | #define MAX3100_RT (MAX3100_R | MAX3100_T) | ||
86 | #define MAX3100_RTC (MAX3100_RT | MAX3100_CTS | MAX3100_RAFE) | ||
87 | |||
88 | /* the following simulate a status reg for ignore_status_mask */ | ||
89 | #define MAX3100_STATUS_PE 1 | ||
90 | #define MAX3100_STATUS_FE 2 | ||
91 | #define MAX3100_STATUS_OE 4 | ||
92 | |||
93 | struct max3100_port { | ||
94 | struct uart_port port; | ||
95 | struct spi_device *spi; | ||
96 | |||
97 | int cts; /* last CTS received for flow ctrl */ | ||
98 | int tx_empty; /* last TX empty bit */ | ||
99 | |||
100 | spinlock_t conf_lock; /* shared data */ | ||
101 | int conf_commit; /* need to make changes */ | ||
102 | int conf; /* configuration for the MAX3100 | ||
103 | * (bits 0-7, bits 8-11 are irqs) */ | ||
104 | int rts_commit; /* need to change rts */ | ||
105 | int rts; /* rts status */ | ||
106 | int baud; /* current baud rate */ | ||
107 | |||
108 | int parity; /* keeps track if we should send parity */ | ||
109 | #define MAX3100_PARITY_ON 1 | ||
110 | #define MAX3100_PARITY_ODD 2 | ||
111 | #define MAX3100_7BIT 4 | ||
112 | int rx_enabled; /* if we should rx chars */ | ||
113 | |||
114 | int irq; /* irq assigned to the max3100 */ | ||
115 | |||
116 | int minor; /* minor number */ | ||
117 | int crystal; /* 1 if 3.6864 MHz crystal, 0 for 1.8432 MHz */ | ||
118 | int loopback; /* 1 if we are in loopback mode */ | ||
119 | |||
120 | /* for handling irqs: need workqueue since we do spi_sync */ | ||
121 | struct workqueue_struct *workqueue; | ||
122 | struct work_struct work; | ||
123 | /* set to 1 to make the workhandler exit as soon as possible */ | ||
124 | int force_end_work; | ||
125 | /* need to know we are suspending to avoid deadlock on workqueue */ | ||
126 | int suspending; | ||
127 | |||
128 | /* hook for suspending MAX3100 via dedicated pin */ | ||
129 | void (*max3100_hw_suspend) (int suspend); | ||
130 | |||
131 | /* poll time (in ms) for ctrl lines */ | ||
132 | int poll_time; | ||
133 | /* and its timer */ | ||
134 | struct timer_list timer; | ||
135 | }; | ||
136 | |||
137 | static struct max3100_port *max3100s[MAX_MAX3100]; /* the chips */ | ||
138 | static DEFINE_MUTEX(max3100s_lock); /* race on probe */ | ||
139 | |||
140 | static int max3100_do_parity(struct max3100_port *s, u16 c) | ||
141 | { | ||
142 | int parity; | ||
143 | |||
144 | if (s->parity & MAX3100_PARITY_ODD) | ||
145 | parity = 1; | ||
146 | else | ||
147 | parity = 0; | ||
148 | |||
149 | if (s->parity & MAX3100_7BIT) | ||
150 | c &= 0x7f; | ||
151 | else | ||
152 | c &= 0xff; | ||
153 | |||
154 | parity = parity ^ (hweight8(c) & 1); | ||
155 | return parity; | ||
156 | } | ||
157 | |||
158 | static int max3100_check_parity(struct max3100_port *s, u16 c) | ||
159 | { | ||
160 | return max3100_do_parity(s, c) == ((c >> 8) & 1); | ||
161 | } | ||
162 | |||
163 | static void max3100_calc_parity(struct max3100_port *s, u16 *c) | ||
164 | { | ||
165 | if (s->parity & MAX3100_7BIT) | ||
166 | *c &= 0x7f; | ||
167 | else | ||
168 | *c &= 0xff; | ||
169 | |||
170 | if (s->parity & MAX3100_PARITY_ON) | ||
171 | *c |= max3100_do_parity(s, *c) << 8; | ||
172 | } | ||
173 | |||
174 | static void max3100_work(struct work_struct *w); | ||
175 | |||
176 | static void max3100_dowork(struct max3100_port *s) | ||
177 | { | ||
178 | if (!s->force_end_work && !work_pending(&s->work) && | ||
179 | !freezing(current) && !s->suspending) | ||
180 | queue_work(s->workqueue, &s->work); | ||
181 | } | ||
182 | |||
183 | static void max3100_timeout(unsigned long data) | ||
184 | { | ||
185 | struct max3100_port *s = (struct max3100_port *)data; | ||
186 | |||
187 | if (s->port.info) { | ||
188 | max3100_dowork(s); | ||
189 | mod_timer(&s->timer, jiffies + s->poll_time); | ||
190 | } | ||
191 | } | ||
192 | |||
193 | static int max3100_sr(struct max3100_port *s, u16 tx, u16 *rx) | ||
194 | { | ||
195 | struct spi_message message; | ||
196 | u16 etx, erx; | ||
197 | int status; | ||
198 | struct spi_transfer tran = { | ||
199 | .tx_buf = &etx, | ||
200 | .rx_buf = &erx, | ||
201 | .len = 2, | ||
202 | }; | ||
203 | |||
204 | etx = cpu_to_be16(tx); | ||
205 | spi_message_init(&message); | ||
206 | spi_message_add_tail(&tran, &message); | ||
207 | status = spi_sync(s->spi, &message); | ||
208 | if (status) { | ||
209 | dev_warn(&s->spi->dev, "error while calling spi_sync\n"); | ||
210 | return -EIO; | ||
211 | } | ||
212 | *rx = be16_to_cpu(erx); | ||
213 | s->tx_empty = (*rx & MAX3100_T) > 0; | ||
214 | dev_dbg(&s->spi->dev, "%04x - %04x\n", tx, *rx); | ||
215 | return 0; | ||
216 | } | ||
217 | |||
218 | static int max3100_handlerx(struct max3100_port *s, u16 rx) | ||
219 | { | ||
220 | unsigned int ch, flg, status = 0; | ||
221 | int ret = 0, cts; | ||
222 | |||
223 | if (rx & MAX3100_R && s->rx_enabled) { | ||
224 | dev_dbg(&s->spi->dev, "%s\n", __func__); | ||
225 | ch = rx & (s->parity & MAX3100_7BIT ? 0x7f : 0xff); | ||
226 | if (rx & MAX3100_RAFE) { | ||
227 | s->port.icount.frame++; | ||
228 | flg = TTY_FRAME; | ||
229 | status |= MAX3100_STATUS_FE; | ||
230 | } else { | ||
231 | if (s->parity & MAX3100_PARITY_ON) { | ||
232 | if (max3100_check_parity(s, rx)) { | ||
233 | s->port.icount.rx++; | ||
234 | flg = TTY_NORMAL; | ||
235 | } else { | ||
236 | s->port.icount.parity++; | ||
237 | flg = TTY_PARITY; | ||
238 | status |= MAX3100_STATUS_PE; | ||
239 | } | ||
240 | } else { | ||
241 | s->port.icount.rx++; | ||
242 | flg = TTY_NORMAL; | ||
243 | } | ||
244 | } | ||
245 | uart_insert_char(&s->port, status, MAX3100_STATUS_OE, ch, flg); | ||
246 | ret = 1; | ||
247 | } | ||
248 | |||
249 | cts = (rx & MAX3100_CTS) > 0; | ||
250 | if (s->cts != cts) { | ||
251 | s->cts = cts; | ||
252 | uart_handle_cts_change(&s->port, cts ? TIOCM_CTS : 0); | ||
253 | } | ||
254 | |||
255 | return ret; | ||
256 | } | ||
257 | |||
258 | static void max3100_work(struct work_struct *w) | ||
259 | { | ||
260 | struct max3100_port *s = container_of(w, struct max3100_port, work); | ||
261 | int rxchars; | ||
262 | u16 tx, rx; | ||
263 | int conf, cconf, rts, crts; | ||
264 | struct circ_buf *xmit = &s->port.info->xmit; | ||
265 | |||
266 | dev_dbg(&s->spi->dev, "%s\n", __func__); | ||
267 | |||
268 | rxchars = 0; | ||
269 | do { | ||
270 | spin_lock(&s->conf_lock); | ||
271 | conf = s->conf; | ||
272 | cconf = s->conf_commit; | ||
273 | s->conf_commit = 0; | ||
274 | rts = s->rts; | ||
275 | crts = s->rts_commit; | ||
276 | s->rts_commit = 0; | ||
277 | spin_unlock(&s->conf_lock); | ||
278 | if (cconf) | ||
279 | max3100_sr(s, MAX3100_WC | conf, &rx); | ||
280 | if (crts) { | ||
281 | max3100_sr(s, MAX3100_WD | MAX3100_TE | | ||
282 | (s->rts ? MAX3100_RTS : 0), &rx); | ||
283 | rxchars += max3100_handlerx(s, rx); | ||
284 | } | ||
285 | |||
286 | max3100_sr(s, MAX3100_RD, &rx); | ||
287 | rxchars += max3100_handlerx(s, rx); | ||
288 | |||
289 | if (rx & MAX3100_T) { | ||
290 | tx = 0xffff; | ||
291 | if (s->port.x_char) { | ||
292 | tx = s->port.x_char; | ||
293 | s->port.icount.tx++; | ||
294 | s->port.x_char = 0; | ||
295 | } else if (!uart_circ_empty(xmit) && | ||
296 | !uart_tx_stopped(&s->port)) { | ||
297 | tx = xmit->buf[xmit->tail]; | ||
298 | xmit->tail = (xmit->tail + 1) & | ||
299 | (UART_XMIT_SIZE - 1); | ||
300 | s->port.icount.tx++; | ||
301 | } | ||
302 | if (tx != 0xffff) { | ||
303 | max3100_calc_parity(s, &tx); | ||
304 | tx |= MAX3100_WD | (s->rts ? MAX3100_RTS : 0); | ||
305 | max3100_sr(s, tx, &rx); | ||
306 | rxchars += max3100_handlerx(s, rx); | ||
307 | } | ||
308 | } | ||
309 | |||
310 | if (rxchars > 16 && s->port.info->port.tty != NULL) { | ||
311 | tty_flip_buffer_push(s->port.info->port.tty); | ||
312 | rxchars = 0; | ||
313 | } | ||
314 | if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) | ||
315 | uart_write_wakeup(&s->port); | ||
316 | |||
317 | } while (!s->force_end_work && | ||
318 | !freezing(current) && | ||
319 | ((rx & MAX3100_R) || | ||
320 | (!uart_circ_empty(xmit) && | ||
321 | !uart_tx_stopped(&s->port)))); | ||
322 | |||
323 | if (rxchars > 0 && s->port.info->port.tty != NULL) | ||
324 | tty_flip_buffer_push(s->port.info->port.tty); | ||
325 | } | ||
326 | |||
327 | static irqreturn_t max3100_irq(int irqno, void *dev_id) | ||
328 | { | ||
329 | struct max3100_port *s = dev_id; | ||
330 | |||
331 | dev_dbg(&s->spi->dev, "%s\n", __func__); | ||
332 | |||
333 | max3100_dowork(s); | ||
334 | return IRQ_HANDLED; | ||
335 | } | ||
336 | |||
337 | static void max3100_enable_ms(struct uart_port *port) | ||
338 | { | ||
339 | struct max3100_port *s = container_of(port, | ||
340 | struct max3100_port, | ||
341 | port); | ||
342 | |||
343 | if (s->poll_time > 0) | ||
344 | mod_timer(&s->timer, jiffies); | ||
345 | dev_dbg(&s->spi->dev, "%s\n", __func__); | ||
346 | } | ||
347 | |||
348 | static void max3100_start_tx(struct uart_port *port) | ||
349 | { | ||
350 | struct max3100_port *s = container_of(port, | ||
351 | struct max3100_port, | ||
352 | port); | ||
353 | |||
354 | dev_dbg(&s->spi->dev, "%s\n", __func__); | ||
355 | |||
356 | max3100_dowork(s); | ||
357 | } | ||
358 | |||
359 | static void max3100_stop_rx(struct uart_port *port) | ||
360 | { | ||
361 | struct max3100_port *s = container_of(port, | ||
362 | struct max3100_port, | ||
363 | port); | ||
364 | |||
365 | dev_dbg(&s->spi->dev, "%s\n", __func__); | ||
366 | |||
367 | s->rx_enabled = 0; | ||
368 | spin_lock(&s->conf_lock); | ||
369 | s->conf &= ~MAX3100_RM; | ||
370 | s->conf_commit = 1; | ||
371 | spin_unlock(&s->conf_lock); | ||
372 | max3100_dowork(s); | ||
373 | } | ||
374 | |||
375 | static unsigned int max3100_tx_empty(struct uart_port *port) | ||
376 | { | ||
377 | struct max3100_port *s = container_of(port, | ||
378 | struct max3100_port, | ||
379 | port); | ||
380 | |||
381 | dev_dbg(&s->spi->dev, "%s\n", __func__); | ||
382 | |||
383 | /* may not be truly up-to-date */ | ||
384 | max3100_dowork(s); | ||
385 | return s->tx_empty; | ||
386 | } | ||
387 | |||
388 | static unsigned int max3100_get_mctrl(struct uart_port *port) | ||
389 | { | ||
390 | struct max3100_port *s = container_of(port, | ||
391 | struct max3100_port, | ||
392 | port); | ||
393 | |||
394 | dev_dbg(&s->spi->dev, "%s\n", __func__); | ||
395 | |||
396 | /* may not be truly up-to-date */ | ||
397 | max3100_dowork(s); | ||
398 | /* always assert DCD and DSR since these lines are not wired */ | ||
399 | return (s->cts ? TIOCM_CTS : 0) | TIOCM_DSR | TIOCM_CAR; | ||
400 | } | ||
401 | |||
402 | static void max3100_set_mctrl(struct uart_port *port, unsigned int mctrl) | ||
403 | { | ||
404 | struct max3100_port *s = container_of(port, | ||
405 | struct max3100_port, | ||
406 | port); | ||
407 | int rts; | ||
408 | |||
409 | dev_dbg(&s->spi->dev, "%s\n", __func__); | ||
410 | |||
411 | rts = (mctrl & TIOCM_RTS) > 0; | ||
412 | |||
413 | spin_lock(&s->conf_lock); | ||
414 | if (s->rts != rts) { | ||
415 | s->rts = rts; | ||
416 | s->rts_commit = 1; | ||
417 | max3100_dowork(s); | ||
418 | } | ||
419 | spin_unlock(&s->conf_lock); | ||
420 | } | ||
421 | |||
422 | static void | ||
423 | max3100_set_termios(struct uart_port *port, struct ktermios *termios, | ||
424 | struct ktermios *old) | ||
425 | { | ||
426 | struct max3100_port *s = container_of(port, | ||
427 | struct max3100_port, | ||
428 | port); | ||
429 | int baud = 0; | ||
430 | unsigned cflag; | ||
431 | u32 param_new, param_mask, parity = 0; | ||
432 | struct tty_struct *tty = s->port.info->port.tty; | ||
433 | |||
434 | dev_dbg(&s->spi->dev, "%s\n", __func__); | ||
435 | if (!tty) | ||
436 | return; | ||
437 | |||
438 | cflag = termios->c_cflag; | ||
439 | param_new = 0; | ||
440 | param_mask = 0; | ||
441 | |||
442 | baud = tty_get_baud_rate(tty); | ||
443 | param_new = s->conf & MAX3100_BAUD; | ||
444 | switch (baud) { | ||
445 | case 300: | ||
446 | if (s->crystal) | ||
447 | baud = s->baud; | ||
448 | else | ||
449 | param_new = 15; | ||
450 | break; | ||
451 | case 600: | ||
452 | param_new = 14 + s->crystal; | ||
453 | break; | ||
454 | case 1200: | ||
455 | param_new = 13 + s->crystal; | ||
456 | break; | ||
457 | case 2400: | ||
458 | param_new = 12 + s->crystal; | ||
459 | break; | ||
460 | case 4800: | ||
461 | param_new = 11 + s->crystal; | ||
462 | break; | ||
463 | case 9600: | ||
464 | param_new = 10 + s->crystal; | ||
465 | break; | ||
466 | case 19200: | ||
467 | param_new = 9 + s->crystal; | ||
468 | break; | ||
469 | case 38400: | ||
470 | param_new = 8 + s->crystal; | ||
471 | break; | ||
472 | case 57600: | ||
473 | param_new = 1 + s->crystal; | ||
474 | break; | ||
475 | case 115200: | ||
476 | param_new = 0 + s->crystal; | ||
477 | break; | ||
478 | case 230400: | ||
479 | if (s->crystal) | ||
480 | param_new = 0; | ||
481 | else | ||
482 | baud = s->baud; | ||
483 | break; | ||
484 | default: | ||
485 | baud = s->baud; | ||
486 | } | ||
487 | tty_encode_baud_rate(tty, baud, baud); | ||
488 | s->baud = baud; | ||
489 | param_mask |= MAX3100_BAUD; | ||
490 | |||
491 | if ((cflag & CSIZE) == CS8) { | ||
492 | param_new &= ~MAX3100_L; | ||
493 | parity &= ~MAX3100_7BIT; | ||
494 | } else { | ||
495 | param_new |= MAX3100_L; | ||
496 | parity |= MAX3100_7BIT; | ||
497 | cflag = (cflag & ~CSIZE) | CS7; | ||
498 | } | ||
499 | param_mask |= MAX3100_L; | ||
500 | |||
501 | if (cflag & CSTOPB) | ||
502 | param_new |= MAX3100_ST; | ||
503 | else | ||
504 | param_new &= ~MAX3100_ST; | ||
505 | param_mask |= MAX3100_ST; | ||
506 | |||
507 | if (cflag & PARENB) { | ||
508 | param_new |= MAX3100_PE; | ||
509 | parity |= MAX3100_PARITY_ON; | ||
510 | } else { | ||
511 | param_new &= ~MAX3100_PE; | ||
512 | parity &= ~MAX3100_PARITY_ON; | ||
513 | } | ||
514 | param_mask |= MAX3100_PE; | ||
515 | |||
516 | if (cflag & PARODD) | ||
517 | parity |= MAX3100_PARITY_ODD; | ||
518 | else | ||
519 | parity &= ~MAX3100_PARITY_ODD; | ||
520 | |||
521 | /* mask termios capabilities we don't support */ | ||
522 | cflag &= ~CMSPAR; | ||
523 | termios->c_cflag = cflag; | ||
524 | |||
525 | s->port.ignore_status_mask = 0; | ||
526 | if (termios->c_iflag & IGNPAR) | ||
527 | s->port.ignore_status_mask |= | ||
528 | MAX3100_STATUS_PE | MAX3100_STATUS_FE | | ||
529 | MAX3100_STATUS_OE; | ||
530 | |||
531 | /* we send chars from a workqueue, so enable low_latency */ | ||
532 | s->port.info->port.tty->low_latency = 1; | ||
533 | |||
534 | if (s->poll_time > 0) | ||
535 | del_timer_sync(&s->timer); | ||
536 | |||
537 | uart_update_timeout(port, termios->c_cflag, baud); | ||
538 | |||
539 | spin_lock(&s->conf_lock); | ||
540 | s->conf = (s->conf & ~param_mask) | (param_new & param_mask); | ||
541 | s->conf_commit = 1; | ||
542 | s->parity = parity; | ||
543 | spin_unlock(&s->conf_lock); | ||
544 | max3100_dowork(s); | ||
545 | |||
546 | if (UART_ENABLE_MS(&s->port, termios->c_cflag)) | ||
547 | max3100_enable_ms(&s->port); | ||
548 | } | ||
549 | |||
550 | static void max3100_shutdown(struct uart_port *port) | ||
551 | { | ||
552 | struct max3100_port *s = container_of(port, | ||
553 | struct max3100_port, | ||
554 | port); | ||
555 | |||
556 | dev_dbg(&s->spi->dev, "%s\n", __func__); | ||
557 | |||
558 | if (s->suspending) | ||
559 | return; | ||
560 | |||
561 | s->force_end_work = 1; | ||
562 | |||
563 | if (s->poll_time > 0) | ||
564 | del_timer_sync(&s->timer); | ||
565 | |||
566 | if (s->workqueue) { | ||
567 | flush_workqueue(s->workqueue); | ||
568 | destroy_workqueue(s->workqueue); | ||
569 | s->workqueue = NULL; | ||
570 | } | ||
571 | if (s->irq) | ||
572 | free_irq(s->irq, s); | ||
573 | |||
574 | /* set shutdown mode to save power */ | ||
575 | if (s->max3100_hw_suspend) | ||
576 | s->max3100_hw_suspend(1); | ||
577 | else { | ||
578 | u16 tx, rx; | ||
579 | |||
580 | tx = MAX3100_WC | MAX3100_SHDN; | ||
581 | max3100_sr(s, tx, &rx); | ||
582 | } | ||
583 | } | ||
584 | |||
585 | static int max3100_startup(struct uart_port *port) | ||
586 | { | ||
587 | struct max3100_port *s = container_of(port, | ||
588 | struct max3100_port, | ||
589 | port); | ||
590 | char b[12]; | ||
591 | |||
592 | dev_dbg(&s->spi->dev, "%s\n", __func__); | ||
593 | |||
594 | s->conf = MAX3100_RM; | ||
595 | s->baud = s->crystal ? 230400 : 115200; | ||
596 | s->rx_enabled = 1; | ||
597 | |||
598 | if (s->suspending) | ||
599 | return 0; | ||
600 | |||
601 | s->force_end_work = 0; | ||
602 | s->parity = 0; | ||
603 | s->rts = 0; | ||
604 | |||
605 | sprintf(b, "max3100-%d", s->minor); | ||
606 | s->workqueue = create_freezeable_workqueue(b); | ||
607 | if (!s->workqueue) { | ||
608 | dev_warn(&s->spi->dev, "cannot create workqueue\n"); | ||
609 | return -EBUSY; | ||
610 | } | ||
611 | INIT_WORK(&s->work, max3100_work); | ||
612 | |||
613 | if (request_irq(s->irq, max3100_irq, | ||
614 | IRQF_TRIGGER_FALLING, "max3100", s) < 0) { | ||
615 | dev_warn(&s->spi->dev, "cannot allocate irq %d\n", s->irq); | ||
616 | s->irq = 0; | ||
617 | destroy_workqueue(s->workqueue); | ||
618 | s->workqueue = NULL; | ||
619 | return -EBUSY; | ||
620 | } | ||
621 | |||
622 | if (s->loopback) { | ||
623 | u16 tx, rx; | ||
624 | tx = 0x4001; | ||
625 | max3100_sr(s, tx, &rx); | ||
626 | } | ||
627 | |||
628 | if (s->max3100_hw_suspend) | ||
629 | s->max3100_hw_suspend(0); | ||
630 | s->conf_commit = 1; | ||
631 | max3100_dowork(s); | ||
632 | /* wait for clock to settle */ | ||
633 | msleep(50); | ||
634 | |||
635 | max3100_enable_ms(&s->port); | ||
636 | |||
637 | return 0; | ||
638 | } | ||
639 | |||
640 | static const char *max3100_type(struct uart_port *port) | ||
641 | { | ||
642 | struct max3100_port *s = container_of(port, | ||
643 | struct max3100_port, | ||
644 | port); | ||
645 | |||
646 | dev_dbg(&s->spi->dev, "%s\n", __func__); | ||
647 | |||
648 | return s->port.type == PORT_MAX3100 ? "MAX3100" : NULL; | ||
649 | } | ||
650 | |||
651 | static void max3100_release_port(struct uart_port *port) | ||
652 | { | ||
653 | struct max3100_port *s = container_of(port, | ||
654 | struct max3100_port, | ||
655 | port); | ||
656 | |||
657 | dev_dbg(&s->spi->dev, "%s\n", __func__); | ||
658 | } | ||
659 | |||
660 | static void max3100_config_port(struct uart_port *port, int flags) | ||
661 | { | ||
662 | struct max3100_port *s = container_of(port, | ||
663 | struct max3100_port, | ||
664 | port); | ||
665 | |||
666 | dev_dbg(&s->spi->dev, "%s\n", __func__); | ||
667 | |||
668 | if (flags & UART_CONFIG_TYPE) | ||
669 | s->port.type = PORT_MAX3100; | ||
670 | } | ||
671 | |||
672 | static int max3100_verify_port(struct uart_port *port, | ||
673 | struct serial_struct *ser) | ||
674 | { | ||
675 | struct max3100_port *s = container_of(port, | ||
676 | struct max3100_port, | ||
677 | port); | ||
678 | int ret = -EINVAL; | ||
679 | |||
680 | dev_dbg(&s->spi->dev, "%s\n", __func__); | ||
681 | |||
682 | if (ser->type == PORT_UNKNOWN || ser->type == PORT_MAX3100) | ||
683 | ret = 0; | ||
684 | return ret; | ||
685 | } | ||
686 | |||
687 | static void max3100_stop_tx(struct uart_port *port) | ||
688 | { | ||
689 | struct max3100_port *s = container_of(port, | ||
690 | struct max3100_port, | ||
691 | port); | ||
692 | |||
693 | dev_dbg(&s->spi->dev, "%s\n", __func__); | ||
694 | } | ||
695 | |||
696 | static int max3100_request_port(struct uart_port *port) | ||
697 | { | ||
698 | struct max3100_port *s = container_of(port, | ||
699 | struct max3100_port, | ||
700 | port); | ||
701 | |||
702 | dev_dbg(&s->spi->dev, "%s\n", __func__); | ||
703 | return 0; | ||
704 | } | ||
705 | |||
706 | static void max3100_break_ctl(struct uart_port *port, int break_state) | ||
707 | { | ||
708 | struct max3100_port *s = container_of(port, | ||
709 | struct max3100_port, | ||
710 | port); | ||
711 | |||
712 | dev_dbg(&s->spi->dev, "%s\n", __func__); | ||
713 | } | ||
714 | |||
715 | static struct uart_ops max3100_ops = { | ||
716 | .tx_empty = max3100_tx_empty, | ||
717 | .set_mctrl = max3100_set_mctrl, | ||
718 | .get_mctrl = max3100_get_mctrl, | ||
719 | .stop_tx = max3100_stop_tx, | ||
720 | .start_tx = max3100_start_tx, | ||
721 | .stop_rx = max3100_stop_rx, | ||
722 | .enable_ms = max3100_enable_ms, | ||
723 | .break_ctl = max3100_break_ctl, | ||
724 | .startup = max3100_startup, | ||
725 | .shutdown = max3100_shutdown, | ||
726 | .set_termios = max3100_set_termios, | ||
727 | .type = max3100_type, | ||
728 | .release_port = max3100_release_port, | ||
729 | .request_port = max3100_request_port, | ||
730 | .config_port = max3100_config_port, | ||
731 | .verify_port = max3100_verify_port, | ||
732 | }; | ||
733 | |||
734 | static struct uart_driver max3100_uart_driver = { | ||
735 | .owner = THIS_MODULE, | ||
736 | .driver_name = "ttyMAX", | ||
737 | .dev_name = "ttyMAX", | ||
738 | .major = MAX3100_MAJOR, | ||
739 | .minor = MAX3100_MINOR, | ||
740 | .nr = MAX_MAX3100, | ||
741 | }; | ||
742 | static int uart_driver_registered; | ||
743 | |||
744 | static int __devinit max3100_probe(struct spi_device *spi) | ||
745 | { | ||
746 | int i, retval; | ||
747 | struct plat_max3100 *pdata; | ||
748 | u16 tx, rx; | ||
749 | |||
750 | mutex_lock(&max3100s_lock); | ||
751 | |||
752 | if (!uart_driver_registered) { | ||
753 | uart_driver_registered = 1; | ||
754 | retval = uart_register_driver(&max3100_uart_driver); | ||
755 | if (retval) { | ||
756 | printk(KERN_ERR "Couldn't register max3100 uart driver\n"); | ||
757 | mutex_unlock(&max3100s_lock); | ||
758 | return retval; | ||
759 | } | ||
760 | } | ||
761 | |||
762 | for (i = 0; i < MAX_MAX3100; i++) | ||
763 | if (!max3100s[i]) | ||
764 | break; | ||
765 | if (i == MAX_MAX3100) { | ||
766 | dev_warn(&spi->dev, "too many MAX3100 chips\n"); | ||
767 | mutex_unlock(&max3100s_lock); | ||
768 | return -ENOMEM; | ||
769 | } | ||
770 | |||
771 | max3100s[i] = kzalloc(sizeof(struct max3100_port), GFP_KERNEL); | ||
772 | if (!max3100s[i]) { | ||
773 | dev_warn(&spi->dev, | ||
774 | "kmalloc for max3100 structure %d failed!\n", i); | ||
775 | mutex_unlock(&max3100s_lock); | ||
776 | return -ENOMEM; | ||
777 | } | ||
778 | max3100s[i]->spi = spi; | ||
779 | max3100s[i]->irq = spi->irq; | ||
780 | spin_lock_init(&max3100s[i]->conf_lock); | ||
781 | dev_set_drvdata(&spi->dev, max3100s[i]); | ||
782 | pdata = spi->dev.platform_data; | ||
783 | max3100s[i]->crystal = pdata->crystal; | ||
784 | max3100s[i]->loopback = pdata->loopback; | ||
785 | max3100s[i]->poll_time = pdata->poll_time * HZ / 1000; | ||
786 | if (pdata->poll_time > 0 && max3100s[i]->poll_time == 0) | ||
787 | max3100s[i]->poll_time = 1; | ||
788 | max3100s[i]->max3100_hw_suspend = pdata->max3100_hw_suspend; | ||
789 | max3100s[i]->minor = i; | ||
790 | init_timer(&max3100s[i]->timer); | ||
791 | max3100s[i]->timer.function = max3100_timeout; | ||
792 | max3100s[i]->timer.data = (unsigned long) max3100s[i]; | ||
793 | |||
794 | dev_dbg(&spi->dev, "%s: adding port %d\n", __func__, i); | ||
795 | max3100s[i]->port.irq = max3100s[i]->irq; | ||
796 | max3100s[i]->port.uartclk = max3100s[i]->crystal ? 3686400 : 1843200; | ||
797 | max3100s[i]->port.fifosize = 16; | ||
798 | max3100s[i]->port.ops = &max3100_ops; | ||
799 | max3100s[i]->port.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF; | ||
800 | max3100s[i]->port.line = i; | ||
801 | max3100s[i]->port.type = PORT_MAX3100; | ||
802 | max3100s[i]->port.dev = &spi->dev; | ||
803 | retval = uart_add_one_port(&max3100_uart_driver, &max3100s[i]->port); | ||
804 | if (retval < 0) | ||
805 | dev_warn(&spi->dev, | ||
806 | "uart_add_one_port failed for line %d with error %d\n", | ||
807 | i, retval); | ||
808 | |||
809 | /* set shutdown mode to save power. Will be woken up on open */ | ||
810 | if (max3100s[i]->max3100_hw_suspend) | ||
811 | max3100s[i]->max3100_hw_suspend(1); | ||
812 | else { | ||
813 | tx = MAX3100_WC | MAX3100_SHDN; | ||
814 | max3100_sr(max3100s[i], tx, &rx); | ||
815 | } | ||
816 | mutex_unlock(&max3100s_lock); | ||
817 | return 0; | ||
818 | } | ||
819 | |||
820 | static int __devexit max3100_remove(struct spi_device *spi) | ||
821 | { | ||
822 | struct max3100_port *s = dev_get_drvdata(&spi->dev); | ||
823 | int i; | ||
824 | |||
825 | mutex_lock(&max3100s_lock); | ||
826 | |||
827 | /* find out the index for the chip we are removing */ | ||
828 | for (i = 0; i < MAX_MAX3100; i++) | ||
829 | if (max3100s[i] == s) | ||
830 | break; | ||
831 | |||
832 | dev_dbg(&spi->dev, "%s: removing port %d\n", __func__, i); | ||
833 | uart_remove_one_port(&max3100_uart_driver, &max3100s[i]->port); | ||
834 | kfree(max3100s[i]); | ||
835 | max3100s[i] = NULL; | ||
836 | |||
837 | /* check if this is the last chip we have */ | ||
838 | for (i = 0; i < MAX_MAX3100; i++) | ||
839 | if (max3100s[i]) { | ||
840 | mutex_unlock(&max3100s_lock); | ||
841 | return 0; | ||
842 | } | ||
843 | pr_debug("removing max3100 driver\n"); | ||
844 | uart_unregister_driver(&max3100_uart_driver); | ||
845 | |||
846 | mutex_unlock(&max3100s_lock); | ||
847 | return 0; | ||
848 | } | ||
849 | |||
850 | #ifdef CONFIG_PM | ||
851 | |||
852 | static int max3100_suspend(struct spi_device *spi, pm_message_t state) | ||
853 | { | ||
854 | struct max3100_port *s = dev_get_drvdata(&spi->dev); | ||
855 | |||
856 | dev_dbg(&s->spi->dev, "%s\n", __func__); | ||
857 | |||
858 | disable_irq(s->irq); | ||
859 | |||
860 | s->suspending = 1; | ||
861 | uart_suspend_port(&max3100_uart_driver, &s->port); | ||
862 | |||
863 | if (s->max3100_hw_suspend) | ||
864 | s->max3100_hw_suspend(1); | ||
865 | else { | ||
866 | /* no HW suspend, so do SW one */ | ||
867 | u16 tx, rx; | ||
868 | |||
869 | tx = MAX3100_WC | MAX3100_SHDN; | ||
870 | max3100_sr(s, tx, &rx); | ||
871 | } | ||
872 | return 0; | ||
873 | } | ||
874 | |||
875 | static int max3100_resume(struct spi_device *spi) | ||
876 | { | ||
877 | struct max3100_port *s = dev_get_drvdata(&spi->dev); | ||
878 | |||
879 | dev_dbg(&s->spi->dev, "%s\n", __func__); | ||
880 | |||
881 | if (s->max3100_hw_suspend) | ||
882 | s->max3100_hw_suspend(0); | ||
883 | uart_resume_port(&max3100_uart_driver, &s->port); | ||
884 | s->suspending = 0; | ||
885 | |||
886 | enable_irq(s->irq); | ||
887 | |||
888 | s->conf_commit = 1; | ||
889 | if (s->workqueue) | ||
890 | max3100_dowork(s); | ||
891 | |||
892 | return 0; | ||
893 | } | ||
894 | |||
895 | #else | ||
896 | #define max3100_suspend NULL | ||
897 | #define max3100_resume NULL | ||
898 | #endif | ||
899 | |||
900 | static struct spi_driver max3100_driver = { | ||
901 | .driver = { | ||
902 | .name = "max3100", | ||
903 | .bus = &spi_bus_type, | ||
904 | .owner = THIS_MODULE, | ||
905 | }, | ||
906 | |||
907 | .probe = max3100_probe, | ||
908 | .remove = __devexit_p(max3100_remove), | ||
909 | .suspend = max3100_suspend, | ||
910 | .resume = max3100_resume, | ||
911 | }; | ||
912 | |||
913 | static int __init max3100_init(void) | ||
914 | { | ||
915 | return spi_register_driver(&max3100_driver); | ||
916 | } | ||
917 | module_init(max3100_init); | ||
918 | |||
919 | static void __exit max3100_exit(void) | ||
920 | { | ||
921 | spi_unregister_driver(&max3100_driver); | ||
922 | } | ||
923 | module_exit(max3100_exit); | ||
924 | |||
925 | MODULE_DESCRIPTION("MAX3100 driver"); | ||
926 | MODULE_AUTHOR("Christian Pellegrin <chripell@evolware.org>"); | ||
927 | MODULE_LICENSE("GPL"); | ||
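The new driver above registers each MAX3100 as /dev/ttyMAX<n> (major 204, minors from 209, per the header comment). A minimal userspace sketch for exercising such a port follows; the device path and the 9600 8N1 settings are illustrative assumptions, not part of the patch.

#include <fcntl.h>
#include <stdio.h>
#include <termios.h>
#include <unistd.h>

int main(void)
{
	/* Node created as in the driver's header comment:
	 * mknod /dev/ttyMAX0 c 204 209 (path is an assumption) */
	int fd = open("/dev/ttyMAX0", O_RDWR | O_NOCTTY);
	struct termios tio;

	if (fd < 0) {
		perror("open /dev/ttyMAX0");
		return 1;
	}

	tcgetattr(fd, &tio);
	cfmakeraw(&tio);
	/* 9600 baud, 8N1: one of the rates max3100_set_termios() handles */
	cfsetispeed(&tio, B9600);
	cfsetospeed(&tio, B9600);
	tio.c_cflag |= CS8 | CREAD | CLOCAL;
	tcsetattr(fd, TCSANOW, &tio);

	write(fd, "hello\r\n", 7);
	close(fd);
	return 0;
}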
diff --git a/drivers/serial/samsung.c b/drivers/serial/samsung.c index 41ac94872b8d..e06686ae858b 100644 --- a/drivers/serial/samsung.c +++ b/drivers/serial/samsung.c | |||
@@ -127,7 +127,7 @@ static void s3c24xx_serial_stop_tx(struct uart_port *port) | |||
127 | struct s3c24xx_uart_port *ourport = to_ourport(port); | 127 | struct s3c24xx_uart_port *ourport = to_ourport(port); |
128 | 128 | ||
129 | if (tx_enabled(port)) { | 129 | if (tx_enabled(port)) { |
130 | disable_irq(ourport->tx_irq); | 130 | disable_irq_nosync(ourport->tx_irq); |
131 | tx_enabled(port) = 0; | 131 | tx_enabled(port) = 0; |
132 | if (port->flags & UPF_CONS_FLOW) | 132 | if (port->flags & UPF_CONS_FLOW) |
133 | s3c24xx_serial_rx_enable(port); | 133 | s3c24xx_serial_rx_enable(port); |
@@ -154,7 +154,7 @@ static void s3c24xx_serial_stop_rx(struct uart_port *port) | |||
154 | 154 | ||
155 | if (rx_enabled(port)) { | 155 | if (rx_enabled(port)) { |
156 | dbg("s3c24xx_serial_stop_rx: port=%p\n", port); | 156 | dbg("s3c24xx_serial_stop_rx: port=%p\n", port); |
157 | disable_irq(ourport->rx_irq); | 157 | disable_irq_nosync(ourport->rx_irq); |
158 | rx_enabled(port) = 0; | 158 | rx_enabled(port) = 0; |
159 | } | 159 | } |
160 | } | 160 | } |
diff --git a/drivers/serial/sunsu.c b/drivers/serial/sunsu.c index a4dc79b1d7ab..47c6837850b1 100644 --- a/drivers/serial/sunsu.c +++ b/drivers/serial/sunsu.c | |||
@@ -1178,7 +1178,7 @@ static struct uart_driver sunsu_reg = { | |||
1178 | .major = TTY_MAJOR, | 1178 | .major = TTY_MAJOR, |
1179 | }; | 1179 | }; |
1180 | 1180 | ||
1181 | static int __init sunsu_kbd_ms_init(struct uart_sunsu_port *up) | 1181 | static int __devinit sunsu_kbd_ms_init(struct uart_sunsu_port *up) |
1182 | { | 1182 | { |
1183 | int quot, baud; | 1183 | int quot, baud; |
1184 | #ifdef CONFIG_SERIO | 1184 | #ifdef CONFIG_SERIO |
diff --git a/drivers/sh/intc.c b/drivers/sh/intc.c index 7fb9b5c4669a..12d13d99b6f0 100644 --- a/drivers/sh/intc.c +++ b/drivers/sh/intc.c | |||
@@ -44,6 +44,7 @@ struct intc_handle_int { | |||
44 | struct intc_desc_int { | 44 | struct intc_desc_int { |
45 | struct list_head list; | 45 | struct list_head list; |
46 | struct sys_device sysdev; | 46 | struct sys_device sysdev; |
47 | pm_message_t state; | ||
47 | unsigned long *reg; | 48 | unsigned long *reg; |
48 | #ifdef CONFIG_SMP | 49 | #ifdef CONFIG_SMP |
49 | unsigned long *smp; | 50 | unsigned long *smp; |
@@ -786,18 +787,44 @@ static int intc_suspend(struct sys_device *dev, pm_message_t state) | |||
786 | /* get intc controller associated with this sysdev */ | 787 | /* get intc controller associated with this sysdev */ |
787 | d = container_of(dev, struct intc_desc_int, sysdev); | 788 | d = container_of(dev, struct intc_desc_int, sysdev); |
788 | 789 | ||
789 | /* enable wakeup irqs belonging to this intc controller */ | 790 | switch (state.event) { |
790 | for_each_irq_desc(irq, desc) { | 791 | case PM_EVENT_ON: |
791 | if ((desc->status & IRQ_WAKEUP) && (desc->chip == &d->chip)) | 792 | if (d->state.event != PM_EVENT_FREEZE) |
792 | intc_enable(irq); | 793 | break; |
794 | for_each_irq_desc(irq, desc) { | ||
795 | if (desc->chip != &d->chip) | ||
796 | continue; | ||
797 | if (desc->status & IRQ_DISABLED) | ||
798 | intc_disable(irq); | ||
799 | else | ||
800 | intc_enable(irq); | ||
801 | } | ||
802 | break; | ||
803 | case PM_EVENT_FREEZE: | ||
804 | /* nothing has to be done */ | ||
805 | break; | ||
806 | case PM_EVENT_SUSPEND: | ||
807 | /* enable wakeup irqs belonging to this intc controller */ | ||
808 | for_each_irq_desc(irq, desc) { | ||
809 | if ((desc->status & IRQ_WAKEUP) && (desc->chip == &d->chip)) | ||
810 | intc_enable(irq); | ||
811 | } | ||
812 | break; | ||
793 | } | 813 | } |
814 | d->state = state; | ||
794 | 815 | ||
795 | return 0; | 816 | return 0; |
796 | } | 817 | } |
797 | 818 | ||
819 | static int intc_resume(struct sys_device *dev) | ||
820 | { | ||
821 | return intc_suspend(dev, PMSG_ON); | ||
822 | } | ||
823 | |||
798 | static struct sysdev_class intc_sysdev_class = { | 824 | static struct sysdev_class intc_sysdev_class = { |
799 | .name = "intc", | 825 | .name = "intc", |
800 | .suspend = intc_suspend, | 826 | .suspend = intc_suspend, |
827 | .resume = intc_resume, | ||
801 | }; | 828 | }; |
802 | 829 | ||
803 | /* register this intc as sysdev to allow suspend/resume */ | 830 | /* register this intc as sysdev to allow suspend/resume */ |
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c index c1688c71f052..c76feea5fe25 100644 --- a/drivers/spi/pxa2xx_spi.c +++ b/drivers/spi/pxa2xx_spi.c | |||
@@ -195,7 +195,7 @@ static void cs_deassert(struct driver_data *drv_data) | |||
195 | struct chip_data *chip = drv_data->cur_chip; | 195 | struct chip_data *chip = drv_data->cur_chip; |
196 | 196 | ||
197 | if (chip->cs_control) { | 197 | if (chip->cs_control) { |
198 | chip->cs_control(PXA2XX_CS_ASSERT); | 198 | chip->cs_control(PXA2XX_CS_DEASSERT); |
199 | return; | 199 | return; |
200 | } | 200 | } |
201 | 201 | ||
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 643908b74bc0..8eba98c8ed1e 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c | |||
@@ -658,7 +658,7 @@ int spi_write_then_read(struct spi_device *spi, | |||
658 | 658 | ||
659 | int status; | 659 | int status; |
660 | struct spi_message message; | 660 | struct spi_message message; |
661 | struct spi_transfer x; | 661 | struct spi_transfer x[2]; |
662 | u8 *local_buf; | 662 | u8 *local_buf; |
663 | 663 | ||
664 | /* Use preallocated DMA-safe buffer. We can't avoid copying here, | 664 | /* Use preallocated DMA-safe buffer. We can't avoid copying here, |
@@ -669,9 +669,15 @@ int spi_write_then_read(struct spi_device *spi, | |||
669 | return -EINVAL; | 669 | return -EINVAL; |
670 | 670 | ||
671 | spi_message_init(&message); | 671 | spi_message_init(&message); |
672 | memset(&x, 0, sizeof x); | 672 | memset(x, 0, sizeof x); |
673 | x.len = n_tx + n_rx; | 673 | if (n_tx) { |
674 | spi_message_add_tail(&x, &message); | 674 | x[0].len = n_tx; |
675 | spi_message_add_tail(&x[0], &message); | ||
676 | } | ||
677 | if (n_rx) { | ||
678 | x[1].len = n_rx; | ||
679 | spi_message_add_tail(&x[1], &message); | ||
680 | } | ||
675 | 681 | ||
676 | /* ... unless someone else is using the pre-allocated buffer */ | 682 | /* ... unless someone else is using the pre-allocated buffer */ |
677 | if (!mutex_trylock(&lock)) { | 683 | if (!mutex_trylock(&lock)) { |
@@ -682,15 +688,15 @@ int spi_write_then_read(struct spi_device *spi, | |||
682 | local_buf = buf; | 688 | local_buf = buf; |
683 | 689 | ||
684 | memcpy(local_buf, txbuf, n_tx); | 690 | memcpy(local_buf, txbuf, n_tx); |
685 | x.tx_buf = local_buf; | 691 | x[0].tx_buf = local_buf; |
686 | x.rx_buf = local_buf; | 692 | x[1].rx_buf = local_buf + n_tx; |
687 | 693 | ||
688 | /* do the i/o */ | 694 | /* do the i/o */ |
689 | status = spi_sync(spi, &message); | 695 | status = spi_sync(spi, &message); |
690 | if (status == 0) | 696 | if (status == 0) |
691 | memcpy(rxbuf, x.rx_buf + n_tx, n_rx); | 697 | memcpy(rxbuf, x[1].rx_buf, n_rx); |
692 | 698 | ||
693 | if (x.tx_buf == buf) | 699 | if (x[0].tx_buf == buf) |
694 | mutex_unlock(&lock); | 700 | mutex_unlock(&lock); |
695 | else | 701 | else |
696 | kfree(local_buf); | 702 | kfree(local_buf); |
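For context, the hunk above makes spi_write_then_read() queue separate TX and RX transfers instead of one combined full-duplex transfer, so the read bytes now land directly after the written bytes in the shared bounce buffer. Callers see no API change; a typical call site might look like the sketch below (the opcode and error message are illustrative assumptions).

/* Send a one-byte command and read one status byte back through the
 * half-duplex helper; "spi" is the caller's struct spi_device. */
u8 cmd = 0x05;		/* made-up opcode, for illustration only */
u8 status;
int err;

err = spi_write_then_read(spi, &cmd, sizeof(cmd), &status, sizeof(status));
if (err)
	dev_err(&spi->dev, "status read failed: %d\n", err);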
diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c index 79e90fed27d3..299d29d1dadb 100644 --- a/drivers/staging/android/binder.c +++ b/drivers/staging/android/binder.c | |||
@@ -41,6 +41,8 @@ static int binder_last_id; | |||
41 | static struct proc_dir_entry *binder_proc_dir_entry_root; | 41 | static struct proc_dir_entry *binder_proc_dir_entry_root; |
42 | static struct proc_dir_entry *binder_proc_dir_entry_proc; | 42 | static struct proc_dir_entry *binder_proc_dir_entry_proc; |
43 | static struct hlist_head binder_dead_nodes; | 43 | static struct hlist_head binder_dead_nodes; |
44 | static HLIST_HEAD(binder_deferred_list); | ||
45 | static DEFINE_MUTEX(binder_deferred_lock); | ||
44 | 46 | ||
45 | static int binder_read_proc_proc( | 47 | static int binder_read_proc_proc( |
46 | char *page, char **start, off_t off, int count, int *eof, void *data); | 48 | char *page, char **start, off_t off, int count, int *eof, void *data); |
@@ -54,11 +56,7 @@ static int binder_read_proc_proc( | |||
54 | #define SZ_4M 0x400000 | 56 | #define SZ_4M 0x400000 |
55 | #endif | 57 | #endif |
56 | 58 | ||
57 | #ifndef __i386__ | ||
58 | #define FORBIDDEN_MMAP_FLAGS (VM_WRITE | VM_EXEC) | ||
59 | #else | ||
60 | #define FORBIDDEN_MMAP_FLAGS (VM_WRITE) | 59 | #define FORBIDDEN_MMAP_FLAGS (VM_WRITE) |
61 | #endif | ||
62 | 60 | ||
63 | #define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64) | 61 | #define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64) |
64 | 62 | ||
@@ -236,6 +234,12 @@ struct binder_buffer { | |||
236 | uint8_t data[0]; | 234 | uint8_t data[0]; |
237 | }; | 235 | }; |
238 | 236 | ||
237 | enum { | ||
238 | BINDER_DEFERRED_PUT_FILES = 0x01, | ||
239 | BINDER_DEFERRED_FLUSH = 0x02, | ||
240 | BINDER_DEFERRED_RELEASE = 0x04, | ||
241 | }; | ||
242 | |||
239 | struct binder_proc { | 243 | struct binder_proc { |
240 | struct hlist_node proc_node; | 244 | struct hlist_node proc_node; |
241 | struct rb_root threads; | 245 | struct rb_root threads; |
@@ -245,8 +249,11 @@ struct binder_proc { | |||
245 | int pid; | 249 | int pid; |
246 | struct vm_area_struct *vma; | 250 | struct vm_area_struct *vma; |
247 | struct task_struct *tsk; | 251 | struct task_struct *tsk; |
252 | struct files_struct *files; | ||
253 | struct hlist_node deferred_work_node; | ||
254 | int deferred_work; | ||
248 | void *buffer; | 255 | void *buffer; |
249 | size_t user_buffer_offset; | 256 | ptrdiff_t user_buffer_offset; |
250 | 257 | ||
251 | struct list_head buffers; | 258 | struct list_head buffers; |
252 | struct rb_root free_buffers; | 259 | struct rb_root free_buffers; |
@@ -310,12 +317,14 @@ struct binder_transaction { | |||
310 | uid_t sender_euid; | 317 | uid_t sender_euid; |
311 | }; | 318 | }; |
312 | 319 | ||
320 | static void binder_defer_work(struct binder_proc *proc, int defer); | ||
321 | |||
313 | /* | 322 | /* |
314 | * copied from get_unused_fd_flags | 323 | * copied from get_unused_fd_flags |
315 | */ | 324 | */ |
316 | int task_get_unused_fd_flags(struct task_struct *tsk, int flags) | 325 | int task_get_unused_fd_flags(struct binder_proc *proc, int flags) |
317 | { | 326 | { |
318 | struct files_struct *files = get_files_struct(tsk); | 327 | struct files_struct *files = proc->files; |
319 | int fd, error; | 328 | int fd, error; |
320 | struct fdtable *fdt; | 329 | struct fdtable *fdt; |
321 | unsigned long rlim_cur; | 330 | unsigned long rlim_cur; |
@@ -337,9 +346,9 @@ repeat: | |||
337 | * will limit the total number of files that can be opened. | 346 | * will limit the total number of files that can be opened. |
338 | */ | 347 | */ |
339 | rlim_cur = 0; | 348 | rlim_cur = 0; |
340 | if (lock_task_sighand(tsk, &irqs)) { | 349 | if (lock_task_sighand(proc->tsk, &irqs)) { |
341 | rlim_cur = tsk->signal->rlim[RLIMIT_NOFILE].rlim_cur; | 350 | rlim_cur = proc->tsk->signal->rlim[RLIMIT_NOFILE].rlim_cur; |
342 | unlock_task_sighand(tsk, &irqs); | 351 | unlock_task_sighand(proc->tsk, &irqs); |
343 | } | 352 | } |
344 | if (fd >= rlim_cur) | 353 | if (fd >= rlim_cur) |
345 | goto out; | 354 | goto out; |
@@ -375,7 +384,6 @@ repeat: | |||
375 | 384 | ||
376 | out: | 385 | out: |
377 | spin_unlock(&files->file_lock); | 386 | spin_unlock(&files->file_lock); |
378 | put_files_struct(files); | ||
379 | return error; | 387 | return error; |
380 | } | 388 | } |
381 | 389 | ||
@@ -383,9 +391,9 @@ out: | |||
383 | * copied from fd_install | 391 | * copied from fd_install |
384 | */ | 392 | */ |
385 | static void task_fd_install( | 393 | static void task_fd_install( |
386 | struct task_struct *tsk, unsigned int fd, struct file *file) | 394 | struct binder_proc *proc, unsigned int fd, struct file *file) |
387 | { | 395 | { |
388 | struct files_struct *files = get_files_struct(tsk); | 396 | struct files_struct *files = proc->files; |
389 | struct fdtable *fdt; | 397 | struct fdtable *fdt; |
390 | 398 | ||
391 | if (files == NULL) | 399 | if (files == NULL) |
@@ -396,7 +404,6 @@ static void task_fd_install( | |||
396 | BUG_ON(fdt->fd[fd] != NULL); | 404 | BUG_ON(fdt->fd[fd] != NULL); |
397 | rcu_assign_pointer(fdt->fd[fd], file); | 405 | rcu_assign_pointer(fdt->fd[fd], file); |
398 | spin_unlock(&files->file_lock); | 406 | spin_unlock(&files->file_lock); |
399 | put_files_struct(files); | ||
400 | } | 407 | } |
401 | 408 | ||
402 | /* | 409 | /* |
@@ -413,10 +420,10 @@ static void __put_unused_fd(struct files_struct *files, unsigned int fd) | |||
413 | /* | 420 | /* |
414 | * copied from sys_close | 421 | * copied from sys_close |
415 | */ | 422 | */ |
416 | static long task_close_fd(struct task_struct *tsk, unsigned int fd) | 423 | static long task_close_fd(struct binder_proc *proc, unsigned int fd) |
417 | { | 424 | { |
418 | struct file *filp; | 425 | struct file *filp; |
419 | struct files_struct *files = get_files_struct(tsk); | 426 | struct files_struct *files = proc->files; |
420 | struct fdtable *fdt; | 427 | struct fdtable *fdt; |
421 | int retval; | 428 | int retval; |
422 | 429 | ||
@@ -443,12 +450,10 @@ static long task_close_fd(struct task_struct *tsk, unsigned int fd) | |||
443 | retval == -ERESTART_RESTARTBLOCK)) | 450 | retval == -ERESTART_RESTARTBLOCK)) |
444 | retval = -EINTR; | 451 | retval = -EINTR; |
445 | 452 | ||
446 | put_files_struct(files); | ||
447 | return retval; | 453 | return retval; |
448 | 454 | ||
449 | out_unlock: | 455 | out_unlock: |
450 | spin_unlock(&files->file_lock); | 456 | spin_unlock(&files->file_lock); |
451 | put_files_struct(files); | ||
452 | return -EBADF; | 457 | return -EBADF; |
453 | } | 458 | } |
454 | 459 | ||
@@ -618,7 +623,8 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate, | |||
618 | proc->pid, page_addr); | 623 | proc->pid, page_addr); |
619 | goto err_map_kernel_failed; | 624 | goto err_map_kernel_failed; |
620 | } | 625 | } |
621 | user_page_addr = (size_t)page_addr + proc->user_buffer_offset; | 626 | user_page_addr = |
627 | (uintptr_t)page_addr + proc->user_buffer_offset; | ||
622 | ret = vm_insert_page(vma, user_page_addr, page[0]); | 628 | ret = vm_insert_page(vma, user_page_addr, page[0]); |
623 | if (ret) { | 629 | if (ret) { |
624 | printk(KERN_ERR "binder: %d: binder_alloc_buf failed " | 630 | printk(KERN_ERR "binder: %d: binder_alloc_buf failed " |
@@ -639,7 +645,7 @@ free_range: | |||
639 | page_addr -= PAGE_SIZE) { | 645 | page_addr -= PAGE_SIZE) { |
640 | page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE]; | 646 | page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE]; |
641 | if (vma) | 647 | if (vma) |
642 | zap_page_range(vma, (size_t)page_addr + | 648 | zap_page_range(vma, (uintptr_t)page_addr + |
643 | proc->user_buffer_offset, PAGE_SIZE, NULL); | 649 | proc->user_buffer_offset, PAGE_SIZE, NULL); |
644 | err_vm_insert_page_failed: | 650 | err_vm_insert_page_failed: |
645 | unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE); | 651 | unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE); |
@@ -720,18 +726,19 @@ static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc, | |||
720 | "er %p size %zd\n", proc->pid, size, buffer, buffer_size); | 726 | "er %p size %zd\n", proc->pid, size, buffer, buffer_size); |
721 | 727 | ||
722 | has_page_addr = | 728 | has_page_addr = |
723 | (void *)(((size_t)buffer->data + buffer_size) & PAGE_MASK); | 729 | (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK); |
724 | if (n == NULL) { | 730 | if (n == NULL) { |
725 | if (size + sizeof(struct binder_buffer) + 4 >= buffer_size) | 731 | if (size + sizeof(struct binder_buffer) + 4 >= buffer_size) |
726 | buffer_size = size; /* no room for other buffers */ | 732 | buffer_size = size; /* no room for other buffers */ |
727 | else | 733 | else |
728 | buffer_size = size + sizeof(struct binder_buffer); | 734 | buffer_size = size + sizeof(struct binder_buffer); |
729 | } | 735 | } |
730 | end_page_addr = (void *)PAGE_ALIGN((size_t)buffer->data + buffer_size); | 736 | end_page_addr = |
737 | (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size); | ||
731 | if (end_page_addr > has_page_addr) | 738 | if (end_page_addr > has_page_addr) |
732 | end_page_addr = has_page_addr; | 739 | end_page_addr = has_page_addr; |
733 | if (binder_update_page_range(proc, 1, | 740 | if (binder_update_page_range(proc, 1, |
734 | (void *)PAGE_ALIGN((size_t)buffer->data), end_page_addr, NULL)) | 741 | (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL)) |
735 | return NULL; | 742 | return NULL; |
736 | 743 | ||
737 | rb_erase(best_fit, &proc->free_buffers); | 744 | rb_erase(best_fit, &proc->free_buffers); |
@@ -762,12 +769,12 @@ static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc, | |||
762 | 769 | ||
763 | static void *buffer_start_page(struct binder_buffer *buffer) | 770 | static void *buffer_start_page(struct binder_buffer *buffer) |
764 | { | 771 | { |
765 | return (void *)((size_t)buffer & PAGE_MASK); | 772 | return (void *)((uintptr_t)buffer & PAGE_MASK); |
766 | } | 773 | } |
767 | 774 | ||
768 | static void *buffer_end_page(struct binder_buffer *buffer) | 775 | static void *buffer_end_page(struct binder_buffer *buffer) |
769 | { | 776 | { |
770 | return (void *)(((size_t)(buffer + 1) - 1) & PAGE_MASK); | 777 | return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK); |
771 | } | 778 | } |
772 | 779 | ||
773 | static void binder_delete_free_buffer( | 780 | static void binder_delete_free_buffer( |
@@ -845,8 +852,8 @@ static void binder_free_buf( | |||
845 | } | 852 | } |
846 | 853 | ||
847 | binder_update_page_range(proc, 0, | 854 | binder_update_page_range(proc, 0, |
848 | (void *)PAGE_ALIGN((size_t)buffer->data), | 855 | (void *)PAGE_ALIGN((uintptr_t)buffer->data), |
849 | (void *)(((size_t)buffer->data + buffer_size) & PAGE_MASK), | 856 | (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK), |
850 | NULL); | 857 | NULL); |
851 | rb_erase(&buffer->rb_node, &proc->allocated_buffers); | 858 | rb_erase(&buffer->rb_node, &proc->allocated_buffers); |
852 | buffer->free = 1; | 859 | buffer->free = 1; |
@@ -1345,6 +1352,17 @@ binder_transaction(struct binder_proc *proc, struct binder_thread *thread, | |||
1345 | if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) { | 1352 | if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) { |
1346 | struct binder_transaction *tmp; | 1353 | struct binder_transaction *tmp; |
1347 | tmp = thread->transaction_stack; | 1354 | tmp = thread->transaction_stack; |
1355 | if (tmp->to_thread != thread) { | ||
1356 | binder_user_error("binder: %d:%d got new " | ||
1357 | "transaction with bad transaction stack" | ||
1358 | ", transaction %d has target %d:%d\n", | ||
1359 | proc->pid, thread->pid, tmp->debug_id, | ||
1360 | tmp->to_proc ? tmp->to_proc->pid : 0, | ||
1361 | tmp->to_thread ? | ||
1362 | tmp->to_thread->pid : 0); | ||
1363 | return_error = BR_FAILED_REPLY; | ||
1364 | goto err_bad_call_stack; | ||
1365 | } | ||
1348 | while (tmp) { | 1366 | while (tmp) { |
1349 | if (tmp->from && tmp->from->proc == target_proc) | 1367 | if (tmp->from && tmp->from->proc == target_proc) |
1350 | target_thread = tmp->from; | 1368 | target_thread = tmp->from; |
@@ -1434,10 +1452,19 @@ binder_transaction(struct binder_proc *proc, struct binder_thread *thread, | |||
1434 | return_error = BR_FAILED_REPLY; | 1452 | return_error = BR_FAILED_REPLY; |
1435 | goto err_copy_data_failed; | 1453 | goto err_copy_data_failed; |
1436 | } | 1454 | } |
1455 | if (!IS_ALIGNED(tr->offsets_size, sizeof(size_t))) { | ||
1456 | binder_user_error("binder: %d:%d got transaction with " | ||
1457 | "invalid offsets size, %zd\n", | ||
1458 | proc->pid, thread->pid, tr->offsets_size); | ||
1459 | return_error = BR_FAILED_REPLY; | ||
1460 | goto err_bad_offset; | ||
1461 | } | ||
1437 | off_end = (void *)offp + tr->offsets_size; | 1462 | off_end = (void *)offp + tr->offsets_size; |
1438 | for (; offp < off_end; offp++) { | 1463 | for (; offp < off_end; offp++) { |
1439 | struct flat_binder_object *fp; | 1464 | struct flat_binder_object *fp; |
1440 | if (*offp > t->buffer->data_size - sizeof(*fp)) { | 1465 | if (*offp > t->buffer->data_size - sizeof(*fp) || |
1466 | t->buffer->data_size < sizeof(*fp) || | ||
1467 | !IS_ALIGNED(*offp, sizeof(void *))) { | ||
1441 | binder_user_error("binder: %d:%d got transaction with " | 1468 | binder_user_error("binder: %d:%d got transaction with " |
1442 | "invalid offset, %zd\n", | 1469 | "invalid offset, %zd\n", |
1443 | proc->pid, thread->pid, *offp); | 1470 | proc->pid, thread->pid, *offp); |
@@ -1544,13 +1571,13 @@ binder_transaction(struct binder_proc *proc, struct binder_thread *thread, | |||
1544 | return_error = BR_FAILED_REPLY; | 1571 | return_error = BR_FAILED_REPLY; |
1545 | goto err_fget_failed; | 1572 | goto err_fget_failed; |
1546 | } | 1573 | } |
1547 | target_fd = task_get_unused_fd_flags(target_proc->tsk, O_CLOEXEC); | 1574 | target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC); |
1548 | if (target_fd < 0) { | 1575 | if (target_fd < 0) { |
1549 | fput(file); | 1576 | fput(file); |
1550 | return_error = BR_FAILED_REPLY; | 1577 | return_error = BR_FAILED_REPLY; |
1551 | goto err_get_unused_fd_failed; | 1578 | goto err_get_unused_fd_failed; |
1552 | } | 1579 | } |
1553 | task_fd_install(target_proc->tsk, target_fd, file); | 1580 | task_fd_install(target_proc, target_fd, file); |
1554 | if (binder_debug_mask & BINDER_DEBUG_TRANSACTION) | 1581 | if (binder_debug_mask & BINDER_DEBUG_TRANSACTION) |
1555 | printk(KERN_INFO " fd %ld -> %d\n", fp->handle, target_fd); | 1582 | printk(KERN_INFO " fd %ld -> %d\n", fp->handle, target_fd); |
1556 | /* TODO: fput? */ | 1583 | /* TODO: fput? */ |
@@ -1655,7 +1682,9 @@ binder_transaction_buffer_release(struct binder_proc *proc, struct binder_buffer | |||
1655 | off_end = (void *)offp + buffer->offsets_size; | 1682 | off_end = (void *)offp + buffer->offsets_size; |
1656 | for (; offp < off_end; offp++) { | 1683 | for (; offp < off_end; offp++) { |
1657 | struct flat_binder_object *fp; | 1684 | struct flat_binder_object *fp; |
1658 | if (*offp > buffer->data_size - sizeof(*fp)) { | 1685 | if (*offp > buffer->data_size - sizeof(*fp) || |
1686 | buffer->data_size < sizeof(*fp) || | ||
1687 | !IS_ALIGNED(*offp, sizeof(void *))) { | ||
1659 | printk(KERN_ERR "binder: transaction release %d bad" | 1688 | printk(KERN_ERR "binder: transaction release %d bad" |
1660 | "offset %zd, size %zd\n", debug_id, *offp, buffer->data_size); | 1689 | "offset %zd, size %zd\n", debug_id, *offp, buffer->data_size); |
1661 | continue; | 1690 | continue; |
@@ -1691,7 +1720,7 @@ binder_transaction_buffer_release(struct binder_proc *proc, struct binder_buffer | |||
1691 | if (binder_debug_mask & BINDER_DEBUG_TRANSACTION) | 1720 | if (binder_debug_mask & BINDER_DEBUG_TRANSACTION) |
1692 | printk(KERN_INFO " fd %ld\n", fp->handle); | 1721 | printk(KERN_INFO " fd %ld\n", fp->handle); |
1693 | if (failed_at) | 1722 | if (failed_at) |
1694 | task_close_fd(proc->tsk, fp->handle); | 1723 | task_close_fd(proc, fp->handle); |
1695 | break; | 1724 | break; |
1696 | 1725 | ||
1697 | default: | 1726 | default: |
@@ -2340,7 +2369,7 @@ retry: | |||
2340 | 2369 | ||
2341 | tr.data_size = t->buffer->data_size; | 2370 | tr.data_size = t->buffer->data_size; |
2342 | tr.offsets_size = t->buffer->offsets_size; | 2371 | tr.offsets_size = t->buffer->offsets_size; |
2343 | tr.data.ptr.buffer = (void *)((void *)t->buffer->data + proc->user_buffer_offset); | 2372 | tr.data.ptr.buffer = (void *)t->buffer->data + proc->user_buffer_offset; |
2344 | tr.data.ptr.offsets = tr.data.ptr.buffer + ALIGN(t->buffer->data_size, sizeof(void *)); | 2373 | tr.data.ptr.offsets = tr.data.ptr.buffer + ALIGN(t->buffer->data_size, sizeof(void *)); |
2345 | 2374 | ||
2346 | if (put_user(cmd, (uint32_t __user *)ptr)) | 2375 | if (put_user(cmd, (uint32_t __user *)ptr)) |
@@ -2656,6 +2685,7 @@ static void binder_vma_open(struct vm_area_struct *vma) | |||
2656 | (unsigned long)pgprot_val(vma->vm_page_prot)); | 2685 | (unsigned long)pgprot_val(vma->vm_page_prot)); |
2657 | dump_stack(); | 2686 | dump_stack(); |
2658 | } | 2687 | } |
2688 | |||
2659 | static void binder_vma_close(struct vm_area_struct *vma) | 2689 | static void binder_vma_close(struct vm_area_struct *vma) |
2660 | { | 2690 | { |
2661 | struct binder_proc *proc = vma->vm_private_data; | 2691 | struct binder_proc *proc = vma->vm_private_data; |
@@ -2666,6 +2696,7 @@ static void binder_vma_close(struct vm_area_struct *vma) | |||
2666 | (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, | 2696 | (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, |
2667 | (unsigned long)pgprot_val(vma->vm_page_prot)); | 2697 | (unsigned long)pgprot_val(vma->vm_page_prot)); |
2668 | proc->vma = NULL; | 2698 | proc->vma = NULL; |
2699 | binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES); | ||
2669 | } | 2700 | } |
2670 | 2701 | ||
2671 | static struct vm_operations_struct binder_vm_ops = { | 2702 | static struct vm_operations_struct binder_vm_ops = { |
@@ -2698,6 +2729,12 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma) | |||
2698 | } | 2729 | } |
2699 | vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE; | 2730 | vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE; |
2700 | 2731 | ||
2732 | if (proc->buffer) { | ||
2733 | ret = -EBUSY; | ||
2734 | failure_string = "already mapped"; | ||
2735 | goto err_already_mapped; | ||
2736 | } | ||
2737 | |||
2701 | area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP); | 2738 | area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP); |
2702 | if (area == NULL) { | 2739 | if (area == NULL) { |
2703 | ret = -ENOMEM; | 2740 | ret = -ENOMEM; |
@@ -2705,7 +2742,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma) | |||
2705 | goto err_get_vm_area_failed; | 2742 | goto err_get_vm_area_failed; |
2706 | } | 2743 | } |
2707 | proc->buffer = area->addr; | 2744 | proc->buffer = area->addr; |
2708 | proc->user_buffer_offset = vma->vm_start - (size_t)proc->buffer; | 2745 | proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer; |
2709 | 2746 | ||
2710 | #ifdef CONFIG_CPU_CACHE_VIPT | 2747 | #ifdef CONFIG_CPU_CACHE_VIPT |
2711 | if (cache_is_vipt_aliasing()) { | 2748 | if (cache_is_vipt_aliasing()) { |
@@ -2738,6 +2775,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma) | |||
2738 | binder_insert_free_buffer(proc, buffer); | 2775 | binder_insert_free_buffer(proc, buffer); |
2739 | proc->free_async_space = proc->buffer_size / 2; | 2776 | proc->free_async_space = proc->buffer_size / 2; |
2740 | barrier(); | 2777 | barrier(); |
2778 | proc->files = get_files_struct(current); | ||
2741 | proc->vma = vma; | 2779 | proc->vma = vma; |
2742 | 2780 | ||
2743 | /*printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/ | 2781 | /*printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/ |
@@ -2745,10 +2783,12 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma) | |||
2745 | 2783 | ||
2746 | err_alloc_small_buf_failed: | 2784 | err_alloc_small_buf_failed: |
2747 | kfree(proc->pages); | 2785 | kfree(proc->pages); |
2786 | proc->pages = NULL; | ||
2748 | err_alloc_pages_failed: | 2787 | err_alloc_pages_failed: |
2749 | vfree(proc->buffer); | 2788 | vfree(proc->buffer); |
2789 | proc->buffer = NULL; | ||
2750 | err_get_vm_area_failed: | 2790 | err_get_vm_area_failed: |
2751 | mutex_unlock(&binder_lock); | 2791 | err_already_mapped: |
2752 | err_bad_arg: | 2792 | err_bad_arg: |
2753 | printk(KERN_ERR "binder_mmap: %d %lx-%lx %s failed %d\n", proc->pid, vma->vm_start, vma->vm_end, failure_string, ret); | 2793 | printk(KERN_ERR "binder_mmap: %d %lx-%lx %s failed %d\n", proc->pid, vma->vm_start, vma->vm_end, failure_string, ret); |
2754 | return ret; | 2794 | return ret; |
@@ -2780,6 +2820,7 @@ static int binder_open(struct inode *nodp, struct file *filp) | |||
2780 | if (binder_proc_dir_entry_proc) { | 2820 | if (binder_proc_dir_entry_proc) { |
2781 | char strbuf[11]; | 2821 | char strbuf[11]; |
2782 | snprintf(strbuf, sizeof(strbuf), "%u", proc->pid); | 2822 | snprintf(strbuf, sizeof(strbuf), "%u", proc->pid); |
2823 | remove_proc_entry(strbuf, binder_proc_dir_entry_proc); | ||
2783 | create_proc_read_entry(strbuf, S_IRUGO, binder_proc_dir_entry_proc, binder_read_proc_proc, proc); | 2824 | create_proc_read_entry(strbuf, S_IRUGO, binder_proc_dir_entry_proc, binder_read_proc_proc, proc); |
2784 | } | 2825 | } |
2785 | 2826 | ||
@@ -2788,11 +2829,17 @@ static int binder_open(struct inode *nodp, struct file *filp) | |||
2788 | 2829 | ||
2789 | static int binder_flush(struct file *filp, fl_owner_t id) | 2830 | static int binder_flush(struct file *filp, fl_owner_t id) |
2790 | { | 2831 | { |
2791 | struct rb_node *n; | ||
2792 | struct binder_proc *proc = filp->private_data; | 2832 | struct binder_proc *proc = filp->private_data; |
2793 | int wake_count = 0; | ||
2794 | 2833 | ||
2795 | mutex_lock(&binder_lock); | 2834 | binder_defer_work(proc, BINDER_DEFERRED_FLUSH); |
2835 | |||
2836 | return 0; | ||
2837 | } | ||
2838 | |||
2839 | static void binder_deferred_flush(struct binder_proc *proc) | ||
2840 | { | ||
2841 | struct rb_node *n; | ||
2842 | int wake_count = 0; | ||
2796 | for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) { | 2843 | for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) { |
2797 | struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node); | 2844 | struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node); |
2798 | thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN; | 2845 | thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN; |
@@ -2802,28 +2849,35 @@ static int binder_flush(struct file *filp, fl_owner_t id) | |||
2802 | } | 2849 | } |
2803 | } | 2850 | } |
2804 | wake_up_interruptible_all(&proc->wait); | 2851 | wake_up_interruptible_all(&proc->wait); |
2805 | mutex_unlock(&binder_lock); | ||
2806 | 2852 | ||
2807 | if (binder_debug_mask & BINDER_DEBUG_OPEN_CLOSE) | 2853 | if (binder_debug_mask & BINDER_DEBUG_OPEN_CLOSE) |
2808 | printk(KERN_INFO "binder_flush: %d woke %d threads\n", proc->pid, wake_count); | 2854 | printk(KERN_INFO "binder_flush: %d woke %d threads\n", proc->pid, wake_count); |
2809 | |||
2810 | return 0; | ||
2811 | } | 2855 | } |
2812 | 2856 | ||
2813 | static int binder_release(struct inode *nodp, struct file *filp) | 2857 | static int binder_release(struct inode *nodp, struct file *filp) |
2814 | { | 2858 | { |
2815 | struct hlist_node *pos; | ||
2816 | struct binder_transaction *t; | ||
2817 | struct rb_node *n; | ||
2818 | struct binder_proc *proc = filp->private_data; | 2859 | struct binder_proc *proc = filp->private_data; |
2819 | int threads, nodes, incoming_refs, outgoing_refs, buffers, active_transactions, page_count; | ||
2820 | |||
2821 | if (binder_proc_dir_entry_proc) { | 2860 | if (binder_proc_dir_entry_proc) { |
2822 | char strbuf[11]; | 2861 | char strbuf[11]; |
2823 | snprintf(strbuf, sizeof(strbuf), "%u", proc->pid); | 2862 | snprintf(strbuf, sizeof(strbuf), "%u", proc->pid); |
2824 | remove_proc_entry(strbuf, binder_proc_dir_entry_proc); | 2863 | remove_proc_entry(strbuf, binder_proc_dir_entry_proc); |
2825 | } | 2864 | } |
2826 | mutex_lock(&binder_lock); | 2865 | |
2866 | binder_defer_work(proc, BINDER_DEFERRED_RELEASE); | ||
2867 | |||
2868 | return 0; | ||
2869 | } | ||
2870 | |||
2871 | static void binder_deferred_release(struct binder_proc *proc) | ||
2872 | { | ||
2873 | struct hlist_node *pos; | ||
2874 | struct binder_transaction *t; | ||
2875 | struct rb_node *n; | ||
2876 | int threads, nodes, incoming_refs, outgoing_refs, buffers, active_transactions, page_count; | ||
2877 | |||
2878 | BUG_ON(proc->vma); | ||
2879 | BUG_ON(proc->files); | ||
2880 | |||
2827 | hlist_del(&proc->proc_node); | 2881 | hlist_del(&proc->proc_node); |
2828 | if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) { | 2882 | if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) { |
2829 | if (binder_debug_mask & BINDER_DEBUG_DEAD_BINDER) | 2883 | if (binder_debug_mask & BINDER_DEBUG_DEAD_BINDER) |
@@ -2897,7 +2951,6 @@ static int binder_release(struct inode *nodp, struct file *filp) | |||
2897 | } | 2951 | } |
2898 | 2952 | ||
2899 | binder_stats.obj_deleted[BINDER_STAT_PROC]++; | 2953 | binder_stats.obj_deleted[BINDER_STAT_PROC]++; |
2900 | mutex_unlock(&binder_lock); | ||
2901 | 2954 | ||
2902 | page_count = 0; | 2955 | page_count = 0; |
2903 | if (proc->pages) { | 2956 | if (proc->pages) { |
@@ -2921,7 +2974,57 @@ static int binder_release(struct inode *nodp, struct file *filp) | |||
2921 | proc->pid, threads, nodes, incoming_refs, outgoing_refs, active_transactions, buffers, page_count); | 2974 | proc->pid, threads, nodes, incoming_refs, outgoing_refs, active_transactions, buffers, page_count); |
2922 | 2975 | ||
2923 | kfree(proc); | 2976 | kfree(proc); |
2924 | return 0; | 2977 | } |
2978 | |||
2979 | static void binder_deferred_func(struct work_struct *work) | ||
2980 | { | ||
2981 | struct binder_proc *proc; | ||
2982 | struct files_struct *files; | ||
2983 | |||
2984 | int defer; | ||
2985 | do { | ||
2986 | mutex_lock(&binder_lock); | ||
2987 | mutex_lock(&binder_deferred_lock); | ||
2988 | if (!hlist_empty(&binder_deferred_list)) { | ||
2989 | proc = hlist_entry(binder_deferred_list.first, | ||
2990 | struct binder_proc, deferred_work_node); | ||
2991 | hlist_del_init(&proc->deferred_work_node); | ||
2992 | defer = proc->deferred_work; | ||
2993 | proc->deferred_work = 0; | ||
2994 | } else { | ||
2995 | proc = NULL; | ||
2996 | defer = 0; | ||
2997 | } | ||
2998 | mutex_unlock(&binder_deferred_lock); | ||
2999 | |||
3000 | files = NULL; | ||
3001 | if (defer & BINDER_DEFERRED_PUT_FILES) | ||
3002 | if ((files = proc->files)) | ||
3003 | proc->files = NULL; | ||
3004 | |||
3005 | if (defer & BINDER_DEFERRED_FLUSH) | ||
3006 | binder_deferred_flush(proc); | ||
3007 | |||
3008 | if (defer & BINDER_DEFERRED_RELEASE) | ||
3009 | binder_deferred_release(proc); /* frees proc */ | ||
3010 | |||
3011 | mutex_unlock(&binder_lock); | ||
3012 | if (files) | ||
3013 | put_files_struct(files); | ||
3014 | } while (proc); | ||
3015 | } | ||
3016 | static DECLARE_WORK(binder_deferred_work, binder_deferred_func); | ||
3017 | |||
3018 | static void binder_defer_work(struct binder_proc *proc, int defer) | ||
3019 | { | ||
3020 | mutex_lock(&binder_deferred_lock); | ||
3021 | proc->deferred_work |= defer; | ||
3022 | if (hlist_unhashed(&proc->deferred_work_node)) { | ||
3023 | hlist_add_head(&proc->deferred_work_node, | ||
3024 | &binder_deferred_list); | ||
3025 | schedule_work(&binder_deferred_work); | ||
3026 | } | ||
3027 | mutex_unlock(&binder_deferred_lock); | ||
2925 | } | 3028 | } |
2926 | 3029 | ||
2927 | static char *print_binder_transaction(char *buf, char *end, const char *prefix, struct binder_transaction *t) | 3030 | static char *print_binder_transaction(char *buf, char *end, const char *prefix, struct binder_transaction *t) |
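
The binder changes above move the expensive flush/release paths and the put_files work out of the file-operation callbacks and into a single deferred work item: each binder_proc carries a bitmask of pending deferred operations plus an hlist node, binder_defer_work() queues the proc and schedules one global work item, and binder_deferred_func() drains the list under binder_lock. A minimal sketch of that pattern with hypothetical names (my_proc, MY_DEFERRED_*), using only the standard workqueue and hlist primitives:

#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/mutex.h>

enum {
	MY_DEFERRED_FLUSH   = 0x01,
	MY_DEFERRED_RELEASE = 0x02,
};

struct my_proc {
	struct hlist_node deferred_node;   /* INIT_HLIST_NODE() at creation */
	int deferred_work;                 /* pending MY_DEFERRED_* bits */
};

static HLIST_HEAD(my_deferred_list);
static DEFINE_MUTEX(my_deferred_lock);

static void my_deferred_func(struct work_struct *work)
{
	struct my_proc *proc;
	int defer;

	do {
		mutex_lock(&my_deferred_lock);
		if (!hlist_empty(&my_deferred_list)) {
			proc = hlist_entry(my_deferred_list.first,
					   struct my_proc, deferred_node);
			hlist_del_init(&proc->deferred_node);
			defer = proc->deferred_work;
			proc->deferred_work = 0;
		} else {
			proc = NULL;
			defer = 0;
		}
		mutex_unlock(&my_deferred_lock);

		if (defer & MY_DEFERRED_FLUSH)
			;	/* flush work for proc goes here */
		if (defer & MY_DEFERRED_RELEASE)
			;	/* release work; may free proc */
	} while (proc);
}
static DECLARE_WORK(my_deferred_work, my_deferred_func);

static void my_defer_work(struct my_proc *proc, int defer)
{
	mutex_lock(&my_deferred_lock);
	proc->deferred_work |= defer;
	if (hlist_unhashed(&proc->deferred_node)) {
		hlist_add_head(&proc->deferred_node, &my_deferred_list);
		schedule_work(&my_deferred_work);
	}
	mutex_unlock(&my_deferred_lock);
}

Draining the list in a loop keeps a single work item serving any number of procs, which is cheaper than embedding one work_struct per proc.
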
diff --git a/drivers/staging/at76_usb/at76_usb.c b/drivers/staging/at76_usb/at76_usb.c index 6f6e36a3bd9f..c8af9a868d62 100644 --- a/drivers/staging/at76_usb/at76_usb.c +++ b/drivers/staging/at76_usb/at76_usb.c | |||
@@ -5259,6 +5259,18 @@ static int at76_alloc_urbs(struct at76_priv *priv, | |||
5259 | return 0; | 5259 | return 0; |
5260 | } | 5260 | } |
5261 | 5261 | ||
5262 | static const struct net_device_ops at76_netdev_ops = { | ||
5263 | .ndo_open = at76_open, | ||
5264 | .ndo_stop = at76_stop, | ||
5265 | .ndo_get_stats = at76_get_stats, | ||
5266 | .ndo_start_xmit = at76_tx, | ||
5267 | .ndo_tx_timeout = at76_tx_timeout, | ||
5268 | .ndo_set_multicast_list = at76_set_multicast, | ||
5269 | .ndo_set_mac_address = at76_set_mac_address, | ||
5270 | .ndo_validate_addr = eth_validate_addr, | ||
5271 | .ndo_change_mtu = eth_change_mtu, | ||
5272 | }; | ||
5273 | |||
5262 | /* Register network device and initialize the hardware */ | 5274 | /* Register network device and initialize the hardware */ |
5263 | static int at76_init_new_device(struct at76_priv *priv, | 5275 | static int at76_init_new_device(struct at76_priv *priv, |
5264 | struct usb_interface *interface) | 5276 | struct usb_interface *interface) |
@@ -5303,21 +5315,15 @@ static int at76_init_new_device(struct at76_priv *priv, | |||
5303 | priv->scan_mode = SCAN_TYPE_ACTIVE; | 5315 | priv->scan_mode = SCAN_TYPE_ACTIVE; |
5304 | 5316 | ||
5305 | netdev->flags &= ~IFF_MULTICAST; /* not yet or never */ | 5317 | netdev->flags &= ~IFF_MULTICAST; /* not yet or never */ |
5306 | netdev->open = at76_open; | 5318 | netdev->netdev_ops = &at76_netdev_ops; |
5307 | netdev->stop = at76_stop; | ||
5308 | netdev->get_stats = at76_get_stats; | ||
5309 | netdev->ethtool_ops = &at76_ethtool_ops; | 5319 | netdev->ethtool_ops = &at76_ethtool_ops; |
5310 | 5320 | ||
5311 | /* Add pointers to enable iwspy support. */ | 5321 | /* Add pointers to enable iwspy support. */ |
5312 | priv->wireless_data.spy_data = &priv->spy_data; | 5322 | priv->wireless_data.spy_data = &priv->spy_data; |
5313 | netdev->wireless_data = &priv->wireless_data; | 5323 | netdev->wireless_data = &priv->wireless_data; |
5314 | 5324 | ||
5315 | netdev->hard_start_xmit = at76_tx; | ||
5316 | netdev->tx_timeout = at76_tx_timeout; | ||
5317 | netdev->watchdog_timeo = 2 * HZ; | 5325 | netdev->watchdog_timeo = 2 * HZ; |
5318 | netdev->wireless_handlers = &at76_handler_def; | 5326 | netdev->wireless_handlers = &at76_handler_def; |
5319 | netdev->set_multicast_list = at76_set_multicast; | ||
5320 | netdev->set_mac_address = at76_set_mac_address; | ||
5321 | dev_alloc_name(netdev, "wlan%d"); | 5327 | dev_alloc_name(netdev, "wlan%d"); |
5322 | 5328 | ||
5323 | ret = register_netdev(priv->netdev); | 5329 | ret = register_netdev(priv->netdev); |
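
Several staging drivers in this series (at76_usb above, and epl, et131x, otus and rt2860 below) get the same mechanical conversion: the per-field callbacks on struct net_device (open, stop, hard_start_xmit, set_multicast_list, ...) are collected into one const struct net_device_ops, and the driver only sets netdev->netdev_ops. A generic sketch of the pattern with hypothetical foo_* handlers, not taken from any of these drivers:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

static int foo_open(struct net_device *dev)  { return 0; }
static int foo_stop(struct net_device *dev)  { return 0; }

static int foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	dev_kfree_skb(skb);		/* a real driver queues the frame here */
	return NETDEV_TX_OK;
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_open            = foo_open,
	.ndo_stop            = foo_stop,
	.ndo_start_xmit      = foo_start_xmit,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr   = eth_validate_addr,
	.ndo_change_mtu      = eth_change_mtu,
};

static void foo_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->netdev_ops     = &foo_netdev_ops;	/* replaces dev->open = ..., etc. */
	dev->watchdog_timeo = 2 * HZ;
}

Keeping the ops in a single const table also lets them live in read-only data, one of the motivations for removing the writable function pointers from struct net_device.
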
diff --git a/drivers/staging/b3dfg/b3dfg.c b/drivers/staging/b3dfg/b3dfg.c index 0348072b3ab5..75ebe338c6f2 100644 --- a/drivers/staging/b3dfg/b3dfg.c +++ b/drivers/staging/b3dfg/b3dfg.c | |||
@@ -1000,7 +1000,7 @@ static int __devinit b3dfg_probe(struct pci_dev *pdev, | |||
1000 | 1000 | ||
1001 | pci_set_master(pdev); | 1001 | pci_set_master(pdev); |
1002 | 1002 | ||
1003 | r = pci_set_dma_mask(pdev, DMA_32BIT_MASK); | 1003 | r = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); |
1004 | if (r) { | 1004 | if (r) { |
1005 | dev_err(&pdev->dev, "no usable DMA configuration\n"); | 1005 | dev_err(&pdev->dev, "no usable DMA configuration\n"); |
1006 | goto err_free_res; | 1006 | goto err_free_res; |
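
The b3dfg change swaps the deprecated DMA_32BIT_MASK constant for DMA_BIT_MASK(32); the macro builds an n-bit mask directly, so no separate #define is needed per supported width. A small illustration of probe-time DMA mask setup (hypothetical foo_setup_dma, pdev assumed already enabled):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int foo_setup_dma(struct pci_dev *pdev)
{
	/* DMA_BIT_MASK(32) expands to 0xffffffffULL */
	int r = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (r)
		return r;
	return pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
}
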
diff --git a/drivers/staging/epl/VirtualEthernetLinux.c b/drivers/staging/epl/VirtualEthernetLinux.c index 21206c4d22ff..077724a556cc 100644 --- a/drivers/staging/epl/VirtualEthernetLinux.c +++ b/drivers/staging/epl/VirtualEthernetLinux.c | |||
@@ -284,6 +284,17 @@ static tEplKernel VEthRecvFrame(tEplFrameInfo * pFrameInfo_p) | |||
284 | return Ret; | 284 | return Ret; |
285 | } | 285 | } |
286 | 286 | ||
287 | static const struct net_device_ops epl_netdev_ops = { | ||
288 | .ndo_open = VEthOpen, | ||
289 | .ndo_stop = VEthClose, | ||
290 | .ndo_get_stats = VEthGetStats, | ||
291 | .ndo_start_xmit = VEthXmit, | ||
292 | .ndo_tx_timeout = VEthTimeout, | ||
293 | .ndo_change_mtu = eth_change_mtu, | ||
294 | .ndo_set_mac_address = eth_mac_addr, | ||
295 | .ndo_validate_addr = eth_validate_addr, | ||
296 | }; | ||
297 | |||
287 | tEplKernel VEthAddInstance(tEplDllkInitParam *pInitParam_p) | 298 | tEplKernel VEthAddInstance(tEplDllkInitParam *pInitParam_p) |
288 | { | 299 | { |
289 | tEplKernel Ret = kEplSuccessful; | 300 | tEplKernel Ret = kEplSuccessful; |
@@ -299,11 +310,7 @@ tEplKernel VEthAddInstance(tEplDllkInitParam *pInitParam_p) | |||
299 | goto Exit; | 310 | goto Exit; |
300 | } | 311 | } |
301 | 312 | ||
302 | pVEthNetDevice_g->open = VEthOpen; | 313 | pVEthNetDevice_g->netdev_ops = &epl_netdev_ops; |
303 | pVEthNetDevice_g->stop = VEthClose; | ||
304 | pVEthNetDevice_g->get_stats = VEthGetStats; | ||
305 | pVEthNetDevice_g->hard_start_xmit = VEthXmit; | ||
306 | pVEthNetDevice_g->tx_timeout = VEthTimeout; | ||
307 | pVEthNetDevice_g->watchdog_timeo = EPL_VETH_TX_TIMEOUT; | 314 | pVEthNetDevice_g->watchdog_timeo = EPL_VETH_TX_TIMEOUT; |
308 | pVEthNetDevice_g->destructor = free_netdev; | 315 | pVEthNetDevice_g->destructor = free_netdev; |
309 | 316 | ||
diff --git a/drivers/staging/et131x/et131x_netdev.c b/drivers/staging/et131x/et131x_netdev.c index de65972ff362..951c73d5db20 100644 --- a/drivers/staging/et131x/et131x_netdev.c +++ b/drivers/staging/et131x/et131x_netdev.c | |||
@@ -112,6 +112,19 @@ void et131x_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp); | |||
112 | void et131x_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid); | 112 | void et131x_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid); |
113 | void et131x_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid); | 113 | void et131x_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid); |
114 | 114 | ||
115 | static const struct net_device_ops et131x_netdev_ops = { | ||
116 | .ndo_open = et131x_open, | ||
117 | .ndo_stop = et131x_close, | ||
118 | .ndo_start_xmit = et131x_tx, | ||
119 | .ndo_set_multicast_list = et131x_multicast, | ||
120 | .ndo_tx_timeout = et131x_tx_timeout, | ||
121 | .ndo_change_mtu = et131x_change_mtu, | ||
122 | .ndo_set_mac_address = et131x_set_mac_addr, | ||
123 | .ndo_validate_addr = eth_validate_addr, | ||
124 | .ndo_get_stats = et131x_stats, | ||
125 | .ndo_do_ioctl = et131x_ioctl, | ||
126 | }; | ||
127 | |||
115 | /** | 128 | /** |
116 | * et131x_device_alloc | 129 | * et131x_device_alloc |
117 | * | 130 | * |
@@ -142,16 +155,8 @@ struct net_device *et131x_device_alloc(void) | |||
142 | */ | 155 | */ |
143 | //netdev->init = &et131x_init; | 156 | //netdev->init = &et131x_init; |
144 | //netdev->set_config = &et131x_config; | 157 | //netdev->set_config = &et131x_config; |
145 | netdev->get_stats = &et131x_stats; | ||
146 | netdev->open = &et131x_open; | ||
147 | netdev->stop = &et131x_close; | ||
148 | netdev->do_ioctl = &et131x_ioctl; | ||
149 | netdev->set_multicast_list = &et131x_multicast; | ||
150 | netdev->hard_start_xmit = &et131x_tx; | ||
151 | netdev->tx_timeout = &et131x_tx_timeout; | ||
152 | netdev->watchdog_timeo = ET131X_TX_TIMEOUT; | 158 | netdev->watchdog_timeo = ET131X_TX_TIMEOUT; |
153 | netdev->change_mtu = &et131x_change_mtu; | 159 | netdev->netdev_ops = &et131x_netdev_ops; |
154 | netdev->set_mac_address = &et131x_set_mac_addr; | ||
155 | 160 | ||
156 | //netdev->ethtool_ops = &et131x_ethtool_ops; | 161 | //netdev->ethtool_ops = &et131x_ethtool_ops; |
157 | 162 | ||
diff --git a/drivers/staging/go7007/go7007-driver.c b/drivers/staging/go7007/go7007-driver.c index 58bfc8d81b3b..f47c0ce2849a 100644 --- a/drivers/staging/go7007/go7007-driver.c +++ b/drivers/staging/go7007/go7007-driver.c | |||
@@ -268,21 +268,6 @@ int go7007_register_encoder(struct go7007 *go) | |||
268 | init_i2c_module(&go->i2c_adapter, | 268 | init_i2c_module(&go->i2c_adapter, |
269 | go->board_info->i2c_devs[i].id, | 269 | go->board_info->i2c_devs[i].id, |
270 | go->board_info->i2c_devs[i].addr); | 270 | go->board_info->i2c_devs[i].addr); |
271 | #ifdef TUNER_SET_TYPE_ADDR | ||
272 | if (go->tuner_type >= 0) { | ||
273 | struct tuner_setup tun_setup = { | ||
274 | .mode_mask = T_ANALOG_TV, | ||
275 | .addr = ADDR_UNSET, | ||
276 | .type = go->tuner_type | ||
277 | }; | ||
278 | i2c_clients_command(&go->i2c_adapter, | ||
279 | TUNER_SET_TYPE_ADDR, &tun_setup); | ||
280 | } | ||
281 | #else | ||
282 | if (go->tuner_type >= 0) | ||
283 | i2c_clients_command(&go->i2c_adapter, | ||
284 | TUNER_SET_TYPE, &go->tuner_type); | ||
285 | #endif | ||
286 | if (go->board_id == GO7007_BOARDID_ADLINK_MPG24) | 271 | if (go->board_id == GO7007_BOARDID_ADLINK_MPG24) |
287 | i2c_clients_command(&go->i2c_adapter, | 272 | i2c_clients_command(&go->i2c_adapter, |
288 | DECODER_SET_CHANNEL, &go->channel_number); | 273 | DECODER_SET_CHANNEL, &go->channel_number); |
diff --git a/drivers/staging/go7007/wis-sony-tuner.c b/drivers/staging/go7007/wis-sony-tuner.c index 58fddb122372..0a7eeef7c008 100644 --- a/drivers/staging/go7007/wis-sony-tuner.c +++ b/drivers/staging/go7007/wis-sony-tuner.c | |||
@@ -386,6 +386,7 @@ static int tuner_command(struct i2c_client *client, unsigned int cmd, void *arg) | |||
386 | struct wis_sony_tuner *t = i2c_get_clientdata(client); | 386 | struct wis_sony_tuner *t = i2c_get_clientdata(client); |
387 | 387 | ||
388 | switch (cmd) { | 388 | switch (cmd) { |
389 | #if 0 | ||
389 | #ifdef TUNER_SET_TYPE_ADDR | 390 | #ifdef TUNER_SET_TYPE_ADDR |
390 | case TUNER_SET_TYPE_ADDR: | 391 | case TUNER_SET_TYPE_ADDR: |
391 | { | 392 | { |
@@ -463,6 +464,7 @@ static int tuner_command(struct i2c_client *client, unsigned int cmd, void *arg) | |||
463 | t->type, sony_tuners[t->type - 200].name); | 464 | t->type, sony_tuners[t->type - 200].name); |
464 | break; | 465 | break; |
465 | } | 466 | } |
467 | #endif | ||
466 | case VIDIOC_G_FREQUENCY: | 468 | case VIDIOC_G_FREQUENCY: |
467 | { | 469 | { |
468 | struct v4l2_frequency *f = arg; | 470 | struct v4l2_frequency *f = arg; |
diff --git a/drivers/staging/line6/audio.c b/drivers/staging/line6/audio.c index 3aa946899ced..e2ac8d60f8c2 100644 --- a/drivers/staging/line6/audio.c +++ b/drivers/staging/line6/audio.c | |||
@@ -27,11 +27,12 @@ int line6_init_audio(struct usb_line6 *line6) | |||
27 | { | 27 | { |
28 | static int dev; | 28 | static int dev; |
29 | struct snd_card *card; | 29 | struct snd_card *card; |
30 | int err; | ||
30 | 31 | ||
31 | card = snd_card_new(line6_index[dev], line6_id[dev], THIS_MODULE, 0); | 32 | err = snd_card_create(line6_index[dev], line6_id[dev], THIS_MODULE, 0, |
32 | 33 | &card); | |
33 | if (card == NULL) | 34 | if (err < 0) |
34 | return -ENOMEM; | 35 | return err; |
35 | 36 | ||
36 | line6->card = card; | 37 | line6->card = card; |
37 | 38 | ||
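
The line6 change tracks an ALSA core API change: snd_card_new(), which returned the card or NULL, was replaced by snd_card_create(), which returns an error code and hands the card back through a pointer argument, so probe code can propagate the real error instead of mapping every failure to -ENOMEM. A hedged sketch of the calling convention (hypothetical foo_audio_init):

#include <linux/module.h>
#include <sound/core.h>

static int foo_audio_init(void)
{
	struct snd_card *card;
	int err;

	/* index -1 picks a free slot, id NULL lets ALSA generate one */
	err = snd_card_create(-1, NULL, THIS_MODULE, 0, &card);
	if (err < 0)
		return err;

	/* ... create PCM/mixer components here, then register ... */
	err = snd_card_register(card);
	if (err < 0) {
		snd_card_free(card);
		return err;
	}
	return 0;
}
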
diff --git a/drivers/staging/otus/usbdrv.c b/drivers/staging/otus/usbdrv.c index 565a839589f5..540cbbb826f9 100644 --- a/drivers/staging/otus/usbdrv.c +++ b/drivers/staging/otus/usbdrv.c | |||
@@ -822,6 +822,21 @@ int zfLnxVapXmitFrame(struct sk_buff *skb, struct net_device *dev) | |||
822 | return 0; | 822 | return 0; |
823 | } | 823 | } |
824 | 824 | ||
825 | static const struct net_device_ops vap_netdev_ops = { | ||
826 | .ndo_open = zfLnxVapOpen, | ||
827 | .ndo_stop = zfLnxVapClose, | ||
828 | .ndo_start_xmit = zfLnxVapXmitFrame, | ||
829 | .ndo_get_stats = usbdrv_get_stats, | ||
830 | .ndo_change_mtu = usbdrv_change_mtu, | ||
831 | .ndo_validate_addr = eth_validate_addr, | ||
832 | .ndo_set_mac_address = eth_mac_addr, | ||
833 | #ifdef ZM_HOSTAPD_SUPPORT | ||
834 | .ndo_do_ioctl = usbdrv_ioctl, | ||
835 | #else | ||
836 | .ndo_do_ioctl = NULL, | ||
837 | #endif | ||
838 | }; | ||
839 | |||
825 | int zfLnxRegisterVapDev(struct net_device* parentDev, u16_t vapId) | 840 | int zfLnxRegisterVapDev(struct net_device* parentDev, u16_t vapId) |
826 | { | 841 | { |
827 | /* Allocate net device structure */ | 842 | /* Allocate net device structure */ |
@@ -846,16 +861,7 @@ int zfLnxRegisterVapDev(struct net_device* parentDev, u16_t vapId) | |||
846 | vap[vapId].dev->ml_priv = parentDev->ml_priv; | 861 | vap[vapId].dev->ml_priv = parentDev->ml_priv; |
847 | 862 | ||
848 | //dev->hard_start_xmit = &zd1212_wds_xmit_frame; | 863 | //dev->hard_start_xmit = &zd1212_wds_xmit_frame; |
849 | vap[vapId].dev->hard_start_xmit = &zfLnxVapXmitFrame; | 864 | vap[vapId].dev->netdev_ops = &vap_netdev_ops; |
850 | vap[vapId].dev->open = &zfLnxVapOpen; | ||
851 | vap[vapId].dev->stop = &zfLnxVapClose; | ||
852 | vap[vapId].dev->get_stats = &usbdrv_get_stats; | ||
853 | vap[vapId].dev->change_mtu = &usbdrv_change_mtu; | ||
854 | #ifdef ZM_HOSTAPD_SUPPORT | ||
855 | vap[vapId].dev->do_ioctl = usbdrv_ioctl; | ||
856 | #else | ||
857 | vap[vapId].dev->do_ioctl = NULL; | ||
858 | #endif | ||
859 | vap[vapId].dev->destructor = free_netdev; | 865 | vap[vapId].dev->destructor = free_netdev; |
860 | 866 | ||
861 | vap[vapId].dev->tx_queue_len = 0; | 867 | vap[vapId].dev->tx_queue_len = 0; |
@@ -1068,6 +1074,18 @@ void zfLnxUnlinkAllUrbs(struct usbdrv_private *macp) | |||
1068 | usb_unlink_urb(macp->RegInUrb); | 1074 | usb_unlink_urb(macp->RegInUrb); |
1069 | } | 1075 | } |
1070 | 1076 | ||
1077 | static const struct net_device_ops otus_netdev_ops = { | ||
1078 | .ndo_open = usbdrv_open, | ||
1079 | .ndo_stop = usbdrv_close, | ||
1080 | .ndo_start_xmit = usbdrv_xmit_frame, | ||
1081 | .ndo_change_mtu = usbdrv_change_mtu, | ||
1082 | .ndo_get_stats = usbdrv_get_stats, | ||
1083 | .ndo_set_multicast_list = usbdrv_set_multi, | ||
1084 | .ndo_set_mac_address = usbdrv_set_mac, | ||
1085 | .ndo_do_ioctl = usbdrv_ioctl, | ||
1086 | .ndo_validate_addr = eth_validate_addr, | ||
1087 | }; | ||
1088 | |||
1071 | u8_t zfLnxInitSetup(struct net_device *dev, struct usbdrv_private *macp) | 1089 | u8_t zfLnxInitSetup(struct net_device *dev, struct usbdrv_private *macp) |
1072 | { | 1090 | { |
1073 | //unsigned char addr[6]; | 1091 | //unsigned char addr[6]; |
@@ -1092,14 +1110,7 @@ u8_t zfLnxInitSetup(struct net_device *dev, struct usbdrv_private *macp) | |||
1092 | dev->wireless_handlers = (struct iw_handler_def *)&p80211wext_handler_def; | 1110 | dev->wireless_handlers = (struct iw_handler_def *)&p80211wext_handler_def; |
1093 | #endif | 1111 | #endif |
1094 | 1112 | ||
1095 | dev->open = usbdrv_open; | 1113 | dev->netdev_ops = &otus_netdev_ops; |
1096 | dev->hard_start_xmit = usbdrv_xmit_frame; | ||
1097 | dev->stop = usbdrv_close; | ||
1098 | dev->change_mtu = &usbdrv_change_mtu; | ||
1099 | dev->get_stats = usbdrv_get_stats; | ||
1100 | dev->set_multicast_list = usbdrv_set_multi; | ||
1101 | dev->set_mac_address = usbdrv_set_mac; | ||
1102 | dev->do_ioctl = usbdrv_ioctl; | ||
1103 | 1114 | ||
1104 | dev->flags |= IFF_MULTICAST; | 1115 | dev->flags |= IFF_MULTICAST; |
1105 | 1116 | ||
diff --git a/drivers/staging/otus/zdusb.c b/drivers/staging/otus/zdusb.c index 78f1d2224fa1..2a6d937ba5e8 100644 --- a/drivers/staging/otus/zdusb.c +++ b/drivers/staging/otus/zdusb.c | |||
@@ -48,7 +48,8 @@ static const char driver_name[] = "Otus"; | |||
48 | static struct usb_device_id zd1221_ids [] = { | 48 | static struct usb_device_id zd1221_ids [] = { |
49 | { USB_DEVICE(VENDOR_ATHR, PRODUCT_AR9170) }, | 49 | { USB_DEVICE(VENDOR_ATHR, PRODUCT_AR9170) }, |
50 | { USB_DEVICE(VENDOR_DLINK, PRODUCT_DWA160A) }, | 50 | { USB_DEVICE(VENDOR_DLINK, PRODUCT_DWA160A) }, |
51 | { USB_DEVICE(0x0846, 0x9010) }, | 51 | { USB_DEVICE(VENDOR_NETGEAR, PRODUCT_WNDA3100) }, |
52 | { USB_DEVICE(VENDOR_NETGEAR, PRODUCT_WN111v2) }, | ||
52 | { } /* Terminating entry */ | 53 | { } /* Terminating entry */ |
53 | }; | 54 | }; |
54 | 55 | ||
diff --git a/drivers/staging/otus/zdusb.h b/drivers/staging/otus/zdusb.h index 656dc212ade5..9f8ab2e96169 100644 --- a/drivers/staging/otus/zdusb.h +++ b/drivers/staging/otus/zdusb.h | |||
@@ -40,4 +40,8 @@ | |||
40 | #define VENDOR_DLINK 0x07D1 //Dlink | 40 | #define VENDOR_DLINK 0x07D1 //Dlink |
41 | #define PRODUCT_DWA160A 0x3C10 | 41 | #define PRODUCT_DWA160A 0x3C10 |
42 | 42 | ||
43 | #define VENDOR_NETGEAR 0x0846 /* NetGear */ | ||
44 | #define PRODUCT_WNDA3100 0x9010 | ||
45 | #define PRODUCT_WN111v2 0x9001 | ||
46 | |||
43 | #endif | 47 | #endif |
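
The otus change replaces a bare numeric USB ID with named VENDOR_NETGEAR/PRODUCT_* macros and adds the WN111v2 entry. For reference, the general shape of such an ID table in a USB driver (hypothetical IDs and names):

#include <linux/module.h>
#include <linux/usb.h>

#define FOO_VENDOR_ID	0x0846	/* hypothetical vendor */
#define FOO_PRODUCT_ID	0x9010	/* hypothetical product */

static struct usb_device_id foo_ids[] = {
	{ USB_DEVICE(FOO_VENDOR_ID, FOO_PRODUCT_ID) },
	{ }			/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, foo_ids);
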
diff --git a/drivers/staging/pohmelfs/config.c b/drivers/staging/pohmelfs/config.c index 3e67da9ea381..a6eaa42fb669 100644 --- a/drivers/staging/pohmelfs/config.c +++ b/drivers/staging/pohmelfs/config.c | |||
@@ -81,6 +81,45 @@ static struct pohmelfs_config_group *pohmelfs_find_create_config_group(unsigned | |||
81 | return g; | 81 | return g; |
82 | } | 82 | } |
83 | 83 | ||
84 | static inline void pohmelfs_insert_config_entry(struct pohmelfs_sb *psb, struct pohmelfs_config *dst) | ||
85 | { | ||
86 | struct pohmelfs_config *tmp; | ||
87 | |||
88 | INIT_LIST_HEAD(&dst->config_entry); | ||
89 | |||
90 | list_for_each_entry(tmp, &psb->state_list, config_entry) { | ||
91 | if (dst->state.ctl.prio > tmp->state.ctl.prio) | ||
92 | list_add_tail(&dst->config_entry, &tmp->config_entry); | ||
93 | } | ||
94 | if (list_empty(&dst->config_entry)) | ||
95 | list_add_tail(&dst->config_entry, &psb->state_list); | ||
96 | } | ||
97 | |||
98 | static int pohmelfs_move_config_entry(struct pohmelfs_sb *psb, | ||
99 | struct pohmelfs_config *dst, struct pohmelfs_config *new) | ||
100 | { | ||
101 | if ((dst->state.ctl.prio == new->state.ctl.prio) && | ||
102 | (dst->state.ctl.perm == new->state.ctl.perm)) | ||
103 | return 0; | ||
104 | |||
105 | dprintk("%s: dst: prio: %d, perm: %x, new: prio: %d, perm: %d.\n", | ||
106 | __func__, dst->state.ctl.prio, dst->state.ctl.perm, | ||
107 | new->state.ctl.prio, new->state.ctl.perm); | ||
108 | dst->state.ctl.prio = new->state.ctl.prio; | ||
109 | dst->state.ctl.perm = new->state.ctl.perm; | ||
110 | |||
111 | list_del_init(&dst->config_entry); | ||
112 | pohmelfs_insert_config_entry(psb, dst); | ||
113 | return 0; | ||
114 | } | ||
115 | |||
116 | /* | ||
117 | * pohmelfs_copy_config() is used to copy new state configs from the | ||
118 | * config group (controlled by the netlink messages) into the superblock. | ||
119 | * This happens either at startup time where no transactions can access | ||
120 | * the list of the configs (and thus list of the network states), or at | ||
121 | * run-time, where it is protected by the psb->state_lock. | ||
122 | */ | ||
84 | int pohmelfs_copy_config(struct pohmelfs_sb *psb) | 123 | int pohmelfs_copy_config(struct pohmelfs_sb *psb) |
85 | { | 124 | { |
86 | struct pohmelfs_config_group *g; | 125 | struct pohmelfs_config_group *g; |
@@ -103,7 +142,9 @@ int pohmelfs_copy_config(struct pohmelfs_sb *psb) | |||
103 | err = 0; | 142 | err = 0; |
104 | list_for_each_entry(dst, &psb->state_list, config_entry) { | 143 | list_for_each_entry(dst, &psb->state_list, config_entry) { |
105 | if (pohmelfs_config_eql(&dst->state.ctl, &c->state.ctl)) { | 144 | if (pohmelfs_config_eql(&dst->state.ctl, &c->state.ctl)) { |
106 | err = -EEXIST; | 145 | err = pohmelfs_move_config_entry(psb, dst, c); |
146 | if (!err) | ||
147 | err = -EEXIST; | ||
107 | break; | 148 | break; |
108 | } | 149 | } |
109 | } | 150 | } |
@@ -119,7 +160,7 @@ int pohmelfs_copy_config(struct pohmelfs_sb *psb) | |||
119 | 160 | ||
120 | memcpy(&dst->state.ctl, &c->state.ctl, sizeof(struct pohmelfs_ctl)); | 161 | memcpy(&dst->state.ctl, &c->state.ctl, sizeof(struct pohmelfs_ctl)); |
121 | 162 | ||
122 | list_add_tail(&dst->config_entry, &psb->state_list); | 163 | pohmelfs_insert_config_entry(psb, dst); |
123 | 164 | ||
124 | err = pohmelfs_state_init_one(psb, dst); | 165 | err = pohmelfs_state_init_one(psb, dst); |
125 | if (err) { | 166 | if (err) { |
@@ -248,6 +289,13 @@ out_unlock: | |||
248 | return err; | 289 | return err; |
249 | } | 290 | } |
250 | 291 | ||
292 | static int pohmelfs_modify_config(struct pohmelfs_ctl *old, struct pohmelfs_ctl *new) | ||
293 | { | ||
294 | old->perm = new->perm; | ||
295 | old->prio = new->prio; | ||
296 | return 0; | ||
297 | } | ||
298 | |||
251 | static int pohmelfs_cn_ctl(struct cn_msg *msg, int action) | 299 | static int pohmelfs_cn_ctl(struct cn_msg *msg, int action) |
252 | { | 300 | { |
253 | struct pohmelfs_config_group *g; | 301 | struct pohmelfs_config_group *g; |
@@ -278,6 +326,9 @@ static int pohmelfs_cn_ctl(struct cn_msg *msg, int action) | |||
278 | g->num_entry--; | 326 | g->num_entry--; |
279 | kfree(c); | 327 | kfree(c); |
280 | goto out_unlock; | 328 | goto out_unlock; |
329 | } else if (action == POHMELFS_FLAGS_MODIFY) { | ||
330 | err = pohmelfs_modify_config(sc, ctl); | ||
331 | goto out_unlock; | ||
281 | } else { | 332 | } else { |
282 | err = -EEXIST; | 333 | err = -EEXIST; |
283 | goto out_unlock; | 334 | goto out_unlock; |
@@ -296,6 +347,7 @@ static int pohmelfs_cn_ctl(struct cn_msg *msg, int action) | |||
296 | } | 347 | } |
297 | memcpy(&c->state.ctl, ctl, sizeof(struct pohmelfs_ctl)); | 348 | memcpy(&c->state.ctl, ctl, sizeof(struct pohmelfs_ctl)); |
298 | g->num_entry++; | 349 | g->num_entry++; |
350 | |||
299 | list_add_tail(&c->config_entry, &g->config_list); | 351 | list_add_tail(&c->config_entry, &g->config_list); |
300 | 352 | ||
301 | out_unlock: | 353 | out_unlock: |
@@ -401,10 +453,9 @@ static void pohmelfs_cn_callback(void *data) | |||
401 | 453 | ||
402 | switch (msg->flags) { | 454 | switch (msg->flags) { |
403 | case POHMELFS_FLAGS_ADD: | 455 | case POHMELFS_FLAGS_ADD: |
404 | err = pohmelfs_cn_ctl(msg, POHMELFS_FLAGS_ADD); | ||
405 | break; | ||
406 | case POHMELFS_FLAGS_DEL: | 456 | case POHMELFS_FLAGS_DEL: |
407 | err = pohmelfs_cn_ctl(msg, POHMELFS_FLAGS_DEL); | 457 | case POHMELFS_FLAGS_MODIFY: |
458 | err = pohmelfs_cn_ctl(msg, msg->flags); | ||
408 | break; | 459 | break; |
409 | case POHMELFS_FLAGS_SHOW: | 460 | case POHMELFS_FLAGS_SHOW: |
410 | err = pohmelfs_cn_disp(msg); | 461 | err = pohmelfs_cn_disp(msg); |
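
The new pohmelfs config code keeps the superblock's state_list sorted by IO priority, so higher-priority network states are considered first, and re-sorts an entry when a POHMELFS_FLAGS_MODIFY message changes its priority or permissions. A generic sketch of priority-ordered insertion over a kernel linked list (hypothetical element type; note the early return once the slot is found):

#include <linux/list.h>

struct prio_entry {
	struct list_head node;
	int prio;
};

/* Insert 'new' so the list stays sorted by descending prio. */
static void prio_list_insert(struct list_head *head, struct prio_entry *new)
{
	struct prio_entry *tmp;

	list_for_each_entry(tmp, head, node) {
		if (new->prio > tmp->prio) {
			/* list_add_tail(a, b) links 'a' immediately before 'b' */
			list_add_tail(&new->node, &tmp->node);
			return;
		}
	}
	list_add_tail(&new->node, head);	/* lowest prio so far: append */
}
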
diff --git a/drivers/staging/pohmelfs/dir.c b/drivers/staging/pohmelfs/dir.c index 7a41183a32e1..b5799842fb84 100644 --- a/drivers/staging/pohmelfs/dir.c +++ b/drivers/staging/pohmelfs/dir.c | |||
@@ -328,7 +328,7 @@ static int pohmelfs_sync_remote_dir(struct pohmelfs_inode *pi) | |||
328 | { | 328 | { |
329 | struct inode *inode = &pi->vfs_inode; | 329 | struct inode *inode = &pi->vfs_inode; |
330 | struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb); | 330 | struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb); |
331 | long ret = msecs_to_jiffies(25000); | 331 | long ret = psb->wait_on_page_timeout; |
332 | int err; | 332 | int err; |
333 | 333 | ||
334 | dprintk("%s: dir: %llu, state: %lx: remote_synced: %d.\n", | 334 | dprintk("%s: dir: %llu, state: %lx: remote_synced: %d.\n", |
@@ -389,11 +389,11 @@ static int pohmelfs_readdir(struct file *file, void *dirent, filldir_t filldir) | |||
389 | dprintk("%s: parent: %llu, fpos: %llu, hash: %08lx.\n", | 389 | dprintk("%s: parent: %llu, fpos: %llu, hash: %08lx.\n", |
390 | __func__, pi->ino, (u64)file->f_pos, | 390 | __func__, pi->ino, (u64)file->f_pos, |
391 | (unsigned long)file->private_data); | 391 | (unsigned long)file->private_data); |
392 | 392 | #if 0 | |
393 | err = pohmelfs_data_lock(pi, 0, ~0, POHMELFS_READ_LOCK); | 393 | err = pohmelfs_data_lock(pi, 0, ~0, POHMELFS_READ_LOCK); |
394 | if (err) | 394 | if (err) |
395 | return err; | 395 | return err; |
396 | 396 | #endif | |
397 | err = pohmelfs_sync_remote_dir(pi); | 397 | err = pohmelfs_sync_remote_dir(pi); |
398 | if (err) | 398 | if (err) |
399 | return err; | 399 | return err; |
@@ -513,10 +513,6 @@ struct dentry *pohmelfs_lookup(struct inode *dir, struct dentry *dentry, struct | |||
513 | 513 | ||
514 | need_lock = pohmelfs_need_lock(parent, lock_type); | 514 | need_lock = pohmelfs_need_lock(parent, lock_type); |
515 | 515 | ||
516 | err = pohmelfs_data_lock(parent, 0, ~0, lock_type); | ||
517 | if (err) | ||
518 | goto out; | ||
519 | |||
520 | str.hash = jhash(dentry->d_name.name, dentry->d_name.len, 0); | 516 | str.hash = jhash(dentry->d_name.name, dentry->d_name.len, 0); |
521 | 517 | ||
522 | mutex_lock(&parent->offset_lock); | 518 | mutex_lock(&parent->offset_lock); |
@@ -525,8 +521,8 @@ struct dentry *pohmelfs_lookup(struct inode *dir, struct dentry *dentry, struct | |||
525 | ino = n->ino; | 521 | ino = n->ino; |
526 | mutex_unlock(&parent->offset_lock); | 522 | mutex_unlock(&parent->offset_lock); |
527 | 523 | ||
528 | dprintk("%s: 1 ino: %lu, inode: %p, name: '%s', hash: %x, parent_state: %lx.\n", | 524 | dprintk("%s: start ino: %lu, inode: %p, name: '%s', hash: %x, parent_state: %lx, need_lock: %d.\n", |
529 | __func__, ino, inode, str.name, str.hash, parent->state); | 525 | __func__, ino, inode, str.name, str.hash, parent->state, need_lock); |
530 | 526 | ||
531 | if (ino) { | 527 | if (ino) { |
532 | inode = ilookup(dir->i_sb, ino); | 528 | inode = ilookup(dir->i_sb, ino); |
@@ -534,7 +530,7 @@ struct dentry *pohmelfs_lookup(struct inode *dir, struct dentry *dentry, struct | |||
534 | goto out; | 530 | goto out; |
535 | } | 531 | } |
536 | 532 | ||
537 | dprintk("%s: dir: %p, dir_ino: %llu, name: '%s', len: %u, dir_state: %lx, ino: %lu.\n", | 533 | dprintk("%s: no inode dir: %p, dir_ino: %llu, name: '%s', len: %u, dir_state: %lx, ino: %lu.\n", |
538 | __func__, dir, parent->ino, | 534 | __func__, dir, parent->ino, |
539 | str.name, str.len, parent->state, ino); | 535 | str.name, str.len, parent->state, ino); |
540 | 536 | ||
@@ -543,6 +539,10 @@ struct dentry *pohmelfs_lookup(struct inode *dir, struct dentry *dentry, struct | |||
543 | goto out; | 539 | goto out; |
544 | } | 540 | } |
545 | 541 | ||
542 | err = pohmelfs_data_lock(parent, 0, ~0, lock_type); | ||
543 | if (err) | ||
544 | goto out; | ||
545 | |||
546 | err = pohmelfs_lookup_single(parent, &str, ino); | 546 | err = pohmelfs_lookup_single(parent, &str, ino); |
547 | if (err) | 547 | if (err) |
548 | goto out; | 548 | goto out; |
@@ -557,10 +557,10 @@ struct dentry *pohmelfs_lookup(struct inode *dir, struct dentry *dentry, struct | |||
557 | 557 | ||
558 | if (ino) { | 558 | if (ino) { |
559 | inode = ilookup(dir->i_sb, ino); | 559 | inode = ilookup(dir->i_sb, ino); |
560 | printk("%s: second lookup ino: %lu, inode: %p, name: '%s', hash: %x.\n", | 560 | dprintk("%s: second lookup ino: %lu, inode: %p, name: '%s', hash: %x.\n", |
561 | __func__, ino, inode, str.name, str.hash); | 561 | __func__, ino, inode, str.name, str.hash); |
562 | if (!inode) { | 562 | if (!inode) { |
563 | printk("%s: No inode for ino: %lu, name: '%s', hash: %x.\n", | 563 | dprintk("%s: No inode for ino: %lu, name: '%s', hash: %x.\n", |
564 | __func__, ino, str.name, str.hash); | 564 | __func__, ino, str.name, str.hash); |
565 | //return NULL; | 565 | //return NULL; |
566 | return ERR_PTR(-EACCES); | 566 | return ERR_PTR(-EACCES); |
diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c index 5bf16504cd6f..b2eaf9047266 100644 --- a/drivers/staging/pohmelfs/inode.c +++ b/drivers/staging/pohmelfs/inode.c | |||
@@ -1169,16 +1169,17 @@ err_out_put: | |||
1169 | static int pohmelfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) | 1169 | static int pohmelfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) |
1170 | { | 1170 | { |
1171 | struct inode *inode = dentry->d_inode; | 1171 | struct inode *inode = dentry->d_inode; |
1172 | #if 0 | ||
1172 | struct pohmelfs_inode *pi = POHMELFS_I(inode); | 1173 | struct pohmelfs_inode *pi = POHMELFS_I(inode); |
1173 | int err; | 1174 | int err; |
1174 | 1175 | ||
1175 | err = pohmelfs_data_lock(pi, 0, ~0, POHMELFS_READ_LOCK); | 1176 | err = pohmelfs_data_lock(pi, 0, ~0, POHMELFS_READ_LOCK); |
1176 | if (err) | 1177 | if (err) |
1177 | return err; | 1178 | return err; |
1178 | |||
1179 | dprintk("%s: ino: %llu, mode: %o, uid: %u, gid: %u, size: %llu.\n", | 1179 | dprintk("%s: ino: %llu, mode: %o, uid: %u, gid: %u, size: %llu.\n", |
1180 | __func__, pi->ino, inode->i_mode, inode->i_uid, | 1180 | __func__, pi->ino, inode->i_mode, inode->i_uid, |
1181 | inode->i_gid, inode->i_size); | 1181 | inode->i_gid, inode->i_size); |
1182 | #endif | ||
1182 | 1183 | ||
1183 | generic_fillattr(inode, stat); | 1184 | generic_fillattr(inode, stat); |
1184 | return 0; | 1185 | return 0; |
@@ -1342,14 +1343,6 @@ static void pohmelfs_put_super(struct super_block *sb) | |||
1342 | 1343 | ||
1343 | kfree(psb); | 1344 | kfree(psb); |
1344 | sb->s_fs_info = NULL; | 1345 | sb->s_fs_info = NULL; |
1345 | |||
1346 | pohmelfs_ftrans_exit(); | ||
1347 | } | ||
1348 | |||
1349 | static int pohmelfs_remount(struct super_block *sb, int *flags, char *data) | ||
1350 | { | ||
1351 | *flags |= MS_RDONLY; | ||
1352 | return 0; | ||
1353 | } | 1346 | } |
1354 | 1347 | ||
1355 | static int pohmelfs_statfs(struct dentry *dentry, struct kstatfs *buf) | 1348 | static int pohmelfs_statfs(struct dentry *dentry, struct kstatfs *buf) |
@@ -1394,42 +1387,33 @@ static int pohmelfs_show_options(struct seq_file *seq, struct vfsmount *vfs) | |||
1394 | return 0; | 1387 | return 0; |
1395 | } | 1388 | } |
1396 | 1389 | ||
1397 | static const struct super_operations pohmelfs_sb_ops = { | ||
1398 | .alloc_inode = pohmelfs_alloc_inode, | ||
1399 | .destroy_inode = pohmelfs_destroy_inode, | ||
1400 | .drop_inode = pohmelfs_drop_inode, | ||
1401 | .write_inode = pohmelfs_write_inode, | ||
1402 | .put_super = pohmelfs_put_super, | ||
1403 | .remount_fs = pohmelfs_remount, | ||
1404 | .statfs = pohmelfs_statfs, | ||
1405 | .show_options = pohmelfs_show_options, | ||
1406 | }; | ||
1407 | |||
1408 | enum { | 1390 | enum { |
1409 | pohmelfs_opt_idx, | 1391 | pohmelfs_opt_idx, |
1392 | pohmelfs_opt_crypto_thread_num, | ||
1393 | pohmelfs_opt_trans_max_pages, | ||
1394 | pohmelfs_opt_crypto_fail_unsupported, | ||
1395 | |||
1396 | /* Remountable options */ | ||
1410 | pohmelfs_opt_trans_scan_timeout, | 1397 | pohmelfs_opt_trans_scan_timeout, |
1411 | pohmelfs_opt_drop_scan_timeout, | 1398 | pohmelfs_opt_drop_scan_timeout, |
1412 | pohmelfs_opt_wait_on_page_timeout, | 1399 | pohmelfs_opt_wait_on_page_timeout, |
1413 | pohmelfs_opt_trans_retries, | 1400 | pohmelfs_opt_trans_retries, |
1414 | pohmelfs_opt_crypto_thread_num, | ||
1415 | pohmelfs_opt_trans_max_pages, | ||
1416 | pohmelfs_opt_crypto_fail_unsupported, | ||
1417 | pohmelfs_opt_mcache_timeout, | 1401 | pohmelfs_opt_mcache_timeout, |
1418 | }; | 1402 | }; |
1419 | 1403 | ||
1420 | static struct match_token pohmelfs_tokens[] = { | 1404 | static struct match_token pohmelfs_tokens[] = { |
1421 | {pohmelfs_opt_idx, "idx=%u"}, | 1405 | {pohmelfs_opt_idx, "idx=%u"}, |
1406 | {pohmelfs_opt_crypto_thread_num, "crypto_thread_num=%u"}, | ||
1407 | {pohmelfs_opt_trans_max_pages, "trans_max_pages=%u"}, | ||
1408 | {pohmelfs_opt_crypto_fail_unsupported, "crypto_fail_unsupported"}, | ||
1422 | {pohmelfs_opt_trans_scan_timeout, "trans_scan_timeout=%u"}, | 1409 | {pohmelfs_opt_trans_scan_timeout, "trans_scan_timeout=%u"}, |
1423 | {pohmelfs_opt_drop_scan_timeout, "drop_scan_timeout=%u"}, | 1410 | {pohmelfs_opt_drop_scan_timeout, "drop_scan_timeout=%u"}, |
1424 | {pohmelfs_opt_wait_on_page_timeout, "wait_on_page_timeout=%u"}, | 1411 | {pohmelfs_opt_wait_on_page_timeout, "wait_on_page_timeout=%u"}, |
1425 | {pohmelfs_opt_trans_retries, "trans_retries=%u"}, | 1412 | {pohmelfs_opt_trans_retries, "trans_retries=%u"}, |
1426 | {pohmelfs_opt_crypto_thread_num, "crypto_thread_num=%u"}, | ||
1427 | {pohmelfs_opt_trans_max_pages, "trans_max_pages=%u"}, | ||
1428 | {pohmelfs_opt_crypto_fail_unsupported, "crypto_fail_unsupported"}, | ||
1429 | {pohmelfs_opt_mcache_timeout, "mcache_timeout=%u"}, | 1413 | {pohmelfs_opt_mcache_timeout, "mcache_timeout=%u"}, |
1430 | }; | 1414 | }; |
1431 | 1415 | ||
1432 | static int pohmelfs_parse_options(char *options, struct pohmelfs_sb *psb) | 1416 | static int pohmelfs_parse_options(char *options, struct pohmelfs_sb *psb, int remount) |
1433 | { | 1417 | { |
1434 | char *p; | 1418 | char *p; |
1435 | substring_t args[MAX_OPT_ARGS]; | 1419 | substring_t args[MAX_OPT_ARGS]; |
@@ -1449,6 +1433,9 @@ static int pohmelfs_parse_options(char *options, struct pohmelfs_sb *psb) | |||
1449 | if (err) | 1433 | if (err) |
1450 | return err; | 1434 | return err; |
1451 | 1435 | ||
1436 | if (remount && token <= pohmelfs_opt_crypto_fail_unsupported) | ||
1437 | continue; | ||
1438 | |||
1452 | switch (token) { | 1439 | switch (token) { |
1453 | case pohmelfs_opt_idx: | 1440 | case pohmelfs_opt_idx: |
1454 | psb->idx = option; | 1441 | psb->idx = option; |
@@ -1485,6 +1472,25 @@ static int pohmelfs_parse_options(char *options, struct pohmelfs_sb *psb) | |||
1485 | return 0; | 1472 | return 0; |
1486 | } | 1473 | } |
1487 | 1474 | ||
1475 | static int pohmelfs_remount(struct super_block *sb, int *flags, char *data) | ||
1476 | { | ||
1477 | int err; | ||
1478 | struct pohmelfs_sb *psb = POHMELFS_SB(sb); | ||
1479 | unsigned long old_sb_flags = sb->s_flags; | ||
1480 | |||
1481 | err = pohmelfs_parse_options(data, psb, 1); | ||
1482 | if (err) | ||
1483 | goto err_out_restore; | ||
1484 | |||
1485 | if (!(*flags & MS_RDONLY)) | ||
1486 | sb->s_flags &= ~MS_RDONLY; | ||
1487 | return 0; | ||
1488 | |||
1489 | err_out_restore: | ||
1490 | sb->s_flags = old_sb_flags; | ||
1491 | return err; | ||
1492 | } | ||
1493 | |||
1488 | static void pohmelfs_flush_inode(struct pohmelfs_inode *pi, unsigned int count) | 1494 | static void pohmelfs_flush_inode(struct pohmelfs_inode *pi, unsigned int count) |
1489 | { | 1495 | { |
1490 | struct inode *inode = &pi->vfs_inode; | 1496 | struct inode *inode = &pi->vfs_inode; |
@@ -1753,6 +1759,57 @@ err_out_exit: | |||
1753 | return err; | 1759 | return err; |
1754 | } | 1760 | } |
1755 | 1761 | ||
1762 | static int pohmelfs_show_stats(struct seq_file *m, struct vfsmount *mnt) | ||
1763 | { | ||
1764 | struct netfs_state *st; | ||
1765 | struct pohmelfs_ctl *ctl; | ||
1766 | struct pohmelfs_sb *psb = POHMELFS_SB(mnt->mnt_sb); | ||
1767 | struct pohmelfs_config *c; | ||
1768 | |||
1769 | mutex_lock(&psb->state_lock); | ||
1770 | |||
1771 | seq_printf(m, "\nidx addr(:port) socket_type protocol active priority permissions\n"); | ||
1772 | |||
1773 | list_for_each_entry(c, &psb->state_list, config_entry) { | ||
1774 | st = &c->state; | ||
1775 | ctl = &st->ctl; | ||
1776 | |||
1777 | seq_printf(m, "%u ", ctl->idx); | ||
1778 | if (ctl->addr.sa_family == AF_INET) { | ||
1779 | struct sockaddr_in *sin = (struct sockaddr_in *)&st->ctl.addr; | ||
1780 | //seq_printf(m, "%pi4:%u", &sin->sin_addr.s_addr, ntohs(sin->sin_port)); | ||
1781 | seq_printf(m, "%u.%u.%u.%u:%u", NIPQUAD(sin->sin_addr.s_addr), ntohs(sin->sin_port)); | ||
1782 | } else if (ctl->addr.sa_family == AF_INET6) { | ||
1783 | struct sockaddr_in6 *sin = (struct sockaddr_in6 *)&st->ctl.addr; | ||
1784 | seq_printf(m, "%pi6:%u", &sin->sin6_addr, ntohs(sin->sin6_port)); | ||
1785 | } else { | ||
1786 | unsigned int i; | ||
1787 | for (i=0; i<ctl->addrlen; ++i) | ||
1788 | seq_printf(m, "%02x.", ctl->addr.addr[i]); | ||
1789 | } | ||
1790 | |||
1791 | seq_printf(m, " %u %u %d %u %x\n", | ||
1792 | ctl->type, ctl->proto, | ||
1793 | st->socket != NULL, | ||
1794 | ctl->prio, ctl->perm); | ||
1795 | } | ||
1796 | mutex_unlock(&psb->state_lock); | ||
1797 | |||
1798 | return 0; | ||
1799 | } | ||
1800 | |||
1801 | static const struct super_operations pohmelfs_sb_ops = { | ||
1802 | .alloc_inode = pohmelfs_alloc_inode, | ||
1803 | .destroy_inode = pohmelfs_destroy_inode, | ||
1804 | .drop_inode = pohmelfs_drop_inode, | ||
1805 | .write_inode = pohmelfs_write_inode, | ||
1806 | .put_super = pohmelfs_put_super, | ||
1807 | .remount_fs = pohmelfs_remount, | ||
1808 | .statfs = pohmelfs_statfs, | ||
1809 | .show_options = pohmelfs_show_options, | ||
1810 | .show_stats = pohmelfs_show_stats, | ||
1811 | }; | ||
1812 | |||
1756 | /* | 1813 | /* |
1757 | * Allocate private superblock and create root dir. | 1814 | * Allocate private superblock and create root dir. |
1758 | */ | 1815 | */ |
@@ -1764,8 +1821,6 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent) | |||
1764 | struct pohmelfs_inode *npi; | 1821 | struct pohmelfs_inode *npi; |
1765 | struct qstr str; | 1822 | struct qstr str; |
1766 | 1823 | ||
1767 | pohmelfs_ftrans_init(); | ||
1768 | |||
1769 | psb = kzalloc(sizeof(struct pohmelfs_sb), GFP_KERNEL); | 1824 | psb = kzalloc(sizeof(struct pohmelfs_sb), GFP_KERNEL); |
1770 | if (!psb) | 1825 | if (!psb) |
1771 | goto err_out_exit; | 1826 | goto err_out_exit; |
@@ -1816,7 +1871,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent) | |||
1816 | mutex_init(&psb->state_lock); | 1871 | mutex_init(&psb->state_lock); |
1817 | INIT_LIST_HEAD(&psb->state_list); | 1872 | INIT_LIST_HEAD(&psb->state_list); |
1818 | 1873 | ||
1819 | err = pohmelfs_parse_options((char *) data, psb); | 1874 | err = pohmelfs_parse_options((char *) data, psb, 0); |
1820 | if (err) | 1875 | if (err) |
1821 | goto err_out_free_sb; | 1876 | goto err_out_free_sb; |
1822 | 1877 | ||
@@ -1845,6 +1900,8 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent) | |||
1845 | err = PTR_ERR(npi); | 1900 | err = PTR_ERR(npi); |
1846 | goto err_out_crypto_exit; | 1901 | goto err_out_crypto_exit; |
1847 | } | 1902 | } |
1903 | set_bit(NETFS_INODE_REMOTE_SYNCED, &npi->state); | ||
1904 | clear_bit(NETFS_INODE_OWNED, &npi->state); | ||
1848 | 1905 | ||
1849 | root = &npi->vfs_inode; | 1906 | root = &npi->vfs_inode; |
1850 | 1907 | ||
@@ -1887,11 +1944,29 @@ static int pohmelfs_get_sb(struct file_system_type *fs_type, | |||
1887 | mnt); | 1944 | mnt); |
1888 | } | 1945 | } |
1889 | 1946 | ||
1947 | /* | ||
1948 | * We need this to sync all inodes earlier, since when writeback | ||
1949 | * is invoked from the umount/mntput path dcache is already shrunk, | ||
1950 | * see generic_shutdown_super(), and no inodes can access the path. | ||
1951 | */ | ||
1952 | static void pohmelfs_kill_super(struct super_block *sb) | ||
1953 | { | ||
1954 | struct writeback_control wbc = { | ||
1955 | .sync_mode = WB_SYNC_ALL, | ||
1956 | .range_start = 0, | ||
1957 | .range_end = LLONG_MAX, | ||
1958 | .nr_to_write = LONG_MAX, | ||
1959 | }; | ||
1960 | generic_sync_sb_inodes(sb, &wbc); | ||
1961 | |||
1962 | kill_anon_super(sb); | ||
1963 | } | ||
1964 | |||
1890 | static struct file_system_type pohmel_fs_type = { | 1965 | static struct file_system_type pohmel_fs_type = { |
1891 | .owner = THIS_MODULE, | 1966 | .owner = THIS_MODULE, |
1892 | .name = "pohmel", | 1967 | .name = "pohmel", |
1893 | .get_sb = pohmelfs_get_sb, | 1968 | .get_sb = pohmelfs_get_sb, |
1894 | .kill_sb = kill_anon_super, | 1969 | .kill_sb = pohmelfs_kill_super, |
1895 | }; | 1970 | }; |
1896 | 1971 | ||
1897 | /* | 1972 | /* |
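
The pohmelfs superblock code now shares one option parser between mount and remount: the tokens that cannot change on a live mount (idx, crypto_thread_num, trans_max_pages, crypto_fail_unsupported) sit at the top of the enum, and the parser skips any token at or below that boundary when called with remount set, so pohmelfs_remount() only has to restore the old flags on error. A condensed sketch of the idea with hypothetical tokens and fields:

#include <linux/parser.h>
#include <linux/string.h>

struct foo_sb {
	unsigned int idx;	/* fixed at mount time */
	unsigned int timeout;	/* remountable */
};

enum {
	opt_idx,
	opt_last_static = opt_idx,	/* everything <= this is mount-only */
	opt_timeout,
	opt_err,
};

static const match_table_t foo_tokens = {
	{opt_idx,     "idx=%u"},
	{opt_timeout, "timeout=%u"},
	{opt_err,     NULL},
};

static int foo_parse_options(char *options, struct foo_sb *sb, int remount)
{
	substring_t args[MAX_OPT_ARGS];
	char *p;
	int option, token;

	if (!options)
		return 0;

	while ((p = strsep(&options, ",")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, foo_tokens, args);
		if (token == opt_err || match_int(&args[0], &option))
			return -EINVAL;

		if (remount && token <= opt_last_static)
			continue;	/* silently ignored on remount */

		switch (token) {
		case opt_idx:
			sb->idx = option;
			break;
		case opt_timeout:
			sb->timeout = option;
			break;
		}
	}
	return 0;
}
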
diff --git a/drivers/staging/pohmelfs/lock.c b/drivers/staging/pohmelfs/lock.c index ad4a18559bdd..22fef18cae90 100644 --- a/drivers/staging/pohmelfs/lock.c +++ b/drivers/staging/pohmelfs/lock.c | |||
@@ -41,7 +41,8 @@ static int pohmelfs_send_lock_trans(struct pohmelfs_inode *pi, | |||
41 | path_len = err; | 41 | path_len = err; |
42 | 42 | ||
43 | err = -ENOMEM; | 43 | err = -ENOMEM; |
44 | t = netfs_trans_alloc(psb, path_len + sizeof(struct netfs_lock) + isize, 0, 0); | 44 | t = netfs_trans_alloc(psb, path_len + sizeof(struct netfs_lock) + isize, |
45 | NETFS_TRANS_SINGLE_DST, 0); | ||
45 | if (!t) | 46 | if (!t) |
46 | goto err_out_exit; | 47 | goto err_out_exit; |
47 | 48 | ||
diff --git a/drivers/staging/pohmelfs/net.c b/drivers/staging/pohmelfs/net.c index c9b8540c1efe..11ecac026ca7 100644 --- a/drivers/staging/pohmelfs/net.c +++ b/drivers/staging/pohmelfs/net.c | |||
@@ -26,55 +26,6 @@ | |||
26 | 26 | ||
27 | #include "netfs.h" | 27 | #include "netfs.h" |
28 | 28 | ||
29 | static int pohmelfs_ftrans_size = 10240; | ||
30 | static u32 *pohmelfs_ftrans; | ||
31 | |||
32 | int pohmelfs_ftrans_init(void) | ||
33 | { | ||
34 | pohmelfs_ftrans = vmalloc(pohmelfs_ftrans_size * 4); | ||
35 | if (!pohmelfs_ftrans) | ||
36 | return -ENOMEM; | ||
37 | |||
38 | return 0; | ||
39 | } | ||
40 | |||
41 | void pohmelfs_ftrans_exit(void) | ||
42 | { | ||
43 | vfree(pohmelfs_ftrans); | ||
44 | } | ||
45 | |||
46 | void pohmelfs_ftrans_clean(u64 id) | ||
47 | { | ||
48 | if (pohmelfs_ftrans) { | ||
49 | u32 i = id & 0xffffffff; | ||
50 | int idx = i % pohmelfs_ftrans_size; | ||
51 | |||
52 | pohmelfs_ftrans[idx] = 0; | ||
53 | } | ||
54 | } | ||
55 | |||
56 | void pohmelfs_ftrans_update(u64 id) | ||
57 | { | ||
58 | if (pohmelfs_ftrans) { | ||
59 | u32 i = id & 0xffffffff; | ||
60 | int idx = i % pohmelfs_ftrans_size; | ||
61 | |||
62 | pohmelfs_ftrans[idx] = i; | ||
63 | } | ||
64 | } | ||
65 | |||
66 | int pohmelfs_ftrans_check(u64 id) | ||
67 | { | ||
68 | if (pohmelfs_ftrans) { | ||
69 | u32 i = id & 0xffffffff; | ||
70 | int idx = i % pohmelfs_ftrans_size; | ||
71 | |||
72 | return (pohmelfs_ftrans[idx] == i); | ||
73 | } | ||
74 | |||
75 | return -1; | ||
76 | } | ||
77 | |||
78 | /* | 29 | /* |
79 | * Async machinery lives here. | 30 | * Async machinery lives here. |
80 | * All commands being sent to server do _not_ require sync reply, | 31 | * All commands being sent to server do _not_ require sync reply, |
@@ -450,8 +401,24 @@ static int pohmelfs_readdir_response(struct netfs_state *st) | |||
450 | if (err != -EEXIST) | 401 | if (err != -EEXIST) |
451 | goto err_out_put; | 402 | goto err_out_put; |
452 | } else { | 403 | } else { |
404 | struct dentry *dentry, *alias, *pd; | ||
405 | |||
453 | set_bit(NETFS_INODE_REMOTE_SYNCED, &npi->state); | 406 | set_bit(NETFS_INODE_REMOTE_SYNCED, &npi->state); |
454 | clear_bit(NETFS_INODE_OWNED, &npi->state); | 407 | clear_bit(NETFS_INODE_OWNED, &npi->state); |
408 | |||
409 | pd = d_find_alias(&parent->vfs_inode); | ||
410 | if (pd) { | ||
411 | str.hash = full_name_hash(str.name, str.len); | ||
412 | dentry = d_alloc(pd, &str); | ||
413 | if (dentry) { | ||
414 | alias = d_materialise_unique(dentry, &npi->vfs_inode); | ||
415 | if (alias) | ||
416 | dput(dentry); | ||
417 | } | ||
418 | |||
419 | dput(dentry); | ||
420 | dput(pd); | ||
421 | } | ||
455 | } | 422 | } |
456 | } | 423 | } |
457 | out: | 424 | out: |
@@ -638,15 +605,12 @@ static int pohmelfs_transaction_response(struct netfs_state *st) | |||
638 | if (dst) { | 605 | if (dst) { |
639 | netfs_trans_remove_nolock(dst, st); | 606 | netfs_trans_remove_nolock(dst, st); |
640 | t = dst->trans; | 607 | t = dst->trans; |
641 | |||
642 | pohmelfs_ftrans_update(cmd->start); | ||
643 | } | 608 | } |
644 | mutex_unlock(&st->trans_lock); | 609 | mutex_unlock(&st->trans_lock); |
645 | 610 | ||
646 | if (!t) { | 611 | if (!t) { |
647 | int check = pohmelfs_ftrans_check(cmd->start); | 612 | printk("%s: failed to find transaction: start: %llu: id: %llu, size: %u, ext: %u.\n", |
648 | printk("%s: failed to find transaction: start: %llu: id: %llu, size: %u, ext: %u, double: %d.\n", | 613 | __func__, cmd->start, cmd->id, cmd->size, cmd->ext); |
649 | __func__, cmd->start, cmd->id, cmd->size, cmd->ext, check); | ||
650 | err = -EINVAL; | 614 | err = -EINVAL; |
651 | goto out; | 615 | goto out; |
652 | } | 616 | } |
diff --git a/drivers/staging/pohmelfs/netfs.h b/drivers/staging/pohmelfs/netfs.h index 2ff21ae5bb12..c78cfcb042fb 100644 --- a/drivers/staging/pohmelfs/netfs.h +++ b/drivers/staging/pohmelfs/netfs.h | |||
@@ -87,6 +87,7 @@ enum { | |||
87 | POHMELFS_FLAGS_DEL, /* Network state control message for DEL */ | 87 | POHMELFS_FLAGS_DEL, /* Network state control message for DEL */ |
88 | POHMELFS_FLAGS_SHOW, /* Network state control message for SHOW */ | 88 | POHMELFS_FLAGS_SHOW, /* Network state control message for SHOW */ |
89 | POHMELFS_FLAGS_CRYPTO, /* Crypto data control message */ | 89 | POHMELFS_FLAGS_CRYPTO, /* Crypto data control message */ |
90 | POHMELFS_FLAGS_MODIFY, /* Network state modification message */ | ||
90 | }; | 91 | }; |
91 | 92 | ||
92 | /* | 93 | /* |
@@ -116,16 +117,20 @@ struct pohmelfs_crypto | |||
116 | unsigned char data[0]; /* Algorithm string, key and IV */ | 117 | unsigned char data[0]; /* Algorithm string, key and IV */ |
117 | }; | 118 | }; |
118 | 119 | ||
120 | #define POHMELFS_IO_PERM_READ (1<<0) | ||
121 | #define POHMELFS_IO_PERM_WRITE (1<<1) | ||
122 | |||
119 | /* | 123 | /* |
120 | * Configuration command used to create table of different remote servers. | 124 | * Configuration command used to create table of different remote servers. |
121 | */ | 125 | */ |
122 | struct pohmelfs_ctl | 126 | struct pohmelfs_ctl |
123 | { | 127 | { |
124 | unsigned int idx; /* Config index */ | 128 | __u32 idx; /* Config index */ |
125 | unsigned int type; /* Socket type */ | 129 | __u32 type; /* Socket type */ |
126 | unsigned int proto; /* Socket protocol */ | 130 | __u32 proto; /* Socket protocol */ |
127 | unsigned int addrlen; /* Size of the address */ | 131 | __u16 addrlen; /* Size of the address */ |
128 | unsigned short unused; /* Align structure by 4 bytes */ | 132 | __u16 perm; /* IO permission */ |
133 | __u16 prio; /* IO priority */ | ||
129 | struct saddr addr; /* Remote server address */ | 134 | struct saddr addr; /* Remote server address */ |
130 | }; | 135 | }; |
131 | 136 | ||
@@ -921,12 +926,6 @@ static inline void pohmelfs_mcache_put(struct pohmelfs_sb *psb, | |||
921 | pohmelfs_mcache_free(psb, m); | 926 | pohmelfs_mcache_free(psb, m); |
922 | } | 927 | } |
923 | 928 | ||
924 | int pohmelfs_ftrans_init(void); | ||
925 | void pohmelfs_ftrans_exit(void); | ||
926 | void pohmelfs_ftrans_update(u64 id); | ||
927 | int pohmelfs_ftrans_check(u64 id); | ||
928 | void pohmelfs_ftrans_clean(u64 id); | ||
929 | |||
930 | #endif /* __KERNEL__*/ | 929 | #endif /* __KERNEL__*/ |
931 | 930 | ||
932 | #endif /* __NETFS_H */ | 931 | #endif /* __NETFS_H */ |
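
The struct pohmelfs_ctl change above is an ABI fix as much as a feature: the fields that travel over the netlink/connector channel switch from plain unsigned int/short to the fixed-width __u32/__u16 types, and the old alignment padding becomes real perm/prio fields, so the layout is explicit and identical for 32- and 64-bit userspace. A structure used in such an interface is typically declared along these lines (illustrative only, not the real pohmelfs_ctl):

#include <linux/types.h>

/* layout is part of the ABI: fixed-width types, explicit ordering */
struct foo_ctl {
	__u32 idx;
	__u32 type;
	__u32 proto;
	__u16 addrlen;
	__u16 perm;	/* POHMELFS_IO_PERM_* style bitmask */
	__u16 prio;
	__u16 pad;	/* make the trailing padding explicit */
};
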
diff --git a/drivers/staging/pohmelfs/trans.c b/drivers/staging/pohmelfs/trans.c index bcb59425a21c..fef5f9bd6920 100644 --- a/drivers/staging/pohmelfs/trans.c +++ b/drivers/staging/pohmelfs/trans.c | |||
@@ -456,34 +456,25 @@ int netfs_trans_finish_send(struct netfs_trans *t, struct pohmelfs_sb *psb) | |||
456 | __func__, t, t->gen, t->iovec.iov_len, t->page_num, psb->active_state); | 456 | __func__, t, t->gen, t->iovec.iov_len, t->page_num, psb->active_state); |
457 | #endif | 457 | #endif |
458 | mutex_lock(&psb->state_lock); | 458 | mutex_lock(&psb->state_lock); |
459 | list_for_each_entry(c, &psb->state_list, config_entry) { | ||
460 | st = &c->state; | ||
459 | 461 | ||
460 | if ((t->flags & NETFS_TRANS_SINGLE_DST) && psb->active_state) { | 462 | if (t->flags & NETFS_TRANS_SINGLE_DST) { |
461 | st = &psb->active_state->state; | 463 | if (!(st->ctl.perm & POHMELFS_IO_PERM_READ)) |
462 | 464 | continue; | |
463 | err = -EPIPE; | 465 | } else { |
464 | if (netfs_state_poll(st) & POLLOUT) { | 466 | if (!(st->ctl.perm & POHMELFS_IO_PERM_WRITE)) |
465 | err = netfs_trans_push_dst(t, st); | 467 | continue; |
466 | if (!err) { | ||
467 | err = netfs_trans_send(t, st); | ||
468 | if (err) { | ||
469 | netfs_trans_drop_last(t, st); | ||
470 | } else { | ||
471 | pohmelfs_switch_active(psb); | ||
472 | goto out; | ||
473 | } | ||
474 | } | ||
475 | } | 468 | } |
476 | pohmelfs_switch_active(psb); | ||
477 | } | ||
478 | 469 | ||
479 | list_for_each_entry(c, &psb->state_list, config_entry) { | 470 | if (psb->active_state && (psb->active_state->state.ctl.prio >= st->ctl.prio)) |
480 | st = &c->state; | 471 | st = &psb->active_state->state; |
481 | 472 | ||
482 | err = netfs_trans_push(t, st); | 473 | err = netfs_trans_push(t, st); |
483 | if (!err && (t->flags & NETFS_TRANS_SINGLE_DST)) | 474 | if (!err && (t->flags & NETFS_TRANS_SINGLE_DST)) |
484 | break; | 475 | break; |
485 | } | 476 | } |
486 | out: | 477 | |
487 | mutex_unlock(&psb->state_lock); | 478 | mutex_unlock(&psb->state_lock); |
488 | #if 0 | 479 | #if 0 |
489 | dprintk("%s: fully sent t: %p, gen: %u, size: %u, page_num: %u, err: %d.\n", | 480 | dprintk("%s: fully sent t: %p, gen: %u, size: %u, page_num: %u, err: %d.\n", |
@@ -501,8 +492,6 @@ int netfs_trans_finish(struct netfs_trans *t, struct pohmelfs_sb *psb) | |||
501 | 492 | ||
502 | t->gen = atomic_inc_return(&psb->trans_gen); | 493 | t->gen = atomic_inc_return(&psb->trans_gen); |
503 | 494 | ||
504 | pohmelfs_ftrans_clean(t->gen); | ||
505 | |||
506 | cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) + | 495 | cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) + |
507 | t->attached_size + t->attached_pages * sizeof(struct netfs_cmd); | 496 | t->attached_size + t->attached_pages * sizeof(struct netfs_cmd); |
508 | cmd->cmd = NETFS_TRANS; | 497 | cmd->cmd = NETFS_TRANS; |
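
The pohmelfs change above stops special-casing a single active destination and instead walks every configured state, filtering on the new per-server perm bits and preferring the active state when its prio is at least as high. In the single-destination case this reduces to picking the first eligible state; a minimal sketch of that reduced case follows, where IO_PERM_*, struct io_state and pick_dst() are illustrative stand-ins, not the pohmelfs types:

    #include <linux/stddef.h>

    #define IO_PERM_READ  1
    #define IO_PERM_WRITE 2

    struct io_state { unsigned int perm; unsigned int prio; };

    static struct io_state *pick_dst(struct io_state *st, int n,
                                     struct io_state *active, int read_only)
    {
            unsigned int need = read_only ? IO_PERM_READ : IO_PERM_WRITE;
            int i;

            for (i = 0; i < n; i++) {
                    if (!(st[i].perm & need))
                            continue;               /* server not allowed for this IO */
                    if (active && active->prio >= st[i].prio)
                            return active;          /* keep the higher-priority active state */
                    return &st[i];
            }
            return NULL;                            /* no eligible destination */
    }
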
diff --git a/drivers/staging/rt2860/rt_main_dev.c b/drivers/staging/rt2860/rt_main_dev.c index cf17bcdd7333..6c4396f0903b 100644 --- a/drivers/staging/rt2860/rt_main_dev.c +++ b/drivers/staging/rt2860/rt_main_dev.c | |||
@@ -722,6 +722,20 @@ err: | |||
722 | return (-1); | 722 | return (-1); |
723 | } /* End of rt28xx_open */ | 723 | } /* End of rt28xx_open */ |
724 | 724 | ||
725 | static const struct net_device_ops rt2860_netdev_ops = { | ||
726 | .ndo_open = MainVirtualIF_open, | ||
727 | .ndo_stop = MainVirtualIF_close, | ||
728 | .ndo_do_ioctl = rt28xx_ioctl, | ||
729 | .ndo_get_stats = RT28xx_get_ether_stats, | ||
730 | .ndo_validate_addr = NULL, | ||
731 | .ndo_set_mac_address = eth_mac_addr, | ||
732 | .ndo_change_mtu = eth_change_mtu, | ||
733 | #ifdef IKANOS_VX_1X0 | ||
734 | .ndo_start_xmit = IKANOS_DataFramesTx, | ||
735 | #else | ||
736 | .ndo_start_xmit = rt28xx_send_packets, | ||
737 | #endif | ||
738 | }; | ||
725 | 739 | ||
726 | /* Must not be called for mdev and apdev */ | 740 | /* Must not be called for mdev and apdev */ |
727 | static NDIS_STATUS rt_ieee80211_if_setup(struct net_device *dev, PRTMP_ADAPTER pAd) | 741 | static NDIS_STATUS rt_ieee80211_if_setup(struct net_device *dev, PRTMP_ADAPTER pAd) |
@@ -733,11 +747,6 @@ static NDIS_STATUS rt_ieee80211_if_setup(struct net_device *dev, PRTMP_ADAPTER p | |||
733 | 747 | ||
734 | 748 | ||
735 | //ether_setup(dev); | 749 | //ether_setup(dev); |
736 | dev->hard_start_xmit = rt28xx_send_packets; | ||
737 | |||
738 | #ifdef IKANOS_VX_1X0 | ||
739 | dev->hard_start_xmit = IKANOS_DataFramesTx; | ||
740 | #endif // IKANOS_VX_1X0 // | ||
741 | 750 | ||
742 | #ifdef CONFIG_STA_SUPPORT | 751 | #ifdef CONFIG_STA_SUPPORT |
743 | #if WIRELESS_EXT >= 12 | 752 | #if WIRELESS_EXT >= 12 |
@@ -760,12 +769,8 @@ static NDIS_STATUS rt_ieee80211_if_setup(struct net_device *dev, PRTMP_ADAPTER p | |||
760 | #if WIRELESS_EXT < 21 | 769 | #if WIRELESS_EXT < 21 |
761 | dev->get_wireless_stats = rt28xx_get_wireless_stats; | 770 | dev->get_wireless_stats = rt28xx_get_wireless_stats; |
762 | #endif | 771 | #endif |
763 | dev->get_stats = RT28xx_get_ether_stats; | ||
764 | dev->open = MainVirtualIF_open; //rt28xx_open; | ||
765 | dev->stop = MainVirtualIF_close; //rt28xx_close; | ||
766 | dev->priv_flags = INT_MAIN; | 772 | dev->priv_flags = INT_MAIN; |
767 | dev->do_ioctl = rt28xx_ioctl; | 773 | dev->netdev_ops = &rt2860_netdev_ops; |
768 | dev->validate_addr = NULL; | ||
769 | // find available device name | 774 | // find available device name |
770 | for (i = 0; i < 8; i++) | 775 | for (i = 0; i < 8; i++) |
771 | { | 776 | { |
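
The rt2860 hunk above is the standard conversion from per-field net_device callbacks (dev->open, dev->stop, dev->hard_start_xmit, ...) to a single const struct net_device_ops table; the same pattern repeats below for rt2870, rt3070, slicoss, sxg and wlan-ng. A generic sketch of the pattern, with placeholder foo_* handlers rather than any driver's real functions:

    #include <linux/netdevice.h>

    static int foo_open(struct net_device *dev)  { return 0; }
    static int foo_stop(struct net_device *dev)  { return 0; }
    static int foo_xmit(struct sk_buff *skb, struct net_device *dev)
    {
            dev_kfree_skb(skb);
            return NETDEV_TX_OK;
    }

    static const struct net_device_ops foo_netdev_ops = {
            .ndo_open       = foo_open,
            .ndo_stop       = foo_stop,
            .ndo_start_xmit = foo_xmit,
    };

    static void foo_setup(struct net_device *dev)
    {
            /* Replaces the old dev->open / dev->stop / dev->hard_start_xmit
             * assignments with one const ops table. */
            dev->netdev_ops = &foo_netdev_ops;
    }
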
diff --git a/drivers/staging/rt2870/rt2870.h b/drivers/staging/rt2870/rt2870.h index 5dd15aac9ce7..a42caa370808 100644 --- a/drivers/staging/rt2870/rt2870.h +++ b/drivers/staging/rt2870/rt2870.h | |||
@@ -96,6 +96,7 @@ | |||
96 | {USB_DEVICE(0x0DF6,0x002B)}, /* Sitecom */ \ | 96 | {USB_DEVICE(0x0DF6,0x002B)}, /* Sitecom */ \ |
97 | {USB_DEVICE(0x0DF6,0x002C)}, /* Sitecom */ \ | 97 | {USB_DEVICE(0x0DF6,0x002C)}, /* Sitecom */ \ |
98 | {USB_DEVICE(0x0DF6,0x002D)}, /* Sitecom */ \ | 98 | {USB_DEVICE(0x0DF6,0x002D)}, /* Sitecom */ \ |
99 | {USB_DEVICE(0x0DF6,0x0039)}, /* Sitecom */ \ | ||
99 | {USB_DEVICE(0x14B2,0x3C06)}, /* Conceptronic */ \ | 100 | {USB_DEVICE(0x14B2,0x3C06)}, /* Conceptronic */ \ |
100 | {USB_DEVICE(0x14B2,0x3C28)}, /* Conceptronic */ \ | 101 | {USB_DEVICE(0x14B2,0x3C28)}, /* Conceptronic */ \ |
101 | {USB_DEVICE(0x2019,0xED06)}, /* Planex Communications, Inc. */ \ | 102 | {USB_DEVICE(0x2019,0xED06)}, /* Planex Communications, Inc. */ \ |
diff --git a/drivers/staging/rt2870/rt_main_dev.c b/drivers/staging/rt2870/rt_main_dev.c index 313ecea0bfa8..48ad41136d0f 100644 --- a/drivers/staging/rt2870/rt_main_dev.c +++ b/drivers/staging/rt2870/rt_main_dev.c | |||
@@ -855,6 +855,20 @@ err: | |||
855 | return (-1); | 855 | return (-1); |
856 | } /* End of rt28xx_open */ | 856 | } /* End of rt28xx_open */ |
857 | 857 | ||
858 | static const struct net_device_ops rt2870_netdev_ops = { | ||
859 | .ndo_open = MainVirtualIF_open, | ||
860 | .ndo_stop = MainVirtualIF_close, | ||
861 | .ndo_do_ioctl = rt28xx_ioctl, | ||
862 | .ndo_get_stats = RT28xx_get_ether_stats, | ||
863 | .ndo_validate_addr = NULL, | ||
864 | .ndo_set_mac_address = eth_mac_addr, | ||
865 | .ndo_change_mtu = eth_change_mtu, | ||
866 | #ifdef IKANOS_VX_1X0 | ||
867 | .ndo_start_xmit = IKANOS_DataFramesTx, | ||
868 | #else | ||
869 | .ndo_start_xmit = rt28xx_send_packets, | ||
870 | #endif | ||
871 | }; | ||
858 | 872 | ||
859 | /* Must not be called for mdev and apdev */ | 873 | /* Must not be called for mdev and apdev */ |
860 | static NDIS_STATUS rt_ieee80211_if_setup(struct net_device *dev, PRTMP_ADAPTER pAd) | 874 | static NDIS_STATUS rt_ieee80211_if_setup(struct net_device *dev, PRTMP_ADAPTER pAd) |
@@ -866,12 +880,6 @@ static NDIS_STATUS rt_ieee80211_if_setup(struct net_device *dev, PRTMP_ADAPTER p | |||
866 | 880 | ||
867 | 881 | ||
868 | //ether_setup(dev); | 882 | //ether_setup(dev); |
869 | dev->hard_start_xmit = rt28xx_send_packets; | ||
870 | |||
871 | #ifdef IKANOS_VX_1X0 | ||
872 | dev->hard_start_xmit = IKANOS_DataFramesTx; | ||
873 | #endif // IKANOS_VX_1X0 // | ||
874 | |||
875 | // dev->set_multicast_list = ieee80211_set_multicast_list; | 883 | // dev->set_multicast_list = ieee80211_set_multicast_list; |
876 | // dev->change_mtu = ieee80211_change_mtu; | 884 | // dev->change_mtu = ieee80211_change_mtu; |
877 | #ifdef CONFIG_STA_SUPPORT | 885 | #ifdef CONFIG_STA_SUPPORT |
@@ -895,16 +903,10 @@ static NDIS_STATUS rt_ieee80211_if_setup(struct net_device *dev, PRTMP_ADAPTER p | |||
895 | #if WIRELESS_EXT < 21 | 903 | #if WIRELESS_EXT < 21 |
896 | dev->get_wireless_stats = rt28xx_get_wireless_stats; | 904 | dev->get_wireless_stats = rt28xx_get_wireless_stats; |
897 | #endif | 905 | #endif |
898 | dev->get_stats = RT28xx_get_ether_stats; | ||
899 | dev->open = MainVirtualIF_open; //rt28xx_open; | ||
900 | dev->stop = MainVirtualIF_close; //rt28xx_close; | ||
901 | // dev->uninit = ieee80211_if_reinit; | 906 | // dev->uninit = ieee80211_if_reinit; |
902 | // dev->destructor = ieee80211_if_free; | 907 | // dev->destructor = ieee80211_if_free; |
903 | dev->priv_flags = INT_MAIN; | 908 | dev->priv_flags = INT_MAIN; |
904 | dev->do_ioctl = rt28xx_ioctl; | 909 | dev->netdev_ops = &rt2870_netdev_ops; |
905 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24) | ||
906 | dev->validate_addr = NULL; | ||
907 | #endif | ||
908 | // find available device name | 910 | // find available device name |
909 | for (i = 0; i < 8; i++) | 911 | for (i = 0; i < 8; i++) |
910 | { | 912 | { |
diff --git a/drivers/staging/rt3070/rt_main_dev.c b/drivers/staging/rt3070/rt_main_dev.c index c000646286e6..81f769cf1096 100644 --- a/drivers/staging/rt3070/rt_main_dev.c +++ b/drivers/staging/rt3070/rt_main_dev.c | |||
@@ -436,7 +436,6 @@ static int rt28xx_init(IN struct net_device *net_dev) | |||
436 | // OID_SET_HT_PHYMODE SetHT; | 436 | // OID_SET_HT_PHYMODE SetHT; |
437 | // WPDMA_GLO_CFG_STRUC GloCfg; | 437 | // WPDMA_GLO_CFG_STRUC GloCfg; |
438 | UINT32 MacCsr0 = 0; | 438 | UINT32 MacCsr0 = 0; |
439 | UINT32 MacValue = 0; | ||
440 | 439 | ||
441 | #ifdef RT2870 | 440 | #ifdef RT2870 |
442 | #ifdef INF_AMAZON_SE | 441 | #ifdef INF_AMAZON_SE |
@@ -849,6 +848,20 @@ err: | |||
849 | return (-1); | 848 | return (-1); |
850 | } /* End of rt28xx_open */ | 849 | } /* End of rt28xx_open */ |
851 | 850 | ||
851 | static const struct net_device_ops rt3070_netdev_ops = { | ||
852 | .ndo_open = MainVirtualIF_open, | ||
853 | .ndo_stop = MainVirtualIF_close, | ||
854 | .ndo_do_ioctl = rt28xx_ioctl, | ||
855 | .ndo_get_stats = RT28xx_get_ether_stats, | ||
856 | .ndo_validate_addr = NULL, | ||
857 | .ndo_set_mac_address = eth_mac_addr, | ||
858 | .ndo_change_mtu = eth_change_mtu, | ||
859 | #ifdef IKANOS_VX_1X0 | ||
860 | .ndo_start_xmit = IKANOS_DataFramesTx, | ||
861 | #else | ||
862 | .ndo_start_xmit = rt28xx_send_packets, | ||
863 | #endif | ||
864 | }; | ||
852 | 865 | ||
853 | /* Must not be called for mdev and apdev */ | 866 | /* Must not be called for mdev and apdev */ |
854 | static NDIS_STATUS rt_ieee80211_if_setup(struct net_device *dev, PRTMP_ADAPTER pAd) | 867 | static NDIS_STATUS rt_ieee80211_if_setup(struct net_device *dev, PRTMP_ADAPTER pAd) |
@@ -860,12 +873,6 @@ static NDIS_STATUS rt_ieee80211_if_setup(struct net_device *dev, PRTMP_ADAPTER p | |||
860 | 873 | ||
861 | 874 | ||
862 | //ether_setup(dev); | 875 | //ether_setup(dev); |
863 | dev->hard_start_xmit = rt28xx_send_packets; | ||
864 | |||
865 | #ifdef IKANOS_VX_1X0 | ||
866 | dev->hard_start_xmit = IKANOS_DataFramesTx; | ||
867 | #endif // IKANOS_VX_1X0 // | ||
868 | |||
869 | // dev->set_multicast_list = ieee80211_set_multicast_list; | 876 | // dev->set_multicast_list = ieee80211_set_multicast_list; |
870 | // dev->change_mtu = ieee80211_change_mtu; | 877 | // dev->change_mtu = ieee80211_change_mtu; |
871 | #ifdef CONFIG_STA_SUPPORT | 878 | #ifdef CONFIG_STA_SUPPORT |
@@ -889,16 +896,10 @@ static NDIS_STATUS rt_ieee80211_if_setup(struct net_device *dev, PRTMP_ADAPTER p | |||
889 | #if WIRELESS_EXT < 21 | 896 | #if WIRELESS_EXT < 21 |
890 | dev->get_wireless_stats = rt28xx_get_wireless_stats; | 897 | dev->get_wireless_stats = rt28xx_get_wireless_stats; |
891 | #endif | 898 | #endif |
892 | dev->get_stats = RT28xx_get_ether_stats; | ||
893 | dev->open = MainVirtualIF_open; //rt28xx_open; | ||
894 | dev->stop = MainVirtualIF_close; //rt28xx_close; | ||
895 | // dev->uninit = ieee80211_if_reinit; | 899 | // dev->uninit = ieee80211_if_reinit; |
896 | // dev->destructor = ieee80211_if_free; | 900 | // dev->destructor = ieee80211_if_free; |
897 | dev->priv_flags = INT_MAIN; | 901 | dev->priv_flags = INT_MAIN; |
898 | dev->do_ioctl = rt28xx_ioctl; | 902 | dev->netdev_ops = &rt3070_netdev_ops; |
899 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24) | ||
900 | dev->validate_addr = NULL; | ||
901 | #endif | ||
902 | // find available device name | 903 | // find available device name |
903 | for (i = 0; i < 8; i++) | 904 | for (i = 0; i < 8; i++) |
904 | { | 905 | { |
diff --git a/drivers/staging/slicoss/README b/drivers/staging/slicoss/README index 2d5b1127ce51..70f49099c065 100644 --- a/drivers/staging/slicoss/README +++ b/drivers/staging/slicoss/README | |||
@@ -10,7 +10,36 @@ TODO: | |||
10 | - move firmware loading to request_firmware() | 10 | - move firmware loading to request_firmware() |
11 | - remove direct memory access of structures | 11 | - remove direct memory access of structures |
12 | - any remaining sparse and checkpatch.pl warnings | 12 | - any remaining sparse and checkpatch.pl warnings |
13 | - any netdev recommended changes | 13 | |
14 | - use net_device_ops | ||
15 | - use dev->stats rather than adapter->stats | ||
16 | - don't cast netdev_priv, it is already void | ||
17 | - use compare_ether_addr | ||
18 | - GET RID OF MACROS | ||
19 | - work on all architectures | ||
20 | - without CONFIG_X86_64 confusion | ||
21 | - do 64 bit correctly | ||
22 | - don't depend on order of union | ||
23 | - get rid of ASSERT(), use BUG() instead but only where necessary | ||
24 | looks like most aren't really useful | ||
25 | - no new SIOCDEVPRIVATE ioctl allowed | ||
26 | - don't use module_param for configuring interrupt mitigation | ||
27 | use ethtool instead | ||
28 | - reorder code to eliminate use of forward declarations | ||
29 | - don't keep private linked list of drivers. | ||
30 | - remove all the gratuitous debug infrastructure | ||
31 | - use PCI_DEVICE() | ||
32 | - do ethtool correctly using ethtool_ops | ||
33 | - NAPI? | ||
34 | - wasted overhead of extra stats | ||
35 | - state variables for things that are | ||
36 | easily available and shouldn't be kept in card structure, cardnum, ... | ||
37 | slotnumber, events, ... | ||
38 | - get rid of slic_spinlock wrapper | ||
39 | - volatile == bad design => bad code | ||
40 | - locking too fine grained, not designed, just throws more locks | ||
41 | at the problem | ||
42 | |||
14 | 43 | ||
15 | Please send patches to: | 44 | Please send patches to: |
16 | Greg Kroah-Hartman <gregkh@suse.de> | 45 | Greg Kroah-Hartman <gregkh@suse.de> |
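
Two of the TODO items above, "use PCI_DEVICE()" and "use dev->stats rather than adapter->stats", correspond to common mainline idioms. A small illustrative sketch; the vendor/device IDs and the foo_* names are made up, not slicoss code:

    #include <linux/module.h>
    #include <linux/pci.h>
    #include <linux/netdevice.h>

    static const struct pci_device_id foo_pci_tbl[] = {
            { PCI_DEVICE(0x1234, 0x5678) },     /* replaces hand-rolled vendor/device entries */
            { }
    };
    MODULE_DEVICE_TABLE(pci, foo_pci_tbl);

    static int foo_xmit(struct sk_buff *skb, struct net_device *dev)
    {
            dev->stats.tx_packets++;            /* counters live in the net_device itself */
            dev->stats.tx_bytes += skb->len;
            dev_kfree_skb(skb);
            return NETDEV_TX_OK;
    }
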
diff --git a/drivers/staging/slicoss/slicoss.c b/drivers/staging/slicoss/slicoss.c index 948156348478..6f5d0bff4358 100644 --- a/drivers/staging/slicoss/slicoss.c +++ b/drivers/staging/slicoss/slicoss.c | |||
@@ -345,6 +345,19 @@ static void slic_init_adapter(struct net_device *netdev, | |||
345 | return; | 345 | return; |
346 | } | 346 | } |
347 | 347 | ||
348 | static const struct net_device_ops slic_netdev_ops = { | ||
349 | .ndo_open = slic_entry_open, | ||
350 | .ndo_stop = slic_entry_halt, | ||
351 | .ndo_start_xmit = slic_xmit_start, | ||
352 | .ndo_do_ioctl = slic_ioctl, | ||
353 | .ndo_set_mac_address = slic_mac_set_address, | ||
354 | .ndo_get_stats = slic_get_stats, | ||
355 | .ndo_set_multicast_list = slic_mcast_set_list, | ||
356 | .ndo_validate_addr = eth_validate_addr, | ||
357 | .ndo_set_mac_address = eth_mac_addr, | ||
358 | .ndo_change_mtu = eth_change_mtu, | ||
359 | }; | ||
360 | |||
348 | static int __devinit slic_entry_probe(struct pci_dev *pcidev, | 361 | static int __devinit slic_entry_probe(struct pci_dev *pcidev, |
349 | const struct pci_device_id *pci_tbl_entry) | 362 | const struct pci_device_id *pci_tbl_entry) |
350 | { | 363 | { |
@@ -442,13 +455,7 @@ static int __devinit slic_entry_probe(struct pci_dev *pcidev, | |||
442 | 455 | ||
443 | netdev->base_addr = (unsigned long)adapter->memorybase; | 456 | netdev->base_addr = (unsigned long)adapter->memorybase; |
444 | netdev->irq = adapter->irq; | 457 | netdev->irq = adapter->irq; |
445 | netdev->open = slic_entry_open; | 458 | netdev->netdev_ops = &slic_netdev_ops; |
446 | netdev->stop = slic_entry_halt; | ||
447 | netdev->hard_start_xmit = slic_xmit_start; | ||
448 | netdev->do_ioctl = slic_ioctl; | ||
449 | netdev->set_mac_address = slic_mac_set_address; | ||
450 | netdev->get_stats = slic_get_stats; | ||
451 | netdev->set_multicast_list = slic_mcast_set_list; | ||
452 | 459 | ||
453 | slic_debug_adapter_create(adapter); | 460 | slic_debug_adapter_create(adapter); |
454 | 461 | ||
@@ -1260,7 +1267,7 @@ static int slic_mcast_add_list(struct adapter *adapter, char *address) | |||
1260 | } | 1267 | } |
1261 | 1268 | ||
1262 | /* Doesn't already exist. Allocate a structure to hold it */ | 1269 | /* Doesn't already exist. Allocate a structure to hold it */ |
1263 | mcaddr = kmalloc(sizeof(struct mcast_address), GFP_KERNEL); | 1270 | mcaddr = kmalloc(sizeof(struct mcast_address), GFP_ATOMIC); |
1264 | if (mcaddr == NULL) | 1271 | if (mcaddr == NULL) |
1265 | return 1; | 1272 | return 1; |
1266 | 1273 | ||
@@ -2284,7 +2291,7 @@ static u32 slic_card_locate(struct adapter *adapter) | |||
2284 | } | 2291 | } |
2285 | if (!physcard) { | 2292 | if (!physcard) { |
2286 | /* no structure allocated for this physical card yet */ | 2293 | /* no structure allocated for this physical card yet */ |
2287 | physcard = kzalloc(sizeof(struct physcard), GFP_KERNEL); | 2294 | physcard = kzalloc(sizeof(struct physcard), GFP_ATOMIC); |
2288 | ASSERT(physcard); | 2295 | ASSERT(physcard); |
2289 | 2296 | ||
2290 | physcard->next = slic_global.phys_card; | 2297 | physcard->next = slic_global.phys_card; |
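
The two slicoss allocation changes above switch GFP_KERNEL to GFP_ATOMIC, presumably because those paths can be entered in atomic context (for example under a lock), where a GFP_KERNEL allocation may sleep. A generic sketch of the rule, with illustrative names:

    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct entry { struct entry *next; };
    static DEFINE_SPINLOCK(card_lock);

    static struct entry *add_entry(struct entry **head)
    {
            struct entry *e;
            unsigned long flags;

            spin_lock_irqsave(&card_lock, flags);
            e = kzalloc(sizeof(*e), GFP_ATOMIC);        /* GFP_KERNEL could sleep under the lock */
            if (e) {
                    e->next = *head;
                    *head = e;
            }
            spin_unlock_irqrestore(&card_lock, flags);
            return e;
    }

Outside of locks and interrupt context, GFP_KERNEL remains the right default, since it can reclaim memory instead of failing early.
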
diff --git a/drivers/staging/stlc45xx/Kconfig b/drivers/staging/stlc45xx/Kconfig index 8d3f46f190e8..947fb75a9c68 100644 --- a/drivers/staging/stlc45xx/Kconfig +++ b/drivers/staging/stlc45xx/Kconfig | |||
@@ -1,6 +1,6 @@ | |||
1 | config STLC45XX | 1 | config STLC45XX |
2 | tristate "stlc4550/4560 support" | 2 | tristate "stlc4550/4560 support" |
3 | depends on MAC80211 && WLAN_80211 && SPI_MASTER | 3 | depends on MAC80211 && WLAN_80211 && SPI_MASTER && GENERIC_HARDIRQS |
4 | ---help--- | 4 | ---help--- |
5 | This is a driver for stlc4550 and stlc4560 chipsets. | 5 | This is a driver for stlc4550 and stlc4560 chipsets. |
6 | 6 | ||
diff --git a/drivers/staging/sxg/sxg.c b/drivers/staging/sxg/sxg.c index 891f6e334672..076b3f7d39eb 100644 --- a/drivers/staging/sxg/sxg.c +++ b/drivers/staging/sxg/sxg.c | |||
@@ -322,6 +322,8 @@ int sxg_add_msi_isr(struct adapter_t *adapter) | |||
322 | int ret,i; | 322 | int ret,i; |
323 | 323 | ||
324 | if (!adapter->intrregistered) { | 324 | if (!adapter->intrregistered) { |
325 | spin_unlock_irqrestore(&sxg_global.driver_lock, | ||
326 | sxg_global.flags); | ||
325 | for (i=0; i<adapter->nr_msix_entries; i++) { | 327 | for (i=0; i<adapter->nr_msix_entries; i++) { |
326 | ret = request_irq (adapter->msi_entries[i].vector, | 328 | ret = request_irq (adapter->msi_entries[i].vector, |
327 | sxg_isr, | 329 | sxg_isr, |
@@ -329,6 +331,8 @@ int sxg_add_msi_isr(struct adapter_t *adapter) | |||
329 | adapter->netdev->name, | 331 | adapter->netdev->name, |
330 | adapter->netdev); | 332 | adapter->netdev); |
331 | if (ret) { | 333 | if (ret) { |
334 | spin_lock_irqsave(&sxg_global.driver_lock, | ||
335 | sxg_global.flags); | ||
332 | DBG_ERROR("sxg: MSI-X request_irq (%s) " | 336 | DBG_ERROR("sxg: MSI-X request_irq (%s) " |
333 | "FAILED [%x]\n", adapter->netdev->name, | 337 | "FAILED [%x]\n", adapter->netdev->name, |
334 | ret); | 338 | ret); |
@@ -336,6 +340,7 @@ int sxg_add_msi_isr(struct adapter_t *adapter) | |||
336 | } | 340 | } |
337 | } | 341 | } |
338 | } | 342 | } |
343 | spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags); | ||
339 | adapter->msi_enabled = TRUE; | 344 | adapter->msi_enabled = TRUE; |
340 | adapter->intrregistered = 1; | 345 | adapter->intrregistered = 1; |
341 | adapter->IntRegistered = TRUE; | 346 | adapter->IntRegistered = TRUE; |
@@ -896,6 +901,22 @@ static inline int sxg_read_config(struct adapter_t *adapter) | |||
896 | return status; | 901 | return status; |
897 | } | 902 | } |
898 | 903 | ||
904 | static const struct net_device_ops sxg_netdev_ops = { | ||
905 | .ndo_open = sxg_entry_open, | ||
906 | .ndo_stop = sxg_entry_halt, | ||
907 | .ndo_start_xmit = sxg_send_packets, | ||
908 | .ndo_do_ioctl = sxg_ioctl, | ||
909 | .ndo_change_mtu = sxg_change_mtu, | ||
910 | .ndo_get_stats = sxg_get_stats, | ||
911 | .ndo_set_multicast_list = sxg_mcast_set_list, | ||
912 | .ndo_validate_addr = eth_validate_addr, | ||
913 | #if XXXTODO | ||
914 | .ndo_set_mac_address = sxg_mac_set_address, | ||
915 | #else | ||
916 | .ndo_set_mac_address = eth_mac_addr, | ||
917 | #endif | ||
918 | }; | ||
919 | |||
899 | static int sxg_entry_probe(struct pci_dev *pcidev, | 920 | static int sxg_entry_probe(struct pci_dev *pcidev, |
900 | const struct pci_device_id *pci_tbl_entry) | 921 | const struct pci_device_id *pci_tbl_entry) |
901 | { | 922 | { |
@@ -1095,16 +1116,7 @@ static int sxg_entry_probe(struct pci_dev *pcidev, | |||
1095 | 1116 | ||
1096 | netdev->base_addr = (unsigned long)adapter->base_addr; | 1117 | netdev->base_addr = (unsigned long)adapter->base_addr; |
1097 | netdev->irq = adapter->irq; | 1118 | netdev->irq = adapter->irq; |
1098 | netdev->open = sxg_entry_open; | 1119 | netdev->netdev_ops = &sxg_netdev_ops; |
1099 | netdev->stop = sxg_entry_halt; | ||
1100 | netdev->hard_start_xmit = sxg_send_packets; | ||
1101 | netdev->do_ioctl = sxg_ioctl; | ||
1102 | netdev->change_mtu = sxg_change_mtu; | ||
1103 | #if XXXTODO | ||
1104 | netdev->set_mac_address = sxg_mac_set_address; | ||
1105 | #endif | ||
1106 | netdev->get_stats = sxg_get_stats; | ||
1107 | netdev->set_multicast_list = sxg_mcast_set_list; | ||
1108 | SET_ETHTOOL_OPS(netdev, &sxg_nic_ethtool_ops); | 1120 | SET_ETHTOOL_OPS(netdev, &sxg_nic_ethtool_ops); |
1109 | netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; | 1121 | netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; |
1110 | err = sxg_set_interrupt_capability(adapter); | 1122 | err = sxg_set_interrupt_capability(adapter); |
@@ -2247,6 +2259,8 @@ static int sxg_entry_open(struct net_device *dev) | |||
2247 | DBG_ERROR("sxg: %s EXIT\n", __func__); | 2259 | DBG_ERROR("sxg: %s EXIT\n", __func__); |
2248 | 2260 | ||
2249 | spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags); | 2261 | spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags); |
2262 | mod_timer(&adapter->watchdog_timer, jiffies); | ||
2263 | |||
2250 | return STATUS_SUCCESS; | 2264 | return STATUS_SUCCESS; |
2251 | } | 2265 | } |
2252 | 2266 | ||
@@ -2568,6 +2582,7 @@ static int sxg_dumb_sgl(struct sxg_x64_sgl *pSgl, | |||
2568 | u64 phys_addr; | 2582 | u64 phys_addr; |
2569 | unsigned long flags; | 2583 | unsigned long flags; |
2570 | unsigned long queue_id=0; | 2584 | unsigned long queue_id=0; |
2585 | int offload_cksum = 0; | ||
2571 | 2586 | ||
2572 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbSgl", | 2587 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbSgl", |
2573 | pSgl, SxgSgl, 0, 0); | 2588 | pSgl, SxgSgl, 0, 0); |
@@ -2606,7 +2621,11 @@ static int sxg_dumb_sgl(struct sxg_x64_sgl *pSgl, | |||
2606 | struct iphdr *ip; | 2621 | struct iphdr *ip; |
2607 | 2622 | ||
2608 | ip = ip_hdr(skb); | 2623 | ip = ip_hdr(skb); |
2609 | if ((ip->protocol == IPPROTO_TCP)&&(DataLength >= sizeof( | 2624 | if (ip->protocol == IPPROTO_TCP) |
2625 | offload_cksum = 1; | ||
2626 | if (!offload_cksum || !tcp_hdr(skb)) | ||
2627 | queue_id = 0; | ||
2628 | else if (offload_cksum && (DataLength >= sizeof( | ||
2610 | struct tcphdr))){ | 2629 | struct tcphdr))){ |
2611 | queue_id = ((ntohs(tcp_hdr(skb)->dest) == ISCSI_PORT) ? | 2630 | queue_id = ((ntohs(tcp_hdr(skb)->dest) == ISCSI_PORT) ? |
2612 | (ntohs (tcp_hdr(skb)->source) & | 2631 | (ntohs (tcp_hdr(skb)->source) & |
@@ -2615,8 +2634,11 @@ static int sxg_dumb_sgl(struct sxg_x64_sgl *pSgl, | |||
2615 | SXG_LARGE_SEND_QUEUE_MASK)); | 2634 | SXG_LARGE_SEND_QUEUE_MASK)); |
2616 | } | 2635 | } |
2617 | } else if (skb->protocol == htons(ETH_P_IPV6)) { | 2636 | } else if (skb->protocol == htons(ETH_P_IPV6)) { |
2618 | if ((ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) && (DataLength >= | 2637 | if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) |
2619 | sizeof(struct tcphdr)) ) { | 2638 | offload_cksum = 1; |
2639 | if (!offload_cksum || !tcp_hdr(skb)) | ||
2640 | queue_id = 0; | ||
2641 | else if (offload_cksum && (DataLength>=sizeof(struct tcphdr))){ | ||
2620 | queue_id = ((ntohs(tcp_hdr(skb)->dest) == ISCSI_PORT) ? | 2642 | queue_id = ((ntohs(tcp_hdr(skb)->dest) == ISCSI_PORT) ? |
2621 | (ntohs (tcp_hdr(skb)->source) & | 2643 | (ntohs (tcp_hdr(skb)->source) & |
2622 | SXG_LARGE_SEND_QUEUE_MASK): | 2644 | SXG_LARGE_SEND_QUEUE_MASK): |
@@ -2645,23 +2667,38 @@ static int sxg_dumb_sgl(struct sxg_x64_sgl *pSgl, | |||
2645 | } | 2667 | } |
2646 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbCmd", | 2668 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbCmd", |
2647 | XmtCmd, XmtRingInfo->Head, XmtRingInfo->Tail, 0); | 2669 | XmtCmd, XmtRingInfo->Head, XmtRingInfo->Tail, 0); |
2648 | /* Update stats */ | 2670 | memset(XmtCmd, '\0', sizeof(*XmtCmd)); |
2649 | adapter->stats.tx_packets++; | 2671 | XmtCmd->SgEntries = 1; |
2650 | adapter->stats.tx_bytes += DataLength; | 2672 | XmtCmd->Flags = 0; |
2651 | #if XXXTODO /* Stats stuff */ | 2673 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
2652 | if (SXG_MULTICAST_PACKET(EtherHdr)) { | 2674 | /* |
2653 | if (SXG_BROADCAST_PACKET(EtherHdr)) { | 2675 | * We need to set the Checksum in IP header to 0. This is |
2654 | adapter->Stats.DumbXmtBcastPkts++; | 2676 | * required by hardware. |
2655 | adapter->Stats.DumbXmtBcastBytes += DataLength; | 2677 | */ |
2678 | if (offload_cksum) { | ||
2679 | ip_hdr(skb)->check = 0x0; | ||
2680 | XmtCmd->CsumFlags.Flags |= SXG_SLOWCMD_CSUM_IP; | ||
2681 | XmtCmd->CsumFlags.Flags |= SXG_SLOWCMD_CSUM_TCP; | ||
2682 | /* | ||
2683 | * Dont know if length will require a change in | ||
2684 | * case of VLAN | ||
2685 | */ | ||
2686 | XmtCmd->CsumFlags.MacLen = ETH_HLEN; | ||
2687 | XmtCmd->CsumFlags.IpHl = skb_network_header_len(skb) >> | ||
2688 | SXG_NW_HDR_LEN_SHIFT; | ||
2656 | } else { | 2689 | } else { |
2657 | adapter->Stats.DumbXmtMcastPkts++; | 2690 | if (skb_checksum_help(skb)){ |
2658 | adapter->Stats.DumbXmtMcastBytes += DataLength; | 2691 | printk(KERN_EMERG "Dropped UDP packet for" |
2692 | " incorrect checksum calculation\n"); | ||
2693 | if (XmtCmd) | ||
2694 | SXG_ABORT_CMD(XmtRingInfo); | ||
2695 | spin_unlock_irqrestore(&adapter->XmtZeroLock, | ||
2696 | flags); | ||
2697 | return STATUS_SUCCESS; | ||
2698 | } | ||
2659 | } | 2699 | } |
2660 | } else { | ||
2661 | adapter->Stats.DumbXmtUcastPkts++; | ||
2662 | adapter->Stats.DumbXmtUcastBytes += DataLength; | ||
2663 | } | 2700 | } |
2664 | #endif | 2701 | |
2665 | /* | 2702 | /* |
2666 | * Fill in the command | 2703 | * Fill in the command |
2667 | * Copy out the first SGE to the command and adjust for offset | 2704 | * Copy out the first SGE to the command and adjust for offset |
@@ -2679,31 +2716,17 @@ static int sxg_dumb_sgl(struct sxg_x64_sgl *pSgl, | |||
2679 | (SXG_INVALID_SGL(phys_addr,skb->data_len))) | 2716 | (SXG_INVALID_SGL(phys_addr,skb->data_len))) |
2680 | { | 2717 | { |
2681 | spin_unlock_irqrestore(&adapter->XmtZeroLock, flags); | 2718 | spin_unlock_irqrestore(&adapter->XmtZeroLock, flags); |
2719 | if (XmtCmd) | ||
2720 | SXG_ABORT_CMD(XmtRingInfo); | ||
2682 | /* Silently drop this packet */ | 2721 | /* Silently drop this packet */ |
2683 | printk(KERN_EMERG"Dropped a packet for 64k boundary problem\n"); | 2722 | printk(KERN_EMERG"Dropped a packet for 64k boundary problem\n"); |
2684 | return STATUS_SUCCESS; | 2723 | return STATUS_SUCCESS; |
2685 | } | 2724 | } |
2686 | memset(XmtCmd, '\0', sizeof(*XmtCmd)); | ||
2687 | XmtCmd->Buffer.FirstSgeAddress = phys_addr; | 2725 | XmtCmd->Buffer.FirstSgeAddress = phys_addr; |
2688 | XmtCmd->Buffer.FirstSgeLength = DataLength; | 2726 | XmtCmd->Buffer.FirstSgeLength = DataLength; |
2689 | XmtCmd->Buffer.SgeOffset = 0; | 2727 | XmtCmd->Buffer.SgeOffset = 0; |
2690 | XmtCmd->Buffer.TotalLength = DataLength; | 2728 | XmtCmd->Buffer.TotalLength = DataLength; |
2691 | XmtCmd->SgEntries = 1; | ||
2692 | XmtCmd->Flags = 0; | ||
2693 | 2729 | ||
2694 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | ||
2695 | /* | ||
2696 | * We need to set the Checkum in IP header to 0. This is | ||
2697 | * required by hardware. | ||
2698 | */ | ||
2699 | ip_hdr(skb)->check = 0x0; | ||
2700 | XmtCmd->CsumFlags.Flags |= SXG_SLOWCMD_CSUM_IP; | ||
2701 | XmtCmd->CsumFlags.Flags |= SXG_SLOWCMD_CSUM_TCP; | ||
2702 | /* Dont know if length will require a change in case of VLAN */ | ||
2703 | XmtCmd->CsumFlags.MacLen = ETH_HLEN; | ||
2704 | XmtCmd->CsumFlags.IpHl = skb_network_header_len(skb) >> | ||
2705 | SXG_NW_HDR_LEN_SHIFT; | ||
2706 | } | ||
2707 | /* | 2730 | /* |
2708 | * Advance transmit cmd descripter by 1. | 2731 | * Advance transmit cmd descripter by 1. |
2709 | * NOTE - See comments in SxgTcpOutput where we write | 2732 | * NOTE - See comments in SxgTcpOutput where we write |
@@ -2715,6 +2738,24 @@ static int sxg_dumb_sgl(struct sxg_x64_sgl *pSgl, | |||
2715 | ASSERT((queue_id & ~SXG_LARGE_SEND_QUEUE_MASK) == 0); | 2738 | ASSERT((queue_id & ~SXG_LARGE_SEND_QUEUE_MASK) == 0); |
2716 | WRITE_REG(adapter->UcodeRegs[0].XmtCmd, ((queue_id << 16) | 1), TRUE); | 2739 | WRITE_REG(adapter->UcodeRegs[0].XmtCmd, ((queue_id << 16) | 1), TRUE); |
2717 | adapter->Stats.XmtQLen++; /* Stats within lock */ | 2740 | adapter->Stats.XmtQLen++; /* Stats within lock */ |
2741 | /* Update stats */ | ||
2742 | adapter->stats.tx_packets++; | ||
2743 | adapter->stats.tx_bytes += DataLength; | ||
2744 | #if XXXTODO /* Stats stuff */ | ||
2745 | if (SXG_MULTICAST_PACKET(EtherHdr)) { | ||
2746 | if (SXG_BROADCAST_PACKET(EtherHdr)) { | ||
2747 | adapter->Stats.DumbXmtBcastPkts++; | ||
2748 | adapter->Stats.DumbXmtBcastBytes += DataLength; | ||
2749 | } else { | ||
2750 | adapter->Stats.DumbXmtMcastPkts++; | ||
2751 | adapter->Stats.DumbXmtMcastBytes += DataLength; | ||
2752 | } | ||
2753 | } else { | ||
2754 | adapter->Stats.DumbXmtUcastPkts++; | ||
2755 | adapter->Stats.DumbXmtUcastBytes += DataLength; | ||
2756 | } | ||
2757 | #endif | ||
2758 | |||
2718 | spin_unlock_irqrestore(&adapter->XmtZeroLock, flags); | 2759 | spin_unlock_irqrestore(&adapter->XmtZeroLock, flags); |
2719 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDumSgl2", | 2760 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDumSgl2", |
2720 | XmtCmd, pSgl, SxgSgl, 0); | 2761 | XmtCmd, pSgl, SxgSgl, 0); |
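
In the sxg hunks above, checksum handling is now decided per protocol: when hardware offload applies, the IP header checksum is zeroed for the hardware to fill in; otherwise skb_checksum_help() computes it in software and the packet is dropped if that fails. A reduced sketch of the decision, with a made-up hw_can_offload flag standing in for the driver's offload_cksum logic:

    #include <linux/netdevice.h>
    #include <linux/ip.h>

    static int tx_checksum_prepare(struct sk_buff *skb, bool hw_can_offload)
    {
            if (skb->ip_summed != CHECKSUM_PARTIAL)
                    return 0;                   /* nothing to fix up */

            if (hw_can_offload) {
                    ip_hdr(skb)->check = 0;     /* hardware recomputes the IP checksum */
                    return 0;
            }

            /* No hardware help for this protocol: fill the checksum in software. */
            return skb_checksum_help(skb);      /* non-zero means the caller drops the skb */
    }
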
diff --git a/drivers/staging/uc2322/aten2011.c b/drivers/staging/uc2322/aten2011.c index 85b705453066..9c62f787cc9c 100644 --- a/drivers/staging/uc2322/aten2011.c +++ b/drivers/staging/uc2322/aten2011.c | |||
@@ -603,10 +603,9 @@ static void ATEN2011_bulk_out_data_callback(struct urb *urb) | |||
603 | 603 | ||
604 | tty = tty_port_tty_get(&ATEN2011_port->port->port); | 604 | tty = tty_port_tty_get(&ATEN2011_port->port->port); |
605 | 605 | ||
606 | if (tty && ATEN2011_port->open) { | 606 | if (tty && ATEN2011_port->open) |
607 | /* tell the tty driver that something has changed */ | 607 | /* tell the tty driver that something has changed */ |
608 | wake_up_interruptible(&tty->write_wait); | 608 | tty_wakeup(tty); |
609 | } | ||
610 | 609 | ||
611 | /* schedule_work(&ATEN2011_port->port->work); */ | 610 | /* schedule_work(&ATEN2011_port->port->work); */ |
612 | tty_kref_put(tty); | 611 | tty_kref_put(tty); |
@@ -825,12 +824,6 @@ static int ATEN2011_open(struct tty_struct *tty, struct usb_serial_port *port, | |||
825 | status = 0; | 824 | status = 0; |
826 | status = set_reg_sync(port, ATEN2011_port->ControlRegOffset, Data); | 825 | status = set_reg_sync(port, ATEN2011_port->ControlRegOffset, Data); |
827 | 826 | ||
828 | /* force low_latency on so that our tty_push actually forces * | ||
829 | * the data through,otherwise it is scheduled, and with * | ||
830 | * high data rates (like with OHCI) data can get lost. */ | ||
831 | |||
832 | if (tty) | ||
833 | tty->low_latency = 1; | ||
834 | /* | 827 | /* |
835 | * Check to see if we've set up our endpoint info yet | 828 | * Check to see if we've set up our endpoint info yet |
836 | * (can't set it up in ATEN2011_startup as the structures | 829 | * (can't set it up in ATEN2011_startup as the structures |
@@ -1473,22 +1466,7 @@ static void ATEN2011_set_termios(struct tty_struct *tty, | |||
1473 | 1466 | ||
1474 | cflag = tty->termios->c_cflag; | 1467 | cflag = tty->termios->c_cflag; |
1475 | 1468 | ||
1476 | if (!cflag) { | 1469 | dbg("%s - cflag %08x iflag %08x", __func__, |
1477 | dbg("%s %s", __func__, "cflag is NULL"); | ||
1478 | return; | ||
1479 | } | ||
1480 | |||
1481 | /* check that they really want us to change something */ | ||
1482 | if (old_termios) { | ||
1483 | if ((cflag == old_termios->c_cflag) && | ||
1484 | (RELEVANT_IFLAG(tty->termios->c_iflag) == | ||
1485 | RELEVANT_IFLAG(old_termios->c_iflag))) { | ||
1486 | dbg("%s", "Nothing to change"); | ||
1487 | return; | ||
1488 | } | ||
1489 | } | ||
1490 | |||
1491 | dbg("%s - clfag %08x iflag %08x", __func__, | ||
1492 | tty->termios->c_cflag, RELEVANT_IFLAG(tty->termios->c_iflag)); | 1470 | tty->termios->c_cflag, RELEVANT_IFLAG(tty->termios->c_iflag)); |
1493 | 1471 | ||
1494 | if (old_termios) { | 1472 | if (old_termios) { |
diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c index b2a606a36936..393e4df70dfd 100644 --- a/drivers/staging/wlan-ng/p80211netdev.c +++ b/drivers/staging/wlan-ng/p80211netdev.c | |||
@@ -711,6 +711,20 @@ static int wlan_change_mtu(netdevice_t *dev, int new_mtu) | |||
711 | return 0; | 711 | return 0; |
712 | } | 712 | } |
713 | 713 | ||
714 | static const struct net_device_ops p80211_netdev_ops = { | ||
715 | .ndo_init = p80211knetdev_init, | ||
716 | .ndo_open = p80211knetdev_open, | ||
717 | .ndo_stop = p80211knetdev_stop, | ||
718 | .ndo_get_stats = p80211knetdev_get_stats, | ||
719 | .ndo_start_xmit = p80211knetdev_hard_start_xmit, | ||
720 | .ndo_set_multicast_list = p80211knetdev_set_multicast_list, | ||
721 | .ndo_do_ioctl = p80211knetdev_do_ioctl, | ||
722 | .ndo_set_mac_address = p80211knetdev_set_mac_address, | ||
723 | .ndo_tx_timeout = p80211knetdev_tx_timeout, | ||
724 | .ndo_change_mtu = wlan_change_mtu, | ||
725 | .ndo_validate_addr = eth_validate_addr, | ||
726 | }; | ||
727 | |||
714 | /*---------------------------------------------------------------- | 728 | /*---------------------------------------------------------------- |
715 | * wlan_setup | 729 | * wlan_setup |
716 | * | 730 | * |
@@ -756,11 +770,7 @@ int wlan_setup(wlandevice_t *wlandev) | |||
756 | } else { | 770 | } else { |
757 | wlandev->netdev = dev; | 771 | wlandev->netdev = dev; |
758 | dev->ml_priv = wlandev; | 772 | dev->ml_priv = wlandev; |
759 | dev->hard_start_xmit = p80211knetdev_hard_start_xmit; | 773 | dev->netdev_ops = &p80211_netdev_ops; |
760 | dev->get_stats = p80211knetdev_get_stats; | ||
761 | dev->init = p80211knetdev_init; | ||
762 | dev->open = p80211knetdev_open; | ||
763 | dev->stop = p80211knetdev_stop; | ||
764 | 774 | ||
765 | mutex_init(&wlandev->ioctl_lock); | 775 | mutex_init(&wlandev->ioctl_lock); |
766 | /* block ioctls until fully initialised. Don't forget to call | 776 | /* block ioctls until fully initialised. Don't forget to call |
diff --git a/drivers/uio/uio_cif.c b/drivers/uio/uio_cif.c index c60b8fcf0e3e..28034c812914 100644 --- a/drivers/uio/uio_cif.c +++ b/drivers/uio/uio_cif.c | |||
@@ -147,5 +147,6 @@ static void __exit hilscher_exit_module(void) | |||
147 | module_init(hilscher_init_module); | 147 | module_init(hilscher_init_module); |
148 | module_exit(hilscher_exit_module); | 148 | module_exit(hilscher_exit_module); |
149 | 149 | ||
150 | MODULE_DEVICE_TABLE(pci, hilscher_pci_ids); | ||
150 | MODULE_LICENSE("GPL v2"); | 151 | MODULE_LICENSE("GPL v2"); |
151 | MODULE_AUTHOR("Hans J. Koch, Benedikt Spranger"); | 152 | MODULE_AUTHOR("Hans J. Koch, Benedikt Spranger"); |
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 869d47cb6db3..0a69c0977e3f 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c | |||
@@ -546,10 +546,6 @@ static int acm_tty_open(struct tty_struct *tty, struct file *filp) | |||
546 | tty->driver_data = acm; | 546 | tty->driver_data = acm; |
547 | acm->tty = tty; | 547 | acm->tty = tty; |
548 | 548 | ||
549 | /* force low_latency on so that our tty_push actually forces the data through, | ||
550 | otherwise it is scheduled, and with high data rates data can get lost. */ | ||
551 | tty->low_latency = 1; | ||
552 | |||
553 | if (usb_autopm_get_interface(acm->control) < 0) | 549 | if (usb_autopm_get_interface(acm->control) < 0) |
554 | goto early_bail; | 550 | goto early_bail; |
555 | else | 551 | else |
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index 3771d6e6d0cc..34e6108e1d42 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c | |||
@@ -652,7 +652,7 @@ next_desc: | |||
652 | 652 | ||
653 | iface = &intf->altsetting[0]; | 653 | iface = &intf->altsetting[0]; |
654 | ep = &iface->endpoint[0].desc; | 654 | ep = &iface->endpoint[0].desc; |
655 | if (!usb_endpoint_is_int_in(ep)) { | 655 | if (!ep || !usb_endpoint_is_int_in(ep)) { |
656 | rv = -EINVAL; | 656 | rv = -EINVAL; |
657 | goto err; | 657 | goto err; |
658 | } | 658 | } |
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index df3c539f652a..308609039c73 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c | |||
@@ -841,7 +841,7 @@ static int proc_resetep(struct dev_state *ps, void __user *arg) | |||
841 | ret = checkintf(ps, ret); | 841 | ret = checkintf(ps, ret); |
842 | if (ret) | 842 | if (ret) |
843 | return ret; | 843 | return ret; |
844 | usb_settoggle(ps->dev, ep & 0xf, !(ep & USB_DIR_IN), 0); | 844 | usb_reset_endpoint(ps->dev, ep); |
845 | return 0; | 845 | return 0; |
846 | } | 846 | } |
847 | 847 | ||
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 81fa8506825d..42b93da1085d 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c | |||
@@ -1539,6 +1539,32 @@ void usb_hcd_disable_endpoint(struct usb_device *udev, | |||
1539 | hcd->driver->endpoint_disable(hcd, ep); | 1539 | hcd->driver->endpoint_disable(hcd, ep); |
1540 | } | 1540 | } |
1541 | 1541 | ||
1542 | /** | ||
1543 | * usb_hcd_reset_endpoint - reset host endpoint state | ||
1544 | * @udev: USB device. | ||
1545 | * @ep: the endpoint to reset. | ||
1546 | * | ||
1547 | * Resets any host endpoint state such as the toggle bit, sequence | ||
1548 | * number and current window. | ||
1549 | */ | ||
1550 | void usb_hcd_reset_endpoint(struct usb_device *udev, | ||
1551 | struct usb_host_endpoint *ep) | ||
1552 | { | ||
1553 | struct usb_hcd *hcd = bus_to_hcd(udev->bus); | ||
1554 | |||
1555 | if (hcd->driver->endpoint_reset) | ||
1556 | hcd->driver->endpoint_reset(hcd, ep); | ||
1557 | else { | ||
1558 | int epnum = usb_endpoint_num(&ep->desc); | ||
1559 | int is_out = usb_endpoint_dir_out(&ep->desc); | ||
1560 | int is_control = usb_endpoint_xfer_control(&ep->desc); | ||
1561 | |||
1562 | usb_settoggle(udev, epnum, is_out, 0); | ||
1563 | if (is_control) | ||
1564 | usb_settoggle(udev, epnum, !is_out, 0); | ||
1565 | } | ||
1566 | } | ||
1567 | |||
1542 | /* Protect against drivers that try to unlink URBs after the device | 1568 | /* Protect against drivers that try to unlink URBs after the device |
1543 | * is gone, by waiting until all unlinks for @udev are finished. | 1569 | * is gone, by waiting until all unlinks for @udev are finished. |
1544 | * Since we don't currently track URBs by device, simply wait until | 1570 | * Since we don't currently track URBs by device, simply wait until |
diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h index f750eb1ab595..e7d4479de41c 100644 --- a/drivers/usb/core/hcd.h +++ b/drivers/usb/core/hcd.h | |||
@@ -206,6 +206,11 @@ struct hc_driver { | |||
206 | void (*endpoint_disable)(struct usb_hcd *hcd, | 206 | void (*endpoint_disable)(struct usb_hcd *hcd, |
207 | struct usb_host_endpoint *ep); | 207 | struct usb_host_endpoint *ep); |
208 | 208 | ||
209 | /* (optional) reset any endpoint state such as sequence number | ||
210 | and current window */ | ||
211 | void (*endpoint_reset)(struct usb_hcd *hcd, | ||
212 | struct usb_host_endpoint *ep); | ||
213 | |||
209 | /* root hub support */ | 214 | /* root hub support */ |
210 | int (*hub_status_data) (struct usb_hcd *hcd, char *buf); | 215 | int (*hub_status_data) (struct usb_hcd *hcd, char *buf); |
211 | int (*hub_control) (struct usb_hcd *hcd, | 216 | int (*hub_control) (struct usb_hcd *hcd, |
@@ -234,6 +239,8 @@ extern void usb_hcd_flush_endpoint(struct usb_device *udev, | |||
234 | struct usb_host_endpoint *ep); | 239 | struct usb_host_endpoint *ep); |
235 | extern void usb_hcd_disable_endpoint(struct usb_device *udev, | 240 | extern void usb_hcd_disable_endpoint(struct usb_device *udev, |
236 | struct usb_host_endpoint *ep); | 241 | struct usb_host_endpoint *ep); |
242 | extern void usb_hcd_reset_endpoint(struct usb_device *udev, | ||
243 | struct usb_host_endpoint *ep); | ||
237 | extern void usb_hcd_synchronize_unlinks(struct usb_device *udev); | 244 | extern void usb_hcd_synchronize_unlinks(struct usb_device *udev); |
238 | extern int usb_hcd_get_frame_number(struct usb_device *udev); | 245 | extern int usb_hcd_get_frame_number(struct usb_device *udev); |
239 | 246 | ||
@@ -279,6 +286,13 @@ extern irqreturn_t usb_hcd_irq(int irq, void *__hcd); | |||
279 | extern void usb_hc_died(struct usb_hcd *hcd); | 286 | extern void usb_hc_died(struct usb_hcd *hcd); |
280 | extern void usb_hcd_poll_rh_status(struct usb_hcd *hcd); | 287 | extern void usb_hcd_poll_rh_status(struct usb_hcd *hcd); |
281 | 288 | ||
289 | /* The D0/D1 toggle bits ... USE WITH CAUTION (they're almost hcd-internal) */ | ||
290 | #define usb_gettoggle(dev, ep, out) (((dev)->toggle[out] >> (ep)) & 1) | ||
291 | #define usb_dotoggle(dev, ep, out) ((dev)->toggle[out] ^= (1 << (ep))) | ||
292 | #define usb_settoggle(dev, ep, out, bit) \ | ||
293 | ((dev)->toggle[out] = ((dev)->toggle[out] & ~(1 << (ep))) | \ | ||
294 | ((bit) << (ep))) | ||
295 | |||
282 | /* -------------------------------------------------------------------------- */ | 296 | /* -------------------------------------------------------------------------- */ |
283 | 297 | ||
284 | /* Enumeration is only for the hub driver, or HCD virtual root hubs */ | 298 | /* Enumeration is only for the hub driver, or HCD virtual root hubs */ |
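
The new hc_driver->endpoint_reset hook added above is optional: when a controller driver does not provide it, usb_hcd_reset_endpoint() (see hcd.c above) falls back to clearing the toggle bits with usb_settoggle(). A sketch of what a hypothetical HCD might install; foo_endpoint_reset() and the "foo" controller are made up, and the usb_hcd/hc_driver declarations come from the USB core's hcd.h:

    static void foo_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
    {
            int epnum  = usb_endpoint_num(&ep->desc);
            int is_out = usb_endpoint_dir_out(&ep->desc);

            /* Drop whatever per-endpoint state this controller caches:
             * sequence number, transfer window, toggle, ... (hardware specific). */
            pr_debug("foo-hcd: reset state of ep%d-%s\n", epnum, is_out ? "out" : "in");
    }

    static struct hc_driver foo_hc_driver = {
            .description    = "foo-hcd",
            .endpoint_reset = foo_endpoint_reset,       /* optional hook */
            /* ... mandatory hooks omitted from this sketch ... */
    };
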
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index 30a0690f3683..b62628377654 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c | |||
@@ -1002,8 +1002,7 @@ int usb_clear_halt(struct usb_device *dev, int pipe) | |||
1002 | * the copy in usb-storage, for as long as we need two copies. | 1002 | * the copy in usb-storage, for as long as we need two copies. |
1003 | */ | 1003 | */ |
1004 | 1004 | ||
1005 | /* toggle was reset by the clear */ | 1005 | usb_reset_endpoint(dev, endp); |
1006 | usb_settoggle(dev, usb_pipeendpoint(pipe), usb_pipeout(pipe), 0); | ||
1007 | 1006 | ||
1008 | return 0; | 1007 | return 0; |
1009 | } | 1008 | } |
@@ -1076,6 +1075,30 @@ void usb_disable_endpoint(struct usb_device *dev, unsigned int epaddr, | |||
1076 | } | 1075 | } |
1077 | 1076 | ||
1078 | /** | 1077 | /** |
1078 | * usb_reset_endpoint - Reset an endpoint's state. | ||
1079 | * @dev: the device whose endpoint is to be reset | ||
1080 | * @epaddr: the endpoint's address. Endpoint number for output, | ||
1081 | * endpoint number + USB_DIR_IN for input | ||
1082 | * | ||
1083 | * Resets any host-side endpoint state such as the toggle bit, | ||
1084 | * sequence number or current window. | ||
1085 | */ | ||
1086 | void usb_reset_endpoint(struct usb_device *dev, unsigned int epaddr) | ||
1087 | { | ||
1088 | unsigned int epnum = epaddr & USB_ENDPOINT_NUMBER_MASK; | ||
1089 | struct usb_host_endpoint *ep; | ||
1090 | |||
1091 | if (usb_endpoint_out(epaddr)) | ||
1092 | ep = dev->ep_out[epnum]; | ||
1093 | else | ||
1094 | ep = dev->ep_in[epnum]; | ||
1095 | if (ep) | ||
1096 | usb_hcd_reset_endpoint(dev, ep); | ||
1097 | } | ||
1098 | EXPORT_SYMBOL_GPL(usb_reset_endpoint); | ||
1099 | |||
1100 | |||
1101 | /** | ||
1079 | * usb_disable_interface -- Disable all endpoints for an interface | 1102 | * usb_disable_interface -- Disable all endpoints for an interface |
1080 | * @dev: the device whose interface is being disabled | 1103 | * @dev: the device whose interface is being disabled |
1081 | * @intf: pointer to the interface descriptor | 1104 | * @intf: pointer to the interface descriptor |
@@ -1117,7 +1140,6 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0) | |||
1117 | usb_disable_endpoint(dev, i, true); | 1140 | usb_disable_endpoint(dev, i, true); |
1118 | usb_disable_endpoint(dev, i + USB_DIR_IN, true); | 1141 | usb_disable_endpoint(dev, i + USB_DIR_IN, true); |
1119 | } | 1142 | } |
1120 | dev->toggle[0] = dev->toggle[1] = 0; | ||
1121 | 1143 | ||
1122 | /* getting rid of interfaces will disconnect | 1144 | /* getting rid of interfaces will disconnect |
1123 | * any drivers bound to them (a key side effect) | 1145 | * any drivers bound to them (a key side effect) |
@@ -1154,28 +1176,24 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0) | |||
1154 | * usb_enable_endpoint - Enable an endpoint for USB communications | 1176 | * usb_enable_endpoint - Enable an endpoint for USB communications |
1155 | * @dev: the device whose interface is being enabled | 1177 | * @dev: the device whose interface is being enabled |
1156 | * @ep: the endpoint | 1178 | * @ep: the endpoint |
1157 | * @reset_toggle: flag to set the endpoint's toggle back to 0 | 1179 | * @reset_ep: flag to reset the endpoint state |
1158 | * | 1180 | * |
1159 | * Resets the endpoint toggle if asked, and sets dev->ep_{in,out} pointers. | 1181 | * Resets the endpoint state if asked, and sets dev->ep_{in,out} pointers. |
1160 | * For control endpoints, both the input and output sides are handled. | 1182 | * For control endpoints, both the input and output sides are handled. |
1161 | */ | 1183 | */ |
1162 | void usb_enable_endpoint(struct usb_device *dev, struct usb_host_endpoint *ep, | 1184 | void usb_enable_endpoint(struct usb_device *dev, struct usb_host_endpoint *ep, |
1163 | bool reset_toggle) | 1185 | bool reset_ep) |
1164 | { | 1186 | { |
1165 | int epnum = usb_endpoint_num(&ep->desc); | 1187 | int epnum = usb_endpoint_num(&ep->desc); |
1166 | int is_out = usb_endpoint_dir_out(&ep->desc); | 1188 | int is_out = usb_endpoint_dir_out(&ep->desc); |
1167 | int is_control = usb_endpoint_xfer_control(&ep->desc); | 1189 | int is_control = usb_endpoint_xfer_control(&ep->desc); |
1168 | 1190 | ||
1169 | if (is_out || is_control) { | 1191 | if (reset_ep) |
1170 | if (reset_toggle) | 1192 | usb_hcd_reset_endpoint(dev, ep); |
1171 | usb_settoggle(dev, epnum, 1, 0); | 1193 | if (is_out || is_control) |
1172 | dev->ep_out[epnum] = ep; | 1194 | dev->ep_out[epnum] = ep; |
1173 | } | 1195 | if (!is_out || is_control) |
1174 | if (!is_out || is_control) { | ||
1175 | if (reset_toggle) | ||
1176 | usb_settoggle(dev, epnum, 0, 0); | ||
1177 | dev->ep_in[epnum] = ep; | 1196 | dev->ep_in[epnum] = ep; |
1178 | } | ||
1179 | ep->enabled = 1; | 1197 | ep->enabled = 1; |
1180 | } | 1198 | } |
1181 | 1199 | ||
@@ -1183,18 +1201,18 @@ void usb_enable_endpoint(struct usb_device *dev, struct usb_host_endpoint *ep, | |||
1183 | * usb_enable_interface - Enable all the endpoints for an interface | 1201 | * usb_enable_interface - Enable all the endpoints for an interface |
1184 | * @dev: the device whose interface is being enabled | 1202 | * @dev: the device whose interface is being enabled |
1185 | * @intf: pointer to the interface descriptor | 1203 | * @intf: pointer to the interface descriptor |
1186 | * @reset_toggles: flag to set the endpoints' toggles back to 0 | 1204 | * @reset_eps: flag to reset the endpoints' state |
1187 | * | 1205 | * |
1188 | * Enables all the endpoints for the interface's current altsetting. | 1206 | * Enables all the endpoints for the interface's current altsetting. |
1189 | */ | 1207 | */ |
1190 | void usb_enable_interface(struct usb_device *dev, | 1208 | void usb_enable_interface(struct usb_device *dev, |
1191 | struct usb_interface *intf, bool reset_toggles) | 1209 | struct usb_interface *intf, bool reset_eps) |
1192 | { | 1210 | { |
1193 | struct usb_host_interface *alt = intf->cur_altsetting; | 1211 | struct usb_host_interface *alt = intf->cur_altsetting; |
1194 | int i; | 1212 | int i; |
1195 | 1213 | ||
1196 | for (i = 0; i < alt->desc.bNumEndpoints; ++i) | 1214 | for (i = 0; i < alt->desc.bNumEndpoints; ++i) |
1197 | usb_enable_endpoint(dev, &alt->endpoint[i], reset_toggles); | 1215 | usb_enable_endpoint(dev, &alt->endpoint[i], reset_eps); |
1198 | } | 1216 | } |
1199 | 1217 | ||
1200 | /** | 1218 | /** |
@@ -1335,7 +1353,7 @@ EXPORT_SYMBOL_GPL(usb_set_interface); | |||
1335 | * This issues a standard SET_CONFIGURATION request to the device using | 1353 | * This issues a standard SET_CONFIGURATION request to the device using |
1336 | * the current configuration. The effect is to reset most USB-related | 1354 | * the current configuration. The effect is to reset most USB-related |
1337 | * state in the device, including interface altsettings (reset to zero), | 1355 | * state in the device, including interface altsettings (reset to zero), |
1338 | * endpoint halts (cleared), and data toggle (only for bulk and interrupt | 1356 | * endpoint halts (cleared), and endpoint state (only for bulk and interrupt |
1339 | * endpoints). Other usbcore state is unchanged, including bindings of | 1357 | * endpoints). Other usbcore state is unchanged, including bindings of |
1340 | * usb device drivers to interfaces. | 1358 | * usb device drivers to interfaces. |
1341 | * | 1359 | * |
@@ -1343,7 +1361,7 @@ EXPORT_SYMBOL_GPL(usb_set_interface); | |||
1343 | * (multi-interface) devices. Instead, the driver for each interface may | 1361 | * (multi-interface) devices. Instead, the driver for each interface may |
1344 | * use usb_set_interface() on the interfaces it claims. Be careful though; | 1362 | * use usb_set_interface() on the interfaces it claims. Be careful though; |
1345 | * some devices don't support the SET_INTERFACE request, and others won't | 1363 | * some devices don't support the SET_INTERFACE request, and others won't |
1346 | * reset all the interface state (notably data toggles). Resetting the whole | 1364 | * reset all the interface state (notably endpoint state). Resetting the whole |
1347 | * configuration would affect other drivers' interfaces. | 1365 | * configuration would affect other drivers' interfaces. |
1348 | * | 1366 | * |
1349 | * The caller must own the device lock. | 1367 | * The caller must own the device lock. |
@@ -1376,8 +1394,6 @@ int usb_reset_configuration(struct usb_device *dev) | |||
1376 | if (retval < 0) | 1394 | if (retval < 0) |
1377 | return retval; | 1395 | return retval; |
1378 | 1396 | ||
1379 | dev->toggle[0] = dev->toggle[1] = 0; | ||
1380 | |||
1381 | /* re-init hc/hcd interface/endpoint state */ | 1397 | /* re-init hc/hcd interface/endpoint state */ |
1382 | for (i = 0; i < config->desc.bNumInterfaces; i++) { | 1398 | for (i = 0; i < config->desc.bNumInterfaces; i++) { |
1383 | struct usb_interface *intf = config->interface[i]; | 1399 | struct usb_interface *intf = config->interface[i]; |
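
With usb_reset_endpoint() exported above, an interface driver can explicitly drop host-side endpoint state (toggle, sequence number, window) when it knows the device side has been reset outside of a normal CLEAR_HALT. A minimal usage sketch; the endpoint address and the surrounding driver are illustrative only:

    #include <linux/usb.h>

    static void foo_resync_bulk_in(struct usb_device *udev)
    {
            /* After a vendor-specific "reset data path" command, realign the
             * host's view of bulk-IN endpoint 2 with the device's reset state. */
            usb_reset_endpoint(udev, 2 | USB_DIR_IN);
    }

usb_clear_halt() now performs this reset itself, so the explicit call is only needed when the device's endpoint state changes without a CLEAR_FEATURE going out.
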
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c index dcfc072630c1..7eee400d3e32 100644 --- a/drivers/usb/core/usb.c +++ b/drivers/usb/core/usb.c | |||
@@ -362,7 +362,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent, | |||
362 | dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE; | 362 | dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE; |
363 | dev->ep0.desc.bDescriptorType = USB_DT_ENDPOINT; | 363 | dev->ep0.desc.bDescriptorType = USB_DT_ENDPOINT; |
364 | /* ep0 maxpacket comes later, from device descriptor */ | 364 | /* ep0 maxpacket comes later, from device descriptor */ |
365 | usb_enable_endpoint(dev, &dev->ep0, true); | 365 | usb_enable_endpoint(dev, &dev->ep0, false); |
366 | dev->can_submit = 1; | 366 | dev->can_submit = 1; |
367 | 367 | ||
368 | /* Save readable and stable topology id, distinguishing devices | 368 | /* Save readable and stable topology id, distinguishing devices |
diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c index 96d65ca06ecd..4007770f7ed2 100644 --- a/drivers/usb/gadget/u_ether.c +++ b/drivers/usb/gadget/u_ether.c | |||
@@ -175,12 +175,6 @@ static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p) | |||
175 | strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof p->bus_info); | 175 | strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof p->bus_info); |
176 | } | 176 | } |
177 | 177 | ||
178 | static u32 eth_get_link(struct net_device *net) | ||
179 | { | ||
180 | struct eth_dev *dev = netdev_priv(net); | ||
181 | return dev->gadget->speed != USB_SPEED_UNKNOWN; | ||
182 | } | ||
183 | |||
184 | /* REVISIT can also support: | 178 | /* REVISIT can also support: |
185 | * - WOL (by tracking suspends and issuing remote wakeup) | 179 | * - WOL (by tracking suspends and issuing remote wakeup) |
186 | * - msglevel (implies updated messaging) | 180 | * - msglevel (implies updated messaging) |
@@ -189,7 +183,7 @@ static u32 eth_get_link(struct net_device *net) | |||
189 | 183 | ||
190 | static struct ethtool_ops ops = { | 184 | static struct ethtool_ops ops = { |
191 | .get_drvinfo = eth_get_drvinfo, | 185 | .get_drvinfo = eth_get_drvinfo, |
192 | .get_link = eth_get_link | 186 | .get_link = ethtool_op_get_link, |
193 | }; | 187 | }; |
194 | 188 | ||
195 | static void defer_kevent(struct eth_dev *dev, int flag) | 189 | static void defer_kevent(struct eth_dev *dev, int flag) |
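
The u_ether change above drops the open-coded get_link method in favour of ethtool_op_get_link(), which simply reports netif_carrier_ok(). The only requirement on a driver using it is that the carrier state is kept current; a generic sketch:

    #include <linux/ethtool.h>
    #include <linux/netdevice.h>

    static struct ethtool_ops foo_ethtool_ops = {
            .get_link = ethtool_op_get_link,    /* returns netif_carrier_ok(dev) */
    };

    static void foo_link_change(struct net_device *dev, int link_up)
    {
            if (link_up)
                    netif_carrier_on(dev);
            else
                    netif_carrier_off(dev);
    }
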
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c index 4ed228a89943..bb5e6f671578 100644 --- a/drivers/usb/host/ohci-at91.c +++ b/drivers/usb/host/ohci-at91.c | |||
@@ -280,7 +280,7 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev) | |||
280 | * are always powered while this driver is active, and use | 280 | * are always powered while this driver is active, and use |
281 | * active-low power switches. | 281 | * active-low power switches. |
282 | */ | 282 | */ |
283 | for (i = 0; i < pdata->ports; i++) { | 283 | for (i = 0; i < ARRAY_SIZE(pdata->vbus_pin); i++) { |
284 | if (pdata->vbus_pin[i] <= 0) | 284 | if (pdata->vbus_pin[i] <= 0) |
285 | continue; | 285 | continue; |
286 | gpio_request(pdata->vbus_pin[i], "ohci_vbus"); | 286 | gpio_request(pdata->vbus_pin[i], "ohci_vbus"); |
@@ -298,7 +298,7 @@ static int ohci_hcd_at91_drv_remove(struct platform_device *pdev) | |||
298 | int i; | 298 | int i; |
299 | 299 | ||
300 | if (pdata) { | 300 | if (pdata) { |
301 | for (i = 0; i < pdata->ports; i++) { | 301 | for (i = 0; i < ARRAY_SIZE(pdata->vbus_pin); i++) { |
302 | if (pdata->vbus_pin[i] <= 0) | 302 | if (pdata->vbus_pin[i] <= 0) |
303 | continue; | 303 | continue; |
304 | gpio_direction_output(pdata->vbus_pin[i], 1); | 304 | gpio_direction_output(pdata->vbus_pin[i], 1); |
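
The ohci-at91 hunk above bounds both vbus loops by ARRAY_SIZE(pdata->vbus_pin), so the iteration limit always matches the array actually being indexed rather than a separately maintained port count. A generic sketch of the idiom; the structure layout is illustrative:

    #include <linux/kernel.h>

    struct board_data { int vbus_pin[2]; int ports; };

    static void setup_vbus(struct board_data *pdata)
    {
            int i;

            for (i = 0; i < ARRAY_SIZE(pdata->vbus_pin); i++) {     /* not pdata->ports */
                    if (pdata->vbus_pin[i] <= 0)
                            continue;           /* unused or invalid pin */
                    /* request and configure the GPIO here */
            }
    }
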
diff --git a/drivers/usb/host/whci/asl.c b/drivers/usb/host/whci/asl.c index 958751ccea43..c2050785a819 100644 --- a/drivers/usb/host/whci/asl.c +++ b/drivers/usb/host/whci/asl.c | |||
@@ -122,7 +122,8 @@ static uint32_t process_qset(struct whc *whc, struct whc_qset *qset) | |||
122 | process_inactive_qtd(whc, qset, td); | 122 | process_inactive_qtd(whc, qset, td); |
123 | } | 123 | } |
124 | 124 | ||
125 | update |= qset_add_qtds(whc, qset); | 125 | if (!qset->remove) |
126 | update |= qset_add_qtds(whc, qset); | ||
126 | 127 | ||
127 | done: | 128 | done: |
128 | /* | 129 | /* |
@@ -254,23 +255,29 @@ int asl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags) | |||
254 | 255 | ||
255 | spin_lock_irqsave(&whc->lock, flags); | 256 | spin_lock_irqsave(&whc->lock, flags); |
256 | 257 | ||
258 | err = usb_hcd_link_urb_to_ep(&whc->wusbhc.usb_hcd, urb); | ||
259 | if (err < 0) { | ||
260 | spin_unlock_irqrestore(&whc->lock, flags); | ||
261 | return err; | ||
262 | } | ||
263 | |||
257 | qset = get_qset(whc, urb, GFP_ATOMIC); | 264 | qset = get_qset(whc, urb, GFP_ATOMIC); |
258 | if (qset == NULL) | 265 | if (qset == NULL) |
259 | err = -ENOMEM; | 266 | err = -ENOMEM; |
260 | else | 267 | else |
261 | err = qset_add_urb(whc, qset, urb, GFP_ATOMIC); | 268 | err = qset_add_urb(whc, qset, urb, GFP_ATOMIC); |
262 | if (!err) { | 269 | if (!err) { |
263 | usb_hcd_link_urb_to_ep(&whc->wusbhc.usb_hcd, urb); | ||
264 | if (!qset->in_sw_list) | 270 | if (!qset->in_sw_list) |
265 | asl_qset_insert_begin(whc, qset); | 271 | asl_qset_insert_begin(whc, qset); |
266 | } | 272 | } else |
273 | usb_hcd_unlink_urb_from_ep(&whc->wusbhc.usb_hcd, urb); | ||
267 | 274 | ||
268 | spin_unlock_irqrestore(&whc->lock, flags); | 275 | spin_unlock_irqrestore(&whc->lock, flags); |
269 | 276 | ||
270 | if (!err) | 277 | if (!err) |
271 | queue_work(whc->workqueue, &whc->async_work); | 278 | queue_work(whc->workqueue, &whc->async_work); |
272 | 279 | ||
273 | return 0; | 280 | return err; |
274 | } | 281 | } |
275 | 282 | ||
276 | /** | 283 | /** |
diff --git a/drivers/usb/host/whci/hcd.c b/drivers/usb/host/whci/hcd.c index 1569afd6245b..e019a5058ab8 100644 --- a/drivers/usb/host/whci/hcd.c +++ b/drivers/usb/host/whci/hcd.c | |||
@@ -186,6 +186,28 @@ static void whc_endpoint_disable(struct usb_hcd *usb_hcd, | |||
186 | } | 186 | } |
187 | } | 187 | } |
188 | 188 | ||
189 | static void whc_endpoint_reset(struct usb_hcd *usb_hcd, | ||
190 | struct usb_host_endpoint *ep) | ||
191 | { | ||
192 | struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); | ||
193 | struct whc *whc = wusbhc_to_whc(wusbhc); | ||
194 | struct whc_qset *qset; | ||
195 | |||
196 | qset = ep->hcpriv; | ||
197 | if (qset) { | ||
198 | qset->remove = 1; | ||
199 | |||
200 | if (usb_endpoint_xfer_bulk(&ep->desc) | ||
201 | || usb_endpoint_xfer_control(&ep->desc)) | ||
202 | queue_work(whc->workqueue, &whc->async_work); | ||
203 | else | ||
204 | queue_work(whc->workqueue, &whc->periodic_work); | ||
205 | |||
206 | qset_reset(whc, qset); | ||
207 | } | ||
208 | } | ||
209 | |||
210 | |||
189 | static struct hc_driver whc_hc_driver = { | 211 | static struct hc_driver whc_hc_driver = { |
190 | .description = "whci-hcd", | 212 | .description = "whci-hcd", |
191 | .product_desc = "Wireless host controller", | 213 | .product_desc = "Wireless host controller", |
@@ -200,6 +222,7 @@ static struct hc_driver whc_hc_driver = { | |||
200 | .urb_enqueue = whc_urb_enqueue, | 222 | .urb_enqueue = whc_urb_enqueue, |
201 | .urb_dequeue = whc_urb_dequeue, | 223 | .urb_dequeue = whc_urb_dequeue, |
202 | .endpoint_disable = whc_endpoint_disable, | 224 | .endpoint_disable = whc_endpoint_disable, |
225 | .endpoint_reset = whc_endpoint_reset, | ||
203 | 226 | ||
204 | .hub_status_data = wusbhc_rh_status_data, | 227 | .hub_status_data = wusbhc_rh_status_data, |
205 | .hub_control = wusbhc_rh_control, | 228 | .hub_control = wusbhc_rh_control, |
diff --git a/drivers/usb/host/whci/pzl.c b/drivers/usb/host/whci/pzl.c index df8b85f07092..ff4ef9e910d9 100644 --- a/drivers/usb/host/whci/pzl.c +++ b/drivers/usb/host/whci/pzl.c | |||
@@ -128,7 +128,8 @@ static enum whc_update pzl_process_qset(struct whc *whc, struct whc_qset *qset) | |||
128 | process_inactive_qtd(whc, qset, td); | 128 | process_inactive_qtd(whc, qset, td); |
129 | } | 129 | } |
130 | 130 | ||
131 | update |= qset_add_qtds(whc, qset); | 131 | if (!qset->remove) |
132 | update |= qset_add_qtds(whc, qset); | ||
132 | 133 | ||
133 | done: | 134 | done: |
134 | /* | 135 | /* |
@@ -282,23 +283,29 @@ int pzl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags) | |||
282 | 283 | ||
283 | spin_lock_irqsave(&whc->lock, flags); | 284 | spin_lock_irqsave(&whc->lock, flags); |
284 | 285 | ||
286 | err = usb_hcd_link_urb_to_ep(&whc->wusbhc.usb_hcd, urb); | ||
287 | if (err < 0) { | ||
288 | spin_unlock_irqrestore(&whc->lock, flags); | ||
289 | return err; | ||
290 | } | ||
291 | |||
285 | qset = get_qset(whc, urb, GFP_ATOMIC); | 292 | qset = get_qset(whc, urb, GFP_ATOMIC); |
286 | if (qset == NULL) | 293 | if (qset == NULL) |
287 | err = -ENOMEM; | 294 | err = -ENOMEM; |
288 | else | 295 | else |
289 | err = qset_add_urb(whc, qset, urb, GFP_ATOMIC); | 296 | err = qset_add_urb(whc, qset, urb, GFP_ATOMIC); |
290 | if (!err) { | 297 | if (!err) { |
291 | usb_hcd_link_urb_to_ep(&whc->wusbhc.usb_hcd, urb); | ||
292 | if (!qset->in_sw_list) | 298 | if (!qset->in_sw_list) |
293 | qset_insert_in_sw_list(whc, qset); | 299 | qset_insert_in_sw_list(whc, qset); |
294 | } | 300 | } else |
301 | usb_hcd_unlink_urb_from_ep(&whc->wusbhc.usb_hcd, urb); | ||
295 | 302 | ||
296 | spin_unlock_irqrestore(&whc->lock, flags); | 303 | spin_unlock_irqrestore(&whc->lock, flags); |
297 | 304 | ||
298 | if (!err) | 305 | if (!err) |
299 | queue_work(whc->workqueue, &whc->periodic_work); | 306 | queue_work(whc->workqueue, &whc->periodic_work); |
300 | 307 | ||
301 | return 0; | 308 | return err; |
302 | } | 309 | } |
303 | 310 | ||
304 | /** | 311 | /** |
@@ -353,7 +360,6 @@ void pzl_qset_delete(struct whc *whc, struct whc_qset *qset) | |||
353 | qset_delete(whc, qset); | 360 | qset_delete(whc, qset); |
354 | } | 361 | } |
355 | 362 | ||
356 | |||
357 | /** | 363 | /** |
358 | * pzl_init - initialize the periodic zone list | 364 | * pzl_init - initialize the periodic zone list |
359 | * @whc: the WHCI host controller | 365 | * @whc: the WHCI host controller |
diff --git a/drivers/usb/host/whci/qset.c b/drivers/usb/host/whci/qset.c index 7be74314ee12..640b38fbd051 100644 --- a/drivers/usb/host/whci/qset.c +++ b/drivers/usb/host/whci/qset.c | |||
@@ -89,11 +89,16 @@ static void qset_fill_qh(struct whc_qset *qset, struct urb *urb) | |||
89 | QH_INFO3_TX_RATE_53_3 | 89 | QH_INFO3_TX_RATE_53_3 |
90 | | QH_INFO3_TX_PWR(0) /* 0 == max power */ | 90 | | QH_INFO3_TX_PWR(0) /* 0 == max power */ |
91 | ); | 91 | ); |
92 | |||
93 | qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1); | ||
92 | } | 94 | } |
93 | 95 | ||
94 | /** | 96 | /** |
95 | * qset_clear - clear fields in a qset so it may be reinserted into a | 97 | * qset_clear - clear fields in a qset so it may be reinserted into a |
96 | * schedule | 98 | * schedule. |
99 | * | ||
100 | * The sequence number and current window are not cleared (see | ||
101 | * qset_reset()). | ||
97 | */ | 102 | */ |
98 | void qset_clear(struct whc *whc, struct whc_qset *qset) | 103 | void qset_clear(struct whc *whc, struct whc_qset *qset) |
99 | { | 104 | { |
@@ -101,9 +106,8 @@ void qset_clear(struct whc *whc, struct whc_qset *qset) | |||
101 | qset->remove = 0; | 106 | qset->remove = 0; |
102 | 107 | ||
103 | qset->qh.link = cpu_to_le32(QH_LINK_NTDS(8) | QH_LINK_T); | 108 | qset->qh.link = cpu_to_le32(QH_LINK_NTDS(8) | QH_LINK_T); |
104 | qset->qh.status = cpu_to_le16(QH_STATUS_ICUR(qset->td_start)); | 109 | qset->qh.status = qset->qh.status & QH_STATUS_SEQ_MASK; |
105 | qset->qh.err_count = 0; | 110 | qset->qh.err_count = 0; |
106 | qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1); | ||
107 | qset->qh.scratch[0] = 0; | 111 | qset->qh.scratch[0] = 0; |
108 | qset->qh.scratch[1] = 0; | 112 | qset->qh.scratch[1] = 0; |
109 | qset->qh.scratch[2] = 0; | 113 | qset->qh.scratch[2] = 0; |
@@ -114,6 +118,20 @@ void qset_clear(struct whc *whc, struct whc_qset *qset) | |||
114 | } | 118 | } |
115 | 119 | ||
116 | /** | 120 | /** |
121 | * qset_reset - reset endpoint state in a qset. | ||
122 | * | ||
123 | * Clears the sequence number and current window. This qset must not | ||
124 | * be in the ASL or PZL. | ||
125 | */ | ||
126 | void qset_reset(struct whc *whc, struct whc_qset *qset) | ||
127 | { | ||
128 | wait_for_completion(&qset->remove_complete); | ||
129 | |||
130 | qset->qh.status &= ~QH_STATUS_SEQ_MASK; | ||
131 | qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1); | ||
132 | } | ||
133 | |||
134 | /** | ||
117 | * get_qset - get the qset for an async endpoint | 135 | * get_qset - get the qset for an async endpoint |
118 | * | 136 | * |
119 | * A new qset is created if one does not already exist. | 137 | * A new qset is created if one does not already exist. |
diff --git a/drivers/usb/host/whci/whcd.h b/drivers/usb/host/whci/whcd.h index d3543a181dc9..24e94d983c5e 100644 --- a/drivers/usb/host/whci/whcd.h +++ b/drivers/usb/host/whci/whcd.h | |||
@@ -184,6 +184,7 @@ void qset_free(struct whc *whc, struct whc_qset *qset); | |||
184 | struct whc_qset *get_qset(struct whc *whc, struct urb *urb, gfp_t mem_flags); | 184 | struct whc_qset *get_qset(struct whc *whc, struct urb *urb, gfp_t mem_flags); |
185 | void qset_delete(struct whc *whc, struct whc_qset *qset); | 185 | void qset_delete(struct whc *whc, struct whc_qset *qset); |
186 | void qset_clear(struct whc *whc, struct whc_qset *qset); | 186 | void qset_clear(struct whc *whc, struct whc_qset *qset); |
187 | void qset_reset(struct whc *whc, struct whc_qset *qset); | ||
187 | int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb, | 188 | int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb, |
188 | gfp_t mem_flags); | 189 | gfp_t mem_flags); |
189 | void qset_free_std(struct whc *whc, struct whc_std *std); | 190 | void qset_free_std(struct whc *whc, struct whc_std *std); |
diff --git a/drivers/usb/host/whci/whci-hc.h b/drivers/usb/host/whci/whci-hc.h index 51df7e313b38..794dba0d0f0a 100644 --- a/drivers/usb/host/whci/whci-hc.h +++ b/drivers/usb/host/whci/whci-hc.h | |||
@@ -185,6 +185,7 @@ struct whc_qhead { | |||
185 | #define QH_STATUS_FLOW_CTRL (1 << 15) | 185 | #define QH_STATUS_FLOW_CTRL (1 << 15) |
186 | #define QH_STATUS_ICUR(i) ((i) << 5) | 186 | #define QH_STATUS_ICUR(i) ((i) << 5) |
187 | #define QH_STATUS_TO_ICUR(s) (((s) >> 5) & 0x7) | 187 | #define QH_STATUS_TO_ICUR(s) (((s) >> 5) & 0x7) |
188 | #define QH_STATUS_SEQ_MASK 0x1f | ||
188 | 189 | ||
189 | /** | 190 | /** |
190 | * usb_pipe_to_qh_type - USB core pipe type to QH transfer type | 191 | * usb_pipe_to_qh_type - USB core pipe type to QH transfer type |
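Taken together, the WHCI changes above split endpoint state handling in two: qset_clear() now preserves the low five status bits (the transfer sequence number selected by the new QH_STATUS_SEQ_MASK) so a qset can leave and re-enter the ASL/PZL without disturbing sequencing, while qset_reset() clears them, plus the current window, when the endpoint is genuinely reset. A hedged sketch of what the mask distinguishes (assumes whci-hc.h is included):

	/* Sketch: what QH_STATUS_SEQ_MASK (0x1f) separates in the qhead status word. */
	static u16 status_for_reinsert(u16 status)
	{
		/* qset_clear(): keep only the transfer sequence number. */
		return status & QH_STATUS_SEQ_MASK;
	}

	static u16 status_for_endpoint_reset(u16 status)
	{
		/* qset_reset(): drop the sequence number as well. */
		return status & ~QH_STATUS_SEQ_MASK;
	}
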
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c index 569ef0fed0f6..1976e9b41800 100644 --- a/drivers/usb/musb/cppi_dma.c +++ b/drivers/usb/musb/cppi_dma.c | |||
@@ -579,6 +579,7 @@ cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx) | |||
579 | * trigger the "send a ZLP?" confusion. | 579 | * trigger the "send a ZLP?" confusion. |
580 | */ | 580 | */ |
581 | rndis = (maxpacket & 0x3f) == 0 | 581 | rndis = (maxpacket & 0x3f) == 0 |
582 | && length > maxpacket | ||
582 | && length < 0xffff | 583 | && length < 0xffff |
583 | && (length % maxpacket) != 0; | 584 | && (length % maxpacket) != 0; |
584 | 585 | ||
@@ -1228,27 +1229,7 @@ void cppi_completion(struct musb *musb, u32 rx, u32 tx) | |||
1228 | 1229 | ||
1229 | hw_ep = tx_ch->hw_ep; | 1230 | hw_ep = tx_ch->hw_ep; |
1230 | 1231 | ||
1231 | /* Peripheral role never repurposes the | 1232 | musb_dma_completion(musb, index + 1, 1); |
1232 | * endpoint, so immediate completion is | ||
1233 | * safe. Host role waits for the fifo | ||
1234 | * to empty (TXPKTRDY irq) before going | ||
1235 | * to the next queued bulk transfer. | ||
1236 | */ | ||
1237 | if (is_host_active(cppi->musb)) { | ||
1238 | #if 0 | ||
1239 | /* WORKAROUND because we may | ||
1240 | * not always get TXKPTRDY ... | ||
1241 | */ | ||
1242 | int csr; | ||
1243 | |||
1244 | csr = musb_readw(hw_ep->regs, | ||
1245 | MUSB_TXCSR); | ||
1246 | if (csr & MUSB_TXCSR_TXPKTRDY) | ||
1247 | #endif | ||
1248 | completed = false; | ||
1249 | } | ||
1250 | if (completed) | ||
1251 | musb_dma_completion(musb, index + 1, 1); | ||
1252 | 1233 | ||
1253 | } else { | 1234 | } else { |
1254 | /* Bigger transfer than we could fit in | 1235 | /* Bigger transfer than we could fit in |
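The cppi_dma.c hunk above tightens the RNDIS-mode heuristic so a transfer that fits in a single packet never selects RNDIS mode. A small predicate showing the assumed intent of the new condition in cppi_next_tx_segment():

	/* Sketch: the tightened RNDIS-mode test for CPPI TX segments. */
	static bool use_rndis_mode(u32 length, u16 maxpacket)
	{
		return (maxpacket & 0x3f) == 0 &&	/* maxpacket is a multiple of 64 */
		       length > maxpacket &&		/* more than one packet... */
		       length < 0xffff &&		/* ...but within the hardware limit */
		       (length % maxpacket) != 0;	/* and it ends in a short packet */
	}
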
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index 338cd1611ab3..0112353ec97d 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c | |||
@@ -2170,26 +2170,22 @@ static int musb_suspend(struct platform_device *pdev, pm_message_t message) | |||
2170 | return 0; | 2170 | return 0; |
2171 | } | 2171 | } |
2172 | 2172 | ||
2173 | static int musb_resume(struct platform_device *pdev) | 2173 | static int musb_resume_early(struct platform_device *pdev) |
2174 | { | 2174 | { |
2175 | unsigned long flags; | ||
2176 | struct musb *musb = dev_to_musb(&pdev->dev); | 2175 | struct musb *musb = dev_to_musb(&pdev->dev); |
2177 | 2176 | ||
2178 | if (!musb->clock) | 2177 | if (!musb->clock) |
2179 | return 0; | 2178 | return 0; |
2180 | 2179 | ||
2181 | spin_lock_irqsave(&musb->lock, flags); | ||
2182 | |||
2183 | if (musb->set_clock) | 2180 | if (musb->set_clock) |
2184 | musb->set_clock(musb->clock, 1); | 2181 | musb->set_clock(musb->clock, 1); |
2185 | else | 2182 | else |
2186 | clk_enable(musb->clock); | 2183 | clk_enable(musb->clock); |
2187 | 2184 | ||
2188 | /* for static cmos like DaVinci, register values were preserved | 2185 | /* for static cmos like DaVinci, register values were preserved |
2189 | * unless for some reason the whole soc powered down and we're | 2186 | * unless for some reason the whole soc powered down or the USB |
2190 | * not treating that as a whole-system restart (e.g. swsusp) | 2187 | * module got reset through the PSC (vs just being disabled). |
2191 | */ | 2188 | */ |
2192 | spin_unlock_irqrestore(&musb->lock, flags); | ||
2193 | return 0; | 2189 | return 0; |
2194 | } | 2190 | } |
2195 | 2191 | ||
@@ -2207,7 +2203,7 @@ static struct platform_driver musb_driver = { | |||
2207 | .remove = __devexit_p(musb_remove), | 2203 | .remove = __devexit_p(musb_remove), |
2208 | .shutdown = musb_shutdown, | 2204 | .shutdown = musb_shutdown, |
2209 | .suspend = musb_suspend, | 2205 | .suspend = musb_suspend, |
2210 | .resume = musb_resume, | 2206 | .resume_early = musb_resume_early, |
2211 | }; | 2207 | }; |
2212 | 2208 | ||
2213 | /*-------------------------------------------------------------------------*/ | 2209 | /*-------------------------------------------------------------------------*/ |
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index c7ebd0867fcc..f79440cdfe7e 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c | |||
@@ -165,9 +165,15 @@ static void nuke(struct musb_ep *ep, const int status) | |||
165 | if (is_dma_capable() && ep->dma) { | 165 | if (is_dma_capable() && ep->dma) { |
166 | struct dma_controller *c = ep->musb->dma_controller; | 166 | struct dma_controller *c = ep->musb->dma_controller; |
167 | int value; | 167 | int value; |
168 | |||
168 | if (ep->is_in) { | 169 | if (ep->is_in) { |
170 | /* | ||
171 | * The programming guide says that we must not clear | ||
172 | * the DMAMODE bit before DMAENAB, so we only | ||
173 | * clear it in the second write... | ||
174 | */ | ||
169 | musb_writew(epio, MUSB_TXCSR, | 175 | musb_writew(epio, MUSB_TXCSR, |
170 | 0 | MUSB_TXCSR_FLUSHFIFO); | 176 | MUSB_TXCSR_DMAMODE | MUSB_TXCSR_FLUSHFIFO); |
171 | musb_writew(epio, MUSB_TXCSR, | 177 | musb_writew(epio, MUSB_TXCSR, |
172 | 0 | MUSB_TXCSR_FLUSHFIFO); | 178 | 0 | MUSB_TXCSR_FLUSHFIFO); |
173 | } else { | 179 | } else { |
@@ -230,7 +236,7 @@ static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep) | |||
230 | | IN token(s) are recd from Host. | 236 | | IN token(s) are recd from Host. |
231 | | -> DMA interrupt on completion | 237 | | -> DMA interrupt on completion |
232 | | calls TxAvail. | 238 | | calls TxAvail. |
233 | | -> stop DMA, ~DmaEenab, | 239 | | -> stop DMA, ~DMAENAB, |
234 | | -> set TxPktRdy for last short pkt or zlp | 240 | | -> set TxPktRdy for last short pkt or zlp |
235 | | -> Complete Request | 241 | | -> Complete Request |
236 | | -> Continue next request (call txstate) | 242 | | -> Continue next request (call txstate) |
@@ -315,9 +321,17 @@ static void txstate(struct musb *musb, struct musb_request *req) | |||
315 | request->dma, request_size); | 321 | request->dma, request_size); |
316 | if (use_dma) { | 322 | if (use_dma) { |
317 | if (musb_ep->dma->desired_mode == 0) { | 323 | if (musb_ep->dma->desired_mode == 0) { |
318 | /* ASSERT: DMAENAB is clear */ | 324 | /* |
319 | csr &= ~(MUSB_TXCSR_AUTOSET | | 325 | * We must not clear the DMAMODE bit |
320 | MUSB_TXCSR_DMAMODE); | 326 | * before the DMAENAB bit -- and the |
327 | * latter doesn't always get cleared | ||
328 | * before we get here... | ||
329 | */ | ||
330 | csr &= ~(MUSB_TXCSR_AUTOSET | ||
331 | | MUSB_TXCSR_DMAENAB); | ||
332 | musb_writew(epio, MUSB_TXCSR, csr | ||
333 | | MUSB_TXCSR_P_WZC_BITS); | ||
334 | csr &= ~MUSB_TXCSR_DMAMODE; | ||
321 | csr |= (MUSB_TXCSR_DMAENAB | | 335 | csr |= (MUSB_TXCSR_DMAENAB | |
322 | MUSB_TXCSR_MODE); | 336 | MUSB_TXCSR_MODE); |
323 | /* against programming guide */ | 337 | /* against programming guide */ |
@@ -334,10 +348,7 @@ static void txstate(struct musb *musb, struct musb_request *req) | |||
334 | 348 | ||
335 | #elif defined(CONFIG_USB_TI_CPPI_DMA) | 349 | #elif defined(CONFIG_USB_TI_CPPI_DMA) |
336 | /* program endpoint CSR first, then setup DMA */ | 350 | /* program endpoint CSR first, then setup DMA */ |
337 | csr &= ~(MUSB_TXCSR_AUTOSET | 351 | csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY); |
338 | | MUSB_TXCSR_DMAMODE | ||
339 | | MUSB_TXCSR_P_UNDERRUN | ||
340 | | MUSB_TXCSR_TXPKTRDY); | ||
341 | csr |= MUSB_TXCSR_MODE | MUSB_TXCSR_DMAENAB; | 352 | csr |= MUSB_TXCSR_MODE | MUSB_TXCSR_DMAENAB; |
342 | musb_writew(epio, MUSB_TXCSR, | 353 | musb_writew(epio, MUSB_TXCSR, |
343 | (MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN) | 354 | (MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN) |
@@ -364,8 +375,8 @@ static void txstate(struct musb *musb, struct musb_request *req) | |||
364 | if (!use_dma) { | 375 | if (!use_dma) { |
365 | c->channel_release(musb_ep->dma); | 376 | c->channel_release(musb_ep->dma); |
366 | musb_ep->dma = NULL; | 377 | musb_ep->dma = NULL; |
367 | /* ASSERT: DMAENAB clear */ | 378 | csr &= ~MUSB_TXCSR_DMAENAB; |
368 | csr &= ~(MUSB_TXCSR_DMAMODE | MUSB_TXCSR_MODE); | 379 | musb_writew(epio, MUSB_TXCSR, csr); |
369 | /* invariant: prequest->buf is non-null */ | 380 | /* invariant: prequest->buf is non-null */ |
370 | } | 381 | } |
371 | #elif defined(CONFIG_USB_TUSB_OMAP_DMA) | 382 | #elif defined(CONFIG_USB_TUSB_OMAP_DMA) |
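The musb_gadget.c changes above all enforce the rule stated in the added comments: TXCSR's DMAMODE bit must not be cleared before, or in the same write as, DMAENAB. Where both bits have to go, the driver now uses two writes. A minimal sketch of that ordering (the function name is illustrative; the register accessors and bit names are the real MUSB ones):

	/* Sketch: clearing TXCSR DMA bits in the order the programming guide requires. */
	static void example_stop_tx_dma(void __iomem *epio)
	{
		u16 csr = musb_readw(epio, MUSB_TXCSR);

		/* First write: drop DMAENAB (and AUTOSET) while DMAMODE is still set. */
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB);
		musb_writew(epio, MUSB_TXCSR, csr);

		/* Second write: only now is it safe to clear DMAMODE. */
		csr &= ~MUSB_TXCSR_DMAMODE;
		musb_writew(epio, MUSB_TXCSR, csr);
	}
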
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c index 499c431a6d62..db1b57415ec7 100644 --- a/drivers/usb/musb/musb_host.c +++ b/drivers/usb/musb/musb_host.c | |||
@@ -4,6 +4,7 @@ | |||
4 | * Copyright 2005 Mentor Graphics Corporation | 4 | * Copyright 2005 Mentor Graphics Corporation |
5 | * Copyright (C) 2005-2006 by Texas Instruments | 5 | * Copyright (C) 2005-2006 by Texas Instruments |
6 | * Copyright (C) 2006-2007 Nokia Corporation | 6 | * Copyright (C) 2006-2007 Nokia Corporation |
7 | * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com> | ||
7 | * | 8 | * |
8 | * This program is free software; you can redistribute it and/or | 9 | * This program is free software; you can redistribute it and/or |
9 | * modify it under the terms of the GNU General Public License | 10 | * modify it under the terms of the GNU General Public License |
@@ -96,8 +97,8 @@ | |||
96 | 97 | ||
97 | 98 | ||
98 | static void musb_ep_program(struct musb *musb, u8 epnum, | 99 | static void musb_ep_program(struct musb *musb, u8 epnum, |
99 | struct urb *urb, unsigned int nOut, | 100 | struct urb *urb, int is_out, |
100 | u8 *buf, u32 len); | 101 | u8 *buf, u32 offset, u32 len); |
101 | 102 | ||
102 | /* | 103 | /* |
103 | * Clear TX fifo. Needed to avoid BABBLE errors. | 104 | * Clear TX fifo. Needed to avoid BABBLE errors. |
@@ -125,6 +126,29 @@ static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep) | |||
125 | } | 126 | } |
126 | } | 127 | } |
127 | 128 | ||
129 | static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep) | ||
130 | { | ||
131 | void __iomem *epio = ep->regs; | ||
132 | u16 csr; | ||
133 | int retries = 5; | ||
134 | |||
135 | /* scrub any data left in the fifo */ | ||
136 | do { | ||
137 | csr = musb_readw(epio, MUSB_TXCSR); | ||
138 | if (!(csr & (MUSB_CSR0_TXPKTRDY | MUSB_CSR0_RXPKTRDY))) | ||
139 | break; | ||
140 | musb_writew(epio, MUSB_TXCSR, MUSB_CSR0_FLUSHFIFO); | ||
141 | csr = musb_readw(epio, MUSB_TXCSR); | ||
142 | udelay(10); | ||
143 | } while (--retries); | ||
144 | |||
145 | WARN(!retries, "Could not flush host TX%d fifo: csr: %04x\n", | ||
146 | ep->epnum, csr); | ||
147 | |||
148 | /* and reset for the next transfer */ | ||
149 | musb_writew(epio, MUSB_TXCSR, 0); | ||
150 | } | ||
151 | |||
128 | /* | 152 | /* |
129 | * Start transmit. Caller is responsible for locking shared resources. | 153 | * Start transmit. Caller is responsible for locking shared resources. |
130 | * musb must be locked. | 154 | * musb must be locked. |
@@ -145,13 +169,15 @@ static inline void musb_h_tx_start(struct musb_hw_ep *ep) | |||
145 | 169 | ||
146 | } | 170 | } |
147 | 171 | ||
148 | static inline void cppi_host_txdma_start(struct musb_hw_ep *ep) | 172 | static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep) |
149 | { | 173 | { |
150 | u16 txcsr; | 174 | u16 txcsr; |
151 | 175 | ||
152 | /* NOTE: no locks here; caller should lock and select EP */ | 176 | /* NOTE: no locks here; caller should lock and select EP */ |
153 | txcsr = musb_readw(ep->regs, MUSB_TXCSR); | 177 | txcsr = musb_readw(ep->regs, MUSB_TXCSR); |
154 | txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS; | 178 | txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS; |
179 | if (is_cppi_enabled()) | ||
180 | txcsr |= MUSB_TXCSR_DMAMODE; | ||
155 | musb_writew(ep->regs, MUSB_TXCSR, txcsr); | 181 | musb_writew(ep->regs, MUSB_TXCSR, txcsr); |
156 | } | 182 | } |
157 | 183 | ||
@@ -166,9 +192,10 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh) | |||
166 | { | 192 | { |
167 | u16 frame; | 193 | u16 frame; |
168 | u32 len; | 194 | u32 len; |
169 | void *buf; | ||
170 | void __iomem *mbase = musb->mregs; | 195 | void __iomem *mbase = musb->mregs; |
171 | struct urb *urb = next_urb(qh); | 196 | struct urb *urb = next_urb(qh); |
197 | void *buf = urb->transfer_buffer; | ||
198 | u32 offset = 0; | ||
172 | struct musb_hw_ep *hw_ep = qh->hw_ep; | 199 | struct musb_hw_ep *hw_ep = qh->hw_ep; |
173 | unsigned pipe = urb->pipe; | 200 | unsigned pipe = urb->pipe; |
174 | u8 address = usb_pipedevice(pipe); | 201 | u8 address = usb_pipedevice(pipe); |
@@ -191,7 +218,7 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh) | |||
191 | case USB_ENDPOINT_XFER_ISOC: | 218 | case USB_ENDPOINT_XFER_ISOC: |
192 | qh->iso_idx = 0; | 219 | qh->iso_idx = 0; |
193 | qh->frame = 0; | 220 | qh->frame = 0; |
194 | buf = urb->transfer_buffer + urb->iso_frame_desc[0].offset; | 221 | offset = urb->iso_frame_desc[0].offset; |
195 | len = urb->iso_frame_desc[0].length; | 222 | len = urb->iso_frame_desc[0].length; |
196 | break; | 223 | break; |
197 | default: /* bulk, interrupt */ | 224 | default: /* bulk, interrupt */ |
@@ -209,14 +236,14 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh) | |||
209 | case USB_ENDPOINT_XFER_ISOC: s = "-iso"; break; | 236 | case USB_ENDPOINT_XFER_ISOC: s = "-iso"; break; |
210 | default: s = "-intr"; break; | 237 | default: s = "-intr"; break; |
211 | }; s; }), | 238 | }; s; }), |
212 | epnum, buf, len); | 239 | epnum, buf + offset, len); |
213 | 240 | ||
214 | /* Configure endpoint */ | 241 | /* Configure endpoint */ |
215 | if (is_in || hw_ep->is_shared_fifo) | 242 | if (is_in || hw_ep->is_shared_fifo) |
216 | hw_ep->in_qh = qh; | 243 | hw_ep->in_qh = qh; |
217 | else | 244 | else |
218 | hw_ep->out_qh = qh; | 245 | hw_ep->out_qh = qh; |
219 | musb_ep_program(musb, epnum, urb, !is_in, buf, len); | 246 | musb_ep_program(musb, epnum, urb, !is_in, buf, offset, len); |
220 | 247 | ||
221 | /* transmit may have more work: start it when it is time */ | 248 | /* transmit may have more work: start it when it is time */ |
222 | if (is_in) | 249 | if (is_in) |
@@ -227,7 +254,6 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh) | |||
227 | case USB_ENDPOINT_XFER_ISOC: | 254 | case USB_ENDPOINT_XFER_ISOC: |
228 | case USB_ENDPOINT_XFER_INT: | 255 | case USB_ENDPOINT_XFER_INT: |
229 | DBG(3, "check whether there's still time for periodic Tx\n"); | 256 | DBG(3, "check whether there's still time for periodic Tx\n"); |
230 | qh->iso_idx = 0; | ||
231 | frame = musb_readw(mbase, MUSB_FRAME); | 257 | frame = musb_readw(mbase, MUSB_FRAME); |
232 | /* FIXME this doesn't implement that scheduling policy ... | 258 | /* FIXME this doesn't implement that scheduling policy ... |
233 | * or handle framecounter wrapping | 259 | * or handle framecounter wrapping |
@@ -256,7 +282,7 @@ start: | |||
256 | if (!hw_ep->tx_channel) | 282 | if (!hw_ep->tx_channel) |
257 | musb_h_tx_start(hw_ep); | 283 | musb_h_tx_start(hw_ep); |
258 | else if (is_cppi_enabled() || tusb_dma_omap()) | 284 | else if (is_cppi_enabled() || tusb_dma_omap()) |
259 | cppi_host_txdma_start(hw_ep); | 285 | musb_h_tx_dma_start(hw_ep); |
260 | } | 286 | } |
261 | } | 287 | } |
262 | 288 | ||
@@ -567,10 +593,17 @@ musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep) | |||
567 | csr = musb_readw(ep->regs, MUSB_TXCSR); | 593 | csr = musb_readw(ep->regs, MUSB_TXCSR); |
568 | if (csr & MUSB_TXCSR_MODE) { | 594 | if (csr & MUSB_TXCSR_MODE) { |
569 | musb_h_tx_flush_fifo(ep); | 595 | musb_h_tx_flush_fifo(ep); |
596 | csr = musb_readw(ep->regs, MUSB_TXCSR); | ||
570 | musb_writew(ep->regs, MUSB_TXCSR, | 597 | musb_writew(ep->regs, MUSB_TXCSR, |
571 | MUSB_TXCSR_FRCDATATOG); | 598 | csr | MUSB_TXCSR_FRCDATATOG); |
572 | } | 599 | } |
573 | /* clear mode (and everything else) to enable Rx */ | 600 | |
601 | /* | ||
602 | * Clear the MODE bit (and everything else) to enable Rx. | ||
603 | * NOTE: we mustn't clear the DMAMODE bit before DMAENAB. | ||
604 | */ | ||
605 | if (csr & MUSB_TXCSR_DMAMODE) | ||
606 | musb_writew(ep->regs, MUSB_TXCSR, MUSB_TXCSR_DMAMODE); | ||
574 | musb_writew(ep->regs, MUSB_TXCSR, 0); | 607 | musb_writew(ep->regs, MUSB_TXCSR, 0); |
575 | 608 | ||
576 | /* scrub all previous state, clearing toggle */ | 609 | /* scrub all previous state, clearing toggle */ |
@@ -601,14 +634,68 @@ musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep) | |||
601 | ep->rx_reinit = 0; | 634 | ep->rx_reinit = 0; |
602 | } | 635 | } |
603 | 636 | ||
637 | static bool musb_tx_dma_program(struct dma_controller *dma, | ||
638 | struct musb_hw_ep *hw_ep, struct musb_qh *qh, | ||
639 | struct urb *urb, u32 offset, u32 length) | ||
640 | { | ||
641 | struct dma_channel *channel = hw_ep->tx_channel; | ||
642 | void __iomem *epio = hw_ep->regs; | ||
643 | u16 pkt_size = qh->maxpacket; | ||
644 | u16 csr; | ||
645 | u8 mode; | ||
646 | |||
647 | #ifdef CONFIG_USB_INVENTRA_DMA | ||
648 | if (length > channel->max_len) | ||
649 | length = channel->max_len; | ||
650 | |||
651 | csr = musb_readw(epio, MUSB_TXCSR); | ||
652 | if (length > pkt_size) { | ||
653 | mode = 1; | ||
654 | csr |= MUSB_TXCSR_AUTOSET | ||
655 | | MUSB_TXCSR_DMAMODE | ||
656 | | MUSB_TXCSR_DMAENAB; | ||
657 | } else { | ||
658 | mode = 0; | ||
659 | csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE); | ||
660 | csr |= MUSB_TXCSR_DMAENAB; /* against programmer's guide */ | ||
661 | } | ||
662 | channel->desired_mode = mode; | ||
663 | musb_writew(epio, MUSB_TXCSR, csr); | ||
664 | #else | ||
665 | if (!is_cppi_enabled() && !tusb_dma_omap()) | ||
666 | return false; | ||
667 | |||
668 | channel->actual_len = 0; | ||
669 | |||
670 | /* | ||
671 | * TX uses "RNDIS" mode automatically but needs help | ||
672 | * to identify the zero-length-final-packet case. | ||
673 | */ | ||
674 | mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0; | ||
675 | #endif | ||
676 | |||
677 | qh->segsize = length; | ||
678 | |||
679 | if (!dma->channel_program(channel, pkt_size, mode, | ||
680 | urb->transfer_dma + offset, length)) { | ||
681 | dma->channel_release(channel); | ||
682 | hw_ep->tx_channel = NULL; | ||
683 | |||
684 | csr = musb_readw(epio, MUSB_TXCSR); | ||
685 | csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB); | ||
686 | musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_H_WZC_BITS); | ||
687 | return false; | ||
688 | } | ||
689 | return true; | ||
690 | } | ||
604 | 691 | ||
605 | /* | 692 | /* |
606 | * Program an HDRC endpoint as per the given URB | 693 | * Program an HDRC endpoint as per the given URB |
607 | * Context: irqs blocked, controller lock held | 694 | * Context: irqs blocked, controller lock held |
608 | */ | 695 | */ |
609 | static void musb_ep_program(struct musb *musb, u8 epnum, | 696 | static void musb_ep_program(struct musb *musb, u8 epnum, |
610 | struct urb *urb, unsigned int is_out, | 697 | struct urb *urb, int is_out, |
611 | u8 *buf, u32 len) | 698 | u8 *buf, u32 offset, u32 len) |
612 | { | 699 | { |
613 | struct dma_controller *dma_controller; | 700 | struct dma_controller *dma_controller; |
614 | struct dma_channel *dma_channel; | 701 | struct dma_channel *dma_channel; |
@@ -667,12 +754,17 @@ static void musb_ep_program(struct musb *musb, u8 epnum, | |||
667 | 754 | ||
668 | /* general endpoint setup */ | 755 | /* general endpoint setup */ |
669 | if (epnum) { | 756 | if (epnum) { |
670 | /* ASSERT: TXCSR_DMAENAB was already cleared */ | ||
671 | |||
672 | /* flush all old state, set default */ | 757 | /* flush all old state, set default */ |
673 | musb_h_tx_flush_fifo(hw_ep); | 758 | musb_h_tx_flush_fifo(hw_ep); |
759 | |||
760 | /* | ||
761 | * We must not clear the DMAMODE bit before or in | ||
762 | * the same cycle with the DMAENAB bit, so we clear | ||
763 | * the latter first... | ||
764 | */ | ||
674 | csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT | 765 | csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT |
675 | | MUSB_TXCSR_DMAMODE | 766 | | MUSB_TXCSR_AUTOSET |
767 | | MUSB_TXCSR_DMAENAB | ||
676 | | MUSB_TXCSR_FRCDATATOG | 768 | | MUSB_TXCSR_FRCDATATOG |
677 | | MUSB_TXCSR_H_RXSTALL | 769 | | MUSB_TXCSR_H_RXSTALL |
678 | | MUSB_TXCSR_H_ERROR | 770 | | MUSB_TXCSR_H_ERROR |
@@ -680,24 +772,20 @@ static void musb_ep_program(struct musb *musb, u8 epnum, | |||
680 | ); | 772 | ); |
681 | csr |= MUSB_TXCSR_MODE; | 773 | csr |= MUSB_TXCSR_MODE; |
682 | 774 | ||
683 | if (usb_gettoggle(urb->dev, | 775 | if (usb_gettoggle(urb->dev, qh->epnum, 1)) |
684 | qh->epnum, 1)) | ||
685 | csr |= MUSB_TXCSR_H_WR_DATATOGGLE | 776 | csr |= MUSB_TXCSR_H_WR_DATATOGGLE |
686 | | MUSB_TXCSR_H_DATATOGGLE; | 777 | | MUSB_TXCSR_H_DATATOGGLE; |
687 | else | 778 | else |
688 | csr |= MUSB_TXCSR_CLRDATATOG; | 779 | csr |= MUSB_TXCSR_CLRDATATOG; |
689 | 780 | ||
690 | /* twice in case of double packet buffering */ | ||
691 | musb_writew(epio, MUSB_TXCSR, csr); | 781 | musb_writew(epio, MUSB_TXCSR, csr); |
692 | /* REVISIT may need to clear FLUSHFIFO ... */ | 782 | /* REVISIT may need to clear FLUSHFIFO ... */ |
783 | csr &= ~MUSB_TXCSR_DMAMODE; | ||
693 | musb_writew(epio, MUSB_TXCSR, csr); | 784 | musb_writew(epio, MUSB_TXCSR, csr); |
694 | csr = musb_readw(epio, MUSB_TXCSR); | 785 | csr = musb_readw(epio, MUSB_TXCSR); |
695 | } else { | 786 | } else { |
696 | /* endpoint 0: just flush */ | 787 | /* endpoint 0: just flush */ |
697 | musb_writew(epio, MUSB_CSR0, | 788 | musb_h_ep0_flush_fifo(hw_ep); |
698 | csr | MUSB_CSR0_FLUSHFIFO); | ||
699 | musb_writew(epio, MUSB_CSR0, | ||
700 | csr | MUSB_CSR0_FLUSHFIFO); | ||
701 | } | 789 | } |
702 | 790 | ||
703 | /* target addr and (for multipoint) hub addr/port */ | 791 | /* target addr and (for multipoint) hub addr/port */ |
@@ -734,113 +822,14 @@ static void musb_ep_program(struct musb *musb, u8 epnum, | |||
734 | else | 822 | else |
735 | load_count = min((u32) packet_sz, len); | 823 | load_count = min((u32) packet_sz, len); |
736 | 824 | ||
737 | #ifdef CONFIG_USB_INVENTRA_DMA | 825 | if (dma_channel && musb_tx_dma_program(dma_controller, |
738 | if (dma_channel) { | 826 | hw_ep, qh, urb, offset, len)) |
739 | 827 | load_count = 0; | |
740 | /* clear previous state */ | ||
741 | csr = musb_readw(epio, MUSB_TXCSR); | ||
742 | csr &= ~(MUSB_TXCSR_AUTOSET | ||
743 | | MUSB_TXCSR_DMAMODE | ||
744 | | MUSB_TXCSR_DMAENAB); | ||
745 | csr |= MUSB_TXCSR_MODE; | ||
746 | musb_writew(epio, MUSB_TXCSR, | ||
747 | csr | MUSB_TXCSR_MODE); | ||
748 | |||
749 | qh->segsize = min(len, dma_channel->max_len); | ||
750 | |||
751 | if (qh->segsize <= packet_sz) | ||
752 | dma_channel->desired_mode = 0; | ||
753 | else | ||
754 | dma_channel->desired_mode = 1; | ||
755 | |||
756 | |||
757 | if (dma_channel->desired_mode == 0) { | ||
758 | csr &= ~(MUSB_TXCSR_AUTOSET | ||
759 | | MUSB_TXCSR_DMAMODE); | ||
760 | csr |= (MUSB_TXCSR_DMAENAB); | ||
761 | /* against programming guide */ | ||
762 | } else | ||
763 | csr |= (MUSB_TXCSR_AUTOSET | ||
764 | | MUSB_TXCSR_DMAENAB | ||
765 | | MUSB_TXCSR_DMAMODE); | ||
766 | |||
767 | musb_writew(epio, MUSB_TXCSR, csr); | ||
768 | |||
769 | dma_ok = dma_controller->channel_program( | ||
770 | dma_channel, packet_sz, | ||
771 | dma_channel->desired_mode, | ||
772 | urb->transfer_dma, | ||
773 | qh->segsize); | ||
774 | if (dma_ok) { | ||
775 | load_count = 0; | ||
776 | } else { | ||
777 | dma_controller->channel_release(dma_channel); | ||
778 | if (is_out) | ||
779 | hw_ep->tx_channel = NULL; | ||
780 | else | ||
781 | hw_ep->rx_channel = NULL; | ||
782 | dma_channel = NULL; | ||
783 | } | ||
784 | } | ||
785 | #endif | ||
786 | |||
787 | /* candidate for DMA */ | ||
788 | if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) { | ||
789 | |||
790 | /* program endpoint CSRs first, then setup DMA. | ||
791 | * assume CPPI setup succeeds. | ||
792 | * defer enabling dma. | ||
793 | */ | ||
794 | csr = musb_readw(epio, MUSB_TXCSR); | ||
795 | csr &= ~(MUSB_TXCSR_AUTOSET | ||
796 | | MUSB_TXCSR_DMAMODE | ||
797 | | MUSB_TXCSR_DMAENAB); | ||
798 | csr |= MUSB_TXCSR_MODE; | ||
799 | musb_writew(epio, MUSB_TXCSR, | ||
800 | csr | MUSB_TXCSR_MODE); | ||
801 | |||
802 | dma_channel->actual_len = 0L; | ||
803 | qh->segsize = len; | ||
804 | |||
805 | /* TX uses "rndis" mode automatically, but needs help | ||
806 | * to identify the zero-length-final-packet case. | ||
807 | */ | ||
808 | dma_ok = dma_controller->channel_program( | ||
809 | dma_channel, packet_sz, | ||
810 | (urb->transfer_flags | ||
811 | & URB_ZERO_PACKET) | ||
812 | == URB_ZERO_PACKET, | ||
813 | urb->transfer_dma, | ||
814 | qh->segsize); | ||
815 | if (dma_ok) { | ||
816 | load_count = 0; | ||
817 | } else { | ||
818 | dma_controller->channel_release(dma_channel); | ||
819 | hw_ep->tx_channel = NULL; | ||
820 | dma_channel = NULL; | ||
821 | |||
822 | /* REVISIT there's an error path here that | ||
823 | * needs handling: can't do dma, but | ||
824 | * there's no pio buffer address... | ||
825 | */ | ||
826 | } | ||
827 | } | ||
828 | 828 | ||
829 | if (load_count) { | 829 | if (load_count) { |
830 | /* ASSERT: TXCSR_DMAENAB was already cleared */ | ||
831 | |||
832 | /* PIO to load FIFO */ | 830 | /* PIO to load FIFO */ |
833 | qh->segsize = load_count; | 831 | qh->segsize = load_count; |
834 | musb_write_fifo(hw_ep, load_count, buf); | 832 | musb_write_fifo(hw_ep, load_count, buf); |
835 | csr = musb_readw(epio, MUSB_TXCSR); | ||
836 | csr &= ~(MUSB_TXCSR_DMAENAB | ||
837 | | MUSB_TXCSR_DMAMODE | ||
838 | | MUSB_TXCSR_AUTOSET); | ||
839 | /* write CSR */ | ||
840 | csr |= MUSB_TXCSR_MODE; | ||
841 | |||
842 | if (epnum) | ||
843 | musb_writew(epio, MUSB_TXCSR, csr); | ||
844 | } | 833 | } |
845 | 834 | ||
846 | /* re-enable interrupt */ | 835 | /* re-enable interrupt */ |
@@ -895,7 +884,7 @@ static void musb_ep_program(struct musb *musb, u8 epnum, | |||
895 | dma_channel, packet_sz, | 884 | dma_channel, packet_sz, |
896 | !(urb->transfer_flags | 885 | !(urb->transfer_flags |
897 | & URB_SHORT_NOT_OK), | 886 | & URB_SHORT_NOT_OK), |
898 | urb->transfer_dma, | 887 | urb->transfer_dma + offset, |
899 | qh->segsize); | 888 | qh->segsize); |
900 | if (!dma_ok) { | 889 | if (!dma_ok) { |
901 | dma_controller->channel_release( | 890 | dma_controller->channel_release( |
@@ -1063,11 +1052,7 @@ irqreturn_t musb_h_ep0_irq(struct musb *musb) | |||
1063 | csr &= ~MUSB_CSR0_H_NAKTIMEOUT; | 1052 | csr &= ~MUSB_CSR0_H_NAKTIMEOUT; |
1064 | musb_writew(epio, MUSB_CSR0, csr); | 1053 | musb_writew(epio, MUSB_CSR0, csr); |
1065 | } else { | 1054 | } else { |
1066 | csr |= MUSB_CSR0_FLUSHFIFO; | 1055 | musb_h_ep0_flush_fifo(hw_ep); |
1067 | musb_writew(epio, MUSB_CSR0, csr); | ||
1068 | musb_writew(epio, MUSB_CSR0, csr); | ||
1069 | csr &= ~MUSB_CSR0_H_NAKTIMEOUT; | ||
1070 | musb_writew(epio, MUSB_CSR0, csr); | ||
1071 | } | 1056 | } |
1072 | 1057 | ||
1073 | musb_writeb(epio, MUSB_NAKLIMIT0, 0); | 1058 | musb_writeb(epio, MUSB_NAKLIMIT0, 0); |
@@ -1081,10 +1066,7 @@ irqreturn_t musb_h_ep0_irq(struct musb *musb) | |||
1081 | * SHOULD NEVER HAPPEN! */ | 1066 | * SHOULD NEVER HAPPEN! */ |
1082 | ERR("no URB for end 0\n"); | 1067 | ERR("no URB for end 0\n"); |
1083 | 1068 | ||
1084 | musb_writew(epio, MUSB_CSR0, MUSB_CSR0_FLUSHFIFO); | 1069 | musb_h_ep0_flush_fifo(hw_ep); |
1085 | musb_writew(epio, MUSB_CSR0, MUSB_CSR0_FLUSHFIFO); | ||
1086 | musb_writew(epio, MUSB_CSR0, 0); | ||
1087 | |||
1088 | goto done; | 1070 | goto done; |
1089 | } | 1071 | } |
1090 | 1072 | ||
@@ -1145,8 +1127,8 @@ void musb_host_tx(struct musb *musb, u8 epnum) | |||
1145 | int pipe; | 1127 | int pipe; |
1146 | bool done = false; | 1128 | bool done = false; |
1147 | u16 tx_csr; | 1129 | u16 tx_csr; |
1148 | size_t wLength = 0; | 1130 | size_t length = 0; |
1149 | u8 *buf = NULL; | 1131 | size_t offset = 0; |
1150 | struct urb *urb; | 1132 | struct urb *urb; |
1151 | struct musb_hw_ep *hw_ep = musb->endpoints + epnum; | 1133 | struct musb_hw_ep *hw_ep = musb->endpoints + epnum; |
1152 | void __iomem *epio = hw_ep->regs; | 1134 | void __iomem *epio = hw_ep->regs; |
@@ -1164,7 +1146,7 @@ void musb_host_tx(struct musb *musb, u8 epnum) | |||
1164 | /* with CPPI, DMA sometimes triggers "extra" irqs */ | 1146 | /* with CPPI, DMA sometimes triggers "extra" irqs */ |
1165 | if (!urb) { | 1147 | if (!urb) { |
1166 | DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr); | 1148 | DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr); |
1167 | goto finish; | 1149 | return; |
1168 | } | 1150 | } |
1169 | 1151 | ||
1170 | pipe = urb->pipe; | 1152 | pipe = urb->pipe; |
@@ -1201,7 +1183,7 @@ void musb_host_tx(struct musb *musb, u8 epnum) | |||
1201 | musb_writew(epio, MUSB_TXCSR, | 1183 | musb_writew(epio, MUSB_TXCSR, |
1202 | MUSB_TXCSR_H_WZC_BITS | 1184 | MUSB_TXCSR_H_WZC_BITS |
1203 | | MUSB_TXCSR_TXPKTRDY); | 1185 | | MUSB_TXCSR_TXPKTRDY); |
1204 | goto finish; | 1186 | return; |
1205 | } | 1187 | } |
1206 | 1188 | ||
1207 | if (status) { | 1189 | if (status) { |
@@ -1233,29 +1215,89 @@ void musb_host_tx(struct musb *musb, u8 epnum) | |||
1233 | /* second cppi case */ | 1215 | /* second cppi case */ |
1234 | if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { | 1216 | if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { |
1235 | DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr); | 1217 | DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr); |
1236 | goto finish; | 1218 | return; |
1219 | } | ||
1220 | |||
1221 | if (is_dma_capable() && dma && !status) { | ||
1222 | /* | ||
1223 | * DMA has completed. But if we're using DMA mode 1 (multi | ||
1224 | * packet DMA), we need a terminal TXPKTRDY interrupt before | ||
1225 | * we can consider this transfer completed, lest we trash | ||
1226 | * its last packet when writing the next URB's data. So we | ||
1227 | * switch back to mode 0 to get that interrupt; we'll come | ||
1228 | * back here once it happens. | ||
1229 | */ | ||
1230 | if (tx_csr & MUSB_TXCSR_DMAMODE) { | ||
1231 | /* | ||
1232 | * We shouldn't clear DMAMODE with DMAENAB set; so | ||
1233 | * clear them in a safe order. That should be OK | ||
1234 | * once TXPKTRDY has been set (and I've never seen | ||
1235 | * it being 0 at this moment -- DMA interrupt latency | ||
1236 | * is significant) but if it hasn't been then we have | ||
1237 | * no choice but to stop being polite and ignore the | ||
1238 | * programmer's guide... :-) | ||
1239 | * | ||
1240 | * Note that we must write TXCSR with TXPKTRDY cleared | ||
1241 | * in order not to re-trigger the packet send (this bit | ||
1242 | * can't be cleared by CPU), and there's another caveat: | ||
1243 | * TXPKTRDY may be set shortly and then cleared in the | ||
1244 | * double-buffered FIFO mode, so we do an extra TXCSR | ||
1245 | * read for debouncing... | ||
1246 | */ | ||
1247 | tx_csr &= musb_readw(epio, MUSB_TXCSR); | ||
1248 | if (tx_csr & MUSB_TXCSR_TXPKTRDY) { | ||
1249 | tx_csr &= ~(MUSB_TXCSR_DMAENAB | | ||
1250 | MUSB_TXCSR_TXPKTRDY); | ||
1251 | musb_writew(epio, MUSB_TXCSR, | ||
1252 | tx_csr | MUSB_TXCSR_H_WZC_BITS); | ||
1253 | } | ||
1254 | tx_csr &= ~(MUSB_TXCSR_DMAMODE | | ||
1255 | MUSB_TXCSR_TXPKTRDY); | ||
1256 | musb_writew(epio, MUSB_TXCSR, | ||
1257 | tx_csr | MUSB_TXCSR_H_WZC_BITS); | ||
1258 | |||
1259 | /* | ||
1260 | * There is no guarantee that we'll get an interrupt | ||
1261 | * after clearing DMAMODE as we might have done this | ||
1262 | * too late (after TXPKTRDY was cleared by controller). | ||
1263 | * Re-read TXCSR as we have spoiled its previous value. | ||
1264 | */ | ||
1265 | tx_csr = musb_readw(epio, MUSB_TXCSR); | ||
1266 | } | ||
1237 | 1267 | ||
1268 | /* | ||
1269 | * We may get here from a DMA completion or TXPKTRDY interrupt. | ||
1270 | * In any case, we must check the FIFO status here and bail out | ||
1271 | * only if the FIFO still has data -- that should prevent the | ||
1272 | * "missed" TXPKTRDY interrupts and deal with double-buffered | ||
1273 | * FIFO mode too... | ||
1274 | */ | ||
1275 | if (tx_csr & (MUSB_TXCSR_FIFONOTEMPTY | MUSB_TXCSR_TXPKTRDY)) { | ||
1276 | DBG(2, "DMA complete but packet still in FIFO, " | ||
1277 | "CSR %04x\n", tx_csr); | ||
1278 | return; | ||
1279 | } | ||
1238 | } | 1280 | } |
1239 | 1281 | ||
1240 | /* REVISIT this looks wrong... */ | ||
1241 | if (!status || dma || usb_pipeisoc(pipe)) { | 1282 | if (!status || dma || usb_pipeisoc(pipe)) { |
1242 | if (dma) | 1283 | if (dma) |
1243 | wLength = dma->actual_len; | 1284 | length = dma->actual_len; |
1244 | else | 1285 | else |
1245 | wLength = qh->segsize; | 1286 | length = qh->segsize; |
1246 | qh->offset += wLength; | 1287 | qh->offset += length; |
1247 | 1288 | ||
1248 | if (usb_pipeisoc(pipe)) { | 1289 | if (usb_pipeisoc(pipe)) { |
1249 | struct usb_iso_packet_descriptor *d; | 1290 | struct usb_iso_packet_descriptor *d; |
1250 | 1291 | ||
1251 | d = urb->iso_frame_desc + qh->iso_idx; | 1292 | d = urb->iso_frame_desc + qh->iso_idx; |
1252 | d->actual_length = qh->segsize; | 1293 | d->actual_length = length; |
1294 | d->status = status; | ||
1253 | if (++qh->iso_idx >= urb->number_of_packets) { | 1295 | if (++qh->iso_idx >= urb->number_of_packets) { |
1254 | done = true; | 1296 | done = true; |
1255 | } else { | 1297 | } else { |
1256 | d++; | 1298 | d++; |
1257 | buf = urb->transfer_buffer + d->offset; | 1299 | offset = d->offset; |
1258 | wLength = d->length; | 1300 | length = d->length; |
1259 | } | 1301 | } |
1260 | } else if (dma) { | 1302 | } else if (dma) { |
1261 | done = true; | 1303 | done = true; |
@@ -1268,10 +1310,8 @@ void musb_host_tx(struct musb *musb, u8 epnum) | |||
1268 | & URB_ZERO_PACKET)) | 1310 | & URB_ZERO_PACKET)) |
1269 | done = true; | 1311 | done = true; |
1270 | if (!done) { | 1312 | if (!done) { |
1271 | buf = urb->transfer_buffer | 1313 | offset = qh->offset; |
1272 | + qh->offset; | 1314 | length = urb->transfer_buffer_length - offset; |
1273 | wLength = urb->transfer_buffer_length | ||
1274 | - qh->offset; | ||
1275 | } | 1315 | } |
1276 | } | 1316 | } |
1277 | } | 1317 | } |
@@ -1290,28 +1330,31 @@ void musb_host_tx(struct musb *musb, u8 epnum) | |||
1290 | urb->status = status; | 1330 | urb->status = status; |
1291 | urb->actual_length = qh->offset; | 1331 | urb->actual_length = qh->offset; |
1292 | musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT); | 1332 | musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT); |
1333 | return; | ||
1334 | } else if (usb_pipeisoc(pipe) && dma) { | ||
1335 | if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb, | ||
1336 | offset, length)) | ||
1337 | return; | ||
1338 | } else if (tx_csr & MUSB_TXCSR_DMAENAB) { | ||
1339 | DBG(1, "not complete, but DMA enabled?\n"); | ||
1340 | return; | ||
1341 | } | ||
1293 | 1342 | ||
1294 | } else if (!(tx_csr & MUSB_TXCSR_DMAENAB)) { | 1343 | /* |
1295 | /* WARN_ON(!buf); */ | 1344 | * PIO: start next packet in this URB. |
1296 | 1345 | * | |
1297 | /* REVISIT: some docs say that when hw_ep->tx_double_buffered, | 1346 | * REVISIT: some docs say that when hw_ep->tx_double_buffered, |
1298 | * (and presumably, fifo is not half-full) we should write TWO | 1347 | * (and presumably, FIFO is not half-full) we should write *two* |
1299 | * packets before updating TXCSR ... other docs disagree ... | 1348 | * packets before updating TXCSR; other docs disagree... |
1300 | */ | 1349 | */ |
1301 | /* PIO: start next packet in this URB */ | 1350 | if (length > qh->maxpacket) |
1302 | if (wLength > qh->maxpacket) | 1351 | length = qh->maxpacket; |
1303 | wLength = qh->maxpacket; | 1352 | musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset); |
1304 | musb_write_fifo(hw_ep, wLength, buf); | 1353 | qh->segsize = length; |
1305 | qh->segsize = wLength; | ||
1306 | |||
1307 | musb_ep_select(mbase, epnum); | ||
1308 | musb_writew(epio, MUSB_TXCSR, | ||
1309 | MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY); | ||
1310 | } else | ||
1311 | DBG(1, "not complete, but dma enabled?\n"); | ||
1312 | 1354 | ||
1313 | finish: | 1355 | musb_ep_select(mbase, epnum); |
1314 | return; | 1356 | musb_writew(epio, MUSB_TXCSR, |
1357 | MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY); | ||
1315 | } | 1358 | } |
1316 | 1359 | ||
1317 | 1360 | ||
@@ -1841,7 +1884,7 @@ static int musb_urb_enqueue( | |||
1841 | unsigned long flags; | 1884 | unsigned long flags; |
1842 | struct musb *musb = hcd_to_musb(hcd); | 1885 | struct musb *musb = hcd_to_musb(hcd); |
1843 | struct usb_host_endpoint *hep = urb->ep; | 1886 | struct usb_host_endpoint *hep = urb->ep; |
1844 | struct musb_qh *qh = hep->hcpriv; | 1887 | struct musb_qh *qh; |
1845 | struct usb_endpoint_descriptor *epd = &hep->desc; | 1888 | struct usb_endpoint_descriptor *epd = &hep->desc; |
1846 | int ret; | 1889 | int ret; |
1847 | unsigned type_reg; | 1890 | unsigned type_reg; |
@@ -1853,22 +1896,21 @@ static int musb_urb_enqueue( | |||
1853 | 1896 | ||
1854 | spin_lock_irqsave(&musb->lock, flags); | 1897 | spin_lock_irqsave(&musb->lock, flags); |
1855 | ret = usb_hcd_link_urb_to_ep(hcd, urb); | 1898 | ret = usb_hcd_link_urb_to_ep(hcd, urb); |
1899 | qh = ret ? NULL : hep->hcpriv; | ||
1900 | if (qh) | ||
1901 | urb->hcpriv = qh; | ||
1856 | spin_unlock_irqrestore(&musb->lock, flags); | 1902 | spin_unlock_irqrestore(&musb->lock, flags); |
1857 | if (ret) | ||
1858 | return ret; | ||
1859 | 1903 | ||
1860 | /* DMA mapping was already done, if needed, and this urb is on | 1904 | /* DMA mapping was already done, if needed, and this urb is on |
1861 | * hep->urb_list ... so there's little to do unless hep wasn't | 1905 | * hep->urb_list now ... so we're done, unless hep wasn't yet |
1862 | * yet scheduled onto a live qh. | 1906 | * scheduled onto a live qh. |
1863 | * | 1907 | * |
1864 | * REVISIT best to keep hep->hcpriv valid until the endpoint gets | 1908 | * REVISIT best to keep hep->hcpriv valid until the endpoint gets |
1865 | * disabled, testing for empty qh->ring and avoiding qh setup costs | 1909 | * disabled, testing for empty qh->ring and avoiding qh setup costs |
1866 | * except for the first urb queued after a config change. | 1910 | * except for the first urb queued after a config change. |
1867 | */ | 1911 | */ |
1868 | if (qh) { | 1912 | if (qh || ret) |
1869 | urb->hcpriv = qh; | 1913 | return ret; |
1870 | return 0; | ||
1871 | } | ||
1872 | 1914 | ||
1873 | /* Allocate and initialize qh, minimizing the work done each time | 1915 | /* Allocate and initialize qh, minimizing the work done each time |
1874 | * hw_ep gets reprogrammed, or with irqs blocked. Then schedule it. | 1916 | * hw_ep gets reprogrammed, or with irqs blocked. Then schedule it. |
@@ -2044,7 +2086,7 @@ static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh, int is_in) | |||
2044 | * endpoint's irq status here to avoid bogus irqs. | 2086 | * endpoint's irq status here to avoid bogus irqs. |
2045 | * clearing that status is platform-specific... | 2087 | * clearing that status is platform-specific... |
2046 | */ | 2088 | */ |
2047 | } else { | 2089 | } else if (ep->epnum) { |
2048 | musb_h_tx_flush_fifo(ep); | 2090 | musb_h_tx_flush_fifo(ep); |
2049 | csr = musb_readw(epio, MUSB_TXCSR); | 2091 | csr = musb_readw(epio, MUSB_TXCSR); |
2050 | csr &= ~(MUSB_TXCSR_AUTOSET | 2092 | csr &= ~(MUSB_TXCSR_AUTOSET |
@@ -2058,6 +2100,8 @@ static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh, int is_in) | |||
2058 | musb_writew(epio, MUSB_TXCSR, csr); | 2100 | musb_writew(epio, MUSB_TXCSR, csr); |
2059 | /* flush cpu writebuffer */ | 2101 | /* flush cpu writebuffer */ |
2060 | csr = musb_readw(epio, MUSB_TXCSR); | 2102 | csr = musb_readw(epio, MUSB_TXCSR); |
2103 | } else { | ||
2104 | musb_h_ep0_flush_fifo(ep); | ||
2061 | } | 2105 | } |
2062 | if (status == 0) | 2106 | if (status == 0) |
2063 | musb_advance_schedule(ep->musb, urb, ep, is_in); | 2107 | musb_advance_schedule(ep->musb, urb, ep, is_in); |
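The new musb_tx_dma_program() in musb_host.c centralizes a decision the old open-coded Inventra-DMA paths duplicated: transfers longer than one packet take DMA mode 1 with AUTOSET, anything that fits in a single packet takes mode 0 and relies on the terminal TXPKTRDY interrupt. A hedged sketch of just that decision (the helper name is illustrative):

	/* Sketch: TX DMA mode selection as performed for Inventra DMA. */
	static u8 pick_tx_dma_mode(u32 length, u16 pkt_size, u16 *csr)
	{
		if (length > pkt_size) {
			/* Mode 1: the controller strings whole packets together. */
			*csr |= MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE | MUSB_TXCSR_DMAENAB;
			return 1;
		}
		/* Mode 0: one packet per request; AUTOSET and DMAMODE stay clear. */
		*csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
		*csr |= MUSB_TXCSR_DMAENAB;
		return 0;
	}
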
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c index 8662e9e159c3..5e83f96d6b77 100644 --- a/drivers/usb/musb/musbhsdma.c +++ b/drivers/usb/musb/musbhsdma.c | |||
@@ -195,30 +195,32 @@ static int dma_channel_abort(struct dma_channel *channel) | |||
195 | void __iomem *mbase = musb_channel->controller->base; | 195 | void __iomem *mbase = musb_channel->controller->base; |
196 | 196 | ||
197 | u8 bchannel = musb_channel->idx; | 197 | u8 bchannel = musb_channel->idx; |
198 | int offset; | ||
198 | u16 csr; | 199 | u16 csr; |
199 | 200 | ||
200 | if (channel->status == MUSB_DMA_STATUS_BUSY) { | 201 | if (channel->status == MUSB_DMA_STATUS_BUSY) { |
201 | if (musb_channel->transmit) { | 202 | if (musb_channel->transmit) { |
202 | 203 | offset = MUSB_EP_OFFSET(musb_channel->epnum, | |
203 | csr = musb_readw(mbase, | 204 | MUSB_TXCSR); |
204 | MUSB_EP_OFFSET(musb_channel->epnum, | 205 | |
205 | MUSB_TXCSR)); | 206 | /* |
206 | csr &= ~(MUSB_TXCSR_AUTOSET | | 207 | * The programming guide says that we must clear |
207 | MUSB_TXCSR_DMAENAB | | 208 | * the DMAENAB bit before the DMAMODE bit... |
208 | MUSB_TXCSR_DMAMODE); | 209 | */ |
209 | musb_writew(mbase, | 210 | csr = musb_readw(mbase, offset); |
210 | MUSB_EP_OFFSET(musb_channel->epnum, MUSB_TXCSR), | 211 | csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB); |
211 | csr); | 212 | musb_writew(mbase, offset, csr); |
213 | csr &= ~MUSB_TXCSR_DMAMODE; | ||
214 | musb_writew(mbase, offset, csr); | ||
212 | } else { | 215 | } else { |
213 | csr = musb_readw(mbase, | 216 | offset = MUSB_EP_OFFSET(musb_channel->epnum, |
214 | MUSB_EP_OFFSET(musb_channel->epnum, | 217 | MUSB_RXCSR); |
215 | MUSB_RXCSR)); | 218 | |
219 | csr = musb_readw(mbase, offset); | ||
216 | csr &= ~(MUSB_RXCSR_AUTOCLEAR | | 220 | csr &= ~(MUSB_RXCSR_AUTOCLEAR | |
217 | MUSB_RXCSR_DMAENAB | | 221 | MUSB_RXCSR_DMAENAB | |
218 | MUSB_RXCSR_DMAMODE); | 222 | MUSB_RXCSR_DMAMODE); |
219 | musb_writew(mbase, | 223 | musb_writew(mbase, offset, csr); |
220 | MUSB_EP_OFFSET(musb_channel->epnum, MUSB_RXCSR), | ||
221 | csr); | ||
222 | } | 224 | } |
223 | 225 | ||
224 | musb_writew(mbase, | 226 | musb_writew(mbase, |
@@ -296,20 +298,28 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data) | |||
296 | && ((channel->desired_mode == 0) | 298 | && ((channel->desired_mode == 0) |
297 | || (channel->actual_len & | 299 | || (channel->actual_len & |
298 | (musb_channel->max_packet_sz - 1))) | 300 | (musb_channel->max_packet_sz - 1))) |
299 | ) { | 301 | ) { |
302 | u8 epnum = musb_channel->epnum; | ||
303 | int offset = MUSB_EP_OFFSET(epnum, | ||
304 | MUSB_TXCSR); | ||
305 | u16 txcsr; | ||
306 | |||
307 | /* | ||
308 | * The programming guide says that we | ||
309 | * must clear DMAENAB before DMAMODE. | ||
310 | */ | ||
311 | musb_ep_select(mbase, epnum); | ||
312 | txcsr = musb_readw(mbase, offset); | ||
313 | txcsr &= ~(MUSB_TXCSR_DMAENAB | ||
314 | | MUSB_TXCSR_AUTOSET); | ||
315 | musb_writew(mbase, offset, txcsr); | ||
300 | /* Send out the packet */ | 316 | /* Send out the packet */ |
301 | musb_ep_select(mbase, | 317 | txcsr &= ~MUSB_TXCSR_DMAMODE; |
302 | musb_channel->epnum); | 318 | txcsr |= MUSB_TXCSR_TXPKTRDY; |
303 | musb_writew(mbase, MUSB_EP_OFFSET( | 319 | musb_writew(mbase, offset, txcsr); |
304 | musb_channel->epnum, | ||
305 | MUSB_TXCSR), | ||
306 | MUSB_TXCSR_TXPKTRDY); | ||
307 | } else { | ||
308 | musb_dma_completion( | ||
309 | musb, | ||
310 | musb_channel->epnum, | ||
311 | musb_channel->transmit); | ||
312 | } | 320 | } |
321 | musb_dma_completion(musb, musb_channel->epnum, | ||
322 | musb_channel->transmit); | ||
313 | } | 323 | } |
314 | } | 324 | } |
315 | } | 325 | } |
diff --git a/drivers/usb/otg/nop-usb-xceiv.c b/drivers/usb/otg/nop-usb-xceiv.c index 4b933f646f2e..c567168f89af 100644 --- a/drivers/usb/otg/nop-usb-xceiv.c +++ b/drivers/usb/otg/nop-usb-xceiv.c | |||
@@ -36,14 +36,14 @@ struct nop_usb_xceiv { | |||
36 | struct device *dev; | 36 | struct device *dev; |
37 | }; | 37 | }; |
38 | 38 | ||
39 | static u64 nop_xceiv_dmamask = DMA_32BIT_MASK; | 39 | static u64 nop_xceiv_dmamask = DMA_BIT_MASK(32); |
40 | 40 | ||
41 | static struct platform_device nop_xceiv_device = { | 41 | static struct platform_device nop_xceiv_device = { |
42 | .name = "nop_usb_xceiv", | 42 | .name = "nop_usb_xceiv", |
43 | .id = -1, | 43 | .id = -1, |
44 | .dev = { | 44 | .dev = { |
45 | .dma_mask = &nop_xceiv_dmamask, | 45 | .dma_mask = &nop_xceiv_dmamask, |
46 | .coherent_dma_mask = DMA_32BIT_MASK, | 46 | .coherent_dma_mask = DMA_BIT_MASK(32), |
47 | .platform_data = NULL, | 47 | .platform_data = NULL, |
48 | }, | 48 | }, |
49 | }; | 49 | }; |
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index dcc87aaa8628..8100f1d25904 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c | |||
@@ -668,6 +668,7 @@ static struct usb_device_id id_table_combined [] = { | |||
668 | { USB_DEVICE(DE_VID, WHT_PID) }, | 668 | { USB_DEVICE(DE_VID, WHT_PID) }, |
669 | { USB_DEVICE(ADI_VID, ADI_GNICE_PID), | 669 | { USB_DEVICE(ADI_VID, ADI_GNICE_PID), |
670 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, | 670 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, |
671 | { USB_DEVICE(JETI_VID, JETI_SPC1201_PID) }, | ||
671 | { }, /* Optional parameter entry */ | 672 | { }, /* Optional parameter entry */ |
672 | { } /* Terminating entry */ | 673 | { } /* Terminating entry */ |
673 | }; | 674 | }; |
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h index daaf63db0b50..c09f658a448b 100644 --- a/drivers/usb/serial/ftdi_sio.h +++ b/drivers/usb/serial/ftdi_sio.h | |||
@@ -913,6 +913,13 @@ | |||
913 | #define ADI_GNICE_PID 0xF000 | 913 | #define ADI_GNICE_PID 0xF000 |
914 | 914 | ||
915 | /* | 915 | /* |
916 | * JETI SPECTROMETER SPECBOS 1201 | ||
917 | * http://www.jeti.com/products/sys/scb/scb1201.php | ||
918 | */ | ||
919 | #define JETI_VID 0x0c6c | ||
920 | #define JETI_SPC1201_PID 0x04b2 | ||
921 | |||
922 | /* | ||
916 | * BmRequestType: 1100 0000b | 923 | * BmRequestType: 1100 0000b |
917 | * bRequest: FTDI_E2_READ | 924 | * bRequest: FTDI_E2_READ |
918 | * wValue: 0 | 925 | * wValue: 0 |
diff --git a/drivers/usb/serial/moto_modem.c b/drivers/usb/serial/moto_modem.c index 2e8e05462ef7..b66b71ccd12b 100644 --- a/drivers/usb/serial/moto_modem.c +++ b/drivers/usb/serial/moto_modem.c | |||
@@ -25,6 +25,7 @@ static struct usb_device_id id_table [] = { | |||
25 | { USB_DEVICE(0x05c6, 0x3197) }, /* unknown Motorola phone */ | 25 | { USB_DEVICE(0x05c6, 0x3197) }, /* unknown Motorola phone */ |
26 | { USB_DEVICE(0x0c44, 0x0022) }, /* unknown Motorola phone */ | 26 | { USB_DEVICE(0x0c44, 0x0022) }, /* unknown Motorola phone */ |
27 | { USB_DEVICE(0x22b8, 0x2a64) }, /* Motorola KRZR K1m */ | 27 | { USB_DEVICE(0x22b8, 0x2a64) }, /* Motorola KRZR K1m */ |
28 | { USB_DEVICE(0x22b8, 0x2c64) }, /* Motorola V950 phone */ | ||
28 | { }, | 29 | { }, |
29 | }; | 30 | }; |
30 | MODULE_DEVICE_TABLE(usb, id_table); | 31 | MODULE_DEVICE_TABLE(usb, id_table); |
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index d560c0b54e6e..47bd070f24b7 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c | |||
@@ -300,6 +300,10 @@ static int option_resume(struct usb_serial *serial); | |||
300 | #define BENQ_VENDOR_ID 0x04a5 | 300 | #define BENQ_VENDOR_ID 0x04a5 |
301 | #define BENQ_PRODUCT_H10 0x4068 | 301 | #define BENQ_PRODUCT_H10 0x4068 |
302 | 302 | ||
303 | #define DLINK_VENDOR_ID 0x1186 | ||
304 | #define DLINK_PRODUCT_DWM_652 0x3e04 | ||
305 | |||
306 | |||
303 | static struct usb_device_id option_ids[] = { | 307 | static struct usb_device_id option_ids[] = { |
304 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, | 308 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, |
305 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, | 309 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, |
@@ -516,6 +520,7 @@ static struct usb_device_id option_ids[] = { | |||
516 | { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628) }, | 520 | { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628) }, |
517 | { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH) }, | 521 | { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH) }, |
518 | { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) }, | 522 | { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) }, |
523 | { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) }, | ||
519 | { USB_DEVICE(0x1da5, 0x4515) }, /* BenQ H20 */ | 524 | { USB_DEVICE(0x1da5, 0x4515) }, /* BenQ H20 */ |
520 | { } /* Terminating entry */ | 525 | { } /* Terminating entry */ |
521 | }; | 526 | }; |
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index e6d6b0c17fd9..7528b8d57f1c 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c | |||
@@ -26,6 +26,27 @@ static struct usb_device_id id_table[] = { | |||
26 | {USB_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ | 26 | {USB_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ |
27 | {USB_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */ | 27 | {USB_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */ |
28 | {USB_DEVICE(0x03f0, 0x201d)}, /* HP un2400 Gobi QDL Device */ | 28 | {USB_DEVICE(0x03f0, 0x201d)}, /* HP un2400 Gobi QDL Device */ |
29 | {USB_DEVICE(0x04da, 0x250d)}, /* Panasonic Gobi Modem device */ | ||
30 | {USB_DEVICE(0x04da, 0x250c)}, /* Panasonic Gobi QDL device */ | ||
31 | {USB_DEVICE(0x413c, 0x8172)}, /* Dell Gobi Modem device */ | ||
32 | {USB_DEVICE(0x413c, 0x8171)}, /* Dell Gobi QDL device */ | ||
33 | {USB_DEVICE(0x1410, 0xa001)}, /* Novatel Gobi Modem device */ | ||
34 | {USB_DEVICE(0x1410, 0xa008)}, /* Novatel Gobi QDL device */ | ||
35 | {USB_DEVICE(0x0b05, 0x1776)}, /* Asus Gobi Modem device */ | ||
36 | {USB_DEVICE(0x0b05, 0x1774)}, /* Asus Gobi QDL device */ | ||
37 | {USB_DEVICE(0x19d2, 0xfff3)}, /* ONDA Gobi Modem device */ | ||
38 | {USB_DEVICE(0x19d2, 0xfff2)}, /* ONDA Gobi QDL device */ | ||
39 | {USB_DEVICE(0x1557, 0x0a80)}, /* OQO Gobi QDL device */ | ||
40 | {USB_DEVICE(0x05c6, 0x9001)}, /* Generic Gobi Modem device */ | ||
41 | {USB_DEVICE(0x05c6, 0x9002)}, /* Generic Gobi Modem device */ | ||
42 | {USB_DEVICE(0x05c6, 0x9202)}, /* Generic Gobi Modem device */ | ||
43 | {USB_DEVICE(0x05c6, 0x9203)}, /* Generic Gobi Modem device */ | ||
44 | {USB_DEVICE(0x05c6, 0x9222)}, /* Generic Gobi Modem device */ | ||
45 | {USB_DEVICE(0x05c6, 0x9008)}, /* Generic Gobi QDL device */ | ||
46 | {USB_DEVICE(0x05c6, 0x9201)}, /* Generic Gobi QDL device */ | ||
47 | {USB_DEVICE(0x05c6, 0x9221)}, /* Generic Gobi QDL device */ | ||
48 | {USB_DEVICE(0x05c6, 0x9231)}, /* Generic Gobi QDL device */ | ||
49 | {USB_DEVICE(0x1f45, 0x0001)}, /* Unknown Gobi QDL device */ | ||
29 | { } /* Terminating entry */ | 50 | { } /* Terminating entry */ |
30 | }; | 51 | }; |
31 | MODULE_DEVICE_TABLE(usb, id_table); | 52 | MODULE_DEVICE_TABLE(usb, id_table); |
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c index 2620bf6fe5e1..9c4c700c7cc6 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.c +++ b/drivers/usb/serial/ti_usb_3410_5052.c | |||
@@ -1215,20 +1215,22 @@ static void ti_bulk_in_callback(struct urb *urb) | |||
1215 | } | 1215 | } |
1216 | 1216 | ||
1217 | tty = tty_port_tty_get(&port->port); | 1217 | tty = tty_port_tty_get(&port->port); |
1218 | if (tty && urb->actual_length) { | 1218 | if (tty) { |
1219 | usb_serial_debug_data(debug, dev, __func__, | 1219 | if (urb->actual_length) { |
1220 | urb->actual_length, urb->transfer_buffer); | 1220 | usb_serial_debug_data(debug, dev, __func__, |
1221 | 1221 | urb->actual_length, urb->transfer_buffer); | |
1222 | if (!tport->tp_is_open) | 1222 | |
1223 | dbg("%s - port closed, dropping data", __func__); | 1223 | if (!tport->tp_is_open) |
1224 | else | 1224 | dbg("%s - port closed, dropping data", |
1225 | ti_recv(&urb->dev->dev, tty, | 1225 | __func__); |
1226 | else | ||
1227 | ti_recv(&urb->dev->dev, tty, | ||
1226 | urb->transfer_buffer, | 1228 | urb->transfer_buffer, |
1227 | urb->actual_length); | 1229 | urb->actual_length); |
1228 | 1230 | spin_lock(&tport->tp_lock); | |
1229 | spin_lock(&tport->tp_lock); | 1231 | tport->tp_icount.rx += urb->actual_length; |
1230 | tport->tp_icount.rx += urb->actual_length; | 1232 | spin_unlock(&tport->tp_lock); |
1231 | spin_unlock(&tport->tp_lock); | 1233 | } |
1232 | tty_kref_put(tty); | 1234 | tty_kref_put(tty); |
1233 | } | 1235 | } |
1234 | 1236 | ||
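The ti_usb_3410_5052 hunk restructures the bulk-in completion so the reference taken by tty_port_tty_get() is dropped on every path, not only when the URB carried data. The underlying pattern, sketched with the generic flip-buffer helpers and assuming the URB's context points at the usb_serial_port (the real driver funnels data through its own ti_recv() path instead):

#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <linux/tty_flip.h>

static void example_bulk_in_callback(struct urb *urb)
{
	struct usb_serial_port *port = urb->context;
	struct tty_struct *tty;

	tty = tty_port_tty_get(&port->port);	/* takes a tty reference */
	if (tty) {
		if (urb->actual_length) {
			tty_insert_flip_string(tty, urb->transfer_buffer,
					       urb->actual_length);
			tty_flip_buffer_push(tty);
		}
		tty_kref_put(tty);		/* always balance the get */
	}
}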
diff --git a/drivers/usb/storage/Makefile b/drivers/usb/storage/Makefile index 5be54c019662..ef7e5a8ceab5 100644 --- a/drivers/usb/storage/Makefile +++ b/drivers/usb/storage/Makefile | |||
@@ -17,7 +17,8 @@ usb-storage-objs := scsiglue.o protocol.o transport.o usb.o \ | |||
17 | ifeq ($(CONFIG_USB_LIBUSUAL),) | 17 | ifeq ($(CONFIG_USB_LIBUSUAL),) |
18 | usb-storage-objs += usual-tables.o | 18 | usb-storage-objs += usual-tables.o |
19 | else | 19 | else |
20 | obj-$(CONFIG_USB) += libusual.o usual-tables.o | 20 | obj-$(CONFIG_USB) += usb-libusual.o |
21 | usb-libusual-objs := libusual.o usual-tables.o | ||
21 | endif | 22 | endif |
22 | 23 | ||
23 | obj-$(CONFIG_USB_STORAGE_ALAUDA) += ums-alauda.o | 24 | obj-$(CONFIG_USB_STORAGE_ALAUDA) += ums-alauda.o |
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c index 49aedb36dc19..fcb320217218 100644 --- a/drivers/usb/storage/transport.c +++ b/drivers/usb/storage/transport.c | |||
@@ -247,10 +247,8 @@ int usb_stor_clear_halt(struct us_data *us, unsigned int pipe) | |||
247 | USB_ENDPOINT_HALT, endp, | 247 | USB_ENDPOINT_HALT, endp, |
248 | NULL, 0, 3*HZ); | 248 | NULL, 0, 3*HZ); |
249 | 249 | ||
250 | /* reset the endpoint toggle */ | ||
251 | if (result >= 0) | 250 | if (result >= 0) |
252 | usb_settoggle(us->pusb_dev, usb_pipeendpoint(pipe), | 251 | usb_reset_endpoint(us->pusb_dev, endp); |
253 | usb_pipeout(pipe), 0); | ||
254 | 252 | ||
255 | US_DEBUGP("%s: result = %d\n", __func__, result); | 253 | US_DEBUGP("%s: result = %d\n", __func__, result); |
256 | return result; | 254 | return result; |
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 1c1f643e8a78..96db479d1165 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h | |||
@@ -975,12 +975,14 @@ UNUSUAL_DEV( 0x07c4, 0xa400, 0x0000, 0xffff, | |||
975 | US_SC_DEVICE, US_PR_DEVICE, NULL, | 975 | US_SC_DEVICE, US_PR_DEVICE, NULL, |
976 | US_FL_FIX_INQUIRY | US_FL_FIX_CAPACITY ), | 976 | US_FL_FIX_INQUIRY | US_FL_FIX_CAPACITY ), |
977 | 977 | ||
978 | /* Reported by Rauch Wolke <rauchwolke@gmx.net> */ | 978 | /* Reported by Rauch Wolke <rauchwolke@gmx.net> |
979 | * and augmented by binbin <binbinsh@gmail.com> (Bugzilla #12882) | ||
980 | */ | ||
979 | UNUSUAL_DEV( 0x07c4, 0xa4a5, 0x0000, 0xffff, | 981 | UNUSUAL_DEV( 0x07c4, 0xa4a5, 0x0000, 0xffff, |
980 | "Simple Tech/Datafab", | 982 | "Simple Tech/Datafab", |
981 | "CF+SM Reader", | 983 | "CF+SM Reader", |
982 | US_SC_DEVICE, US_PR_DEVICE, NULL, | 984 | US_SC_DEVICE, US_PR_DEVICE, NULL, |
983 | US_FL_IGNORE_RESIDUE ), | 985 | US_FL_IGNORE_RESIDUE | US_FL_MAX_SECTORS_64 ), |
984 | 986 | ||
985 | /* Casio QV 2x00/3x00/4000/8000 digital still cameras are not conformant | 987 | /* Casio QV 2x00/3x00/4000/8000 digital still cameras are not conformant |
986 | * to the USB storage specification in two ways: | 988 | * to the USB storage specification in two ways: |
@@ -1376,6 +1378,14 @@ UNUSUAL_DEV( 0x10d6, 0x2200, 0x0100, 0x0100, | |||
1376 | US_SC_DEVICE, US_PR_DEVICE, NULL, | 1378 | US_SC_DEVICE, US_PR_DEVICE, NULL, |
1377 | 0), | 1379 | 0), |
1378 | 1380 | ||
1381 | /* Reported by Pascal Terjan <pterjan@mandriva.com> | ||
1382 | * Ignore driver CD mode and force into modem mode by default. | ||
1383 | */ | ||
1384 | UNUSUAL_DEV( 0x1186, 0x3e04, 0x0000, 0x0000, | ||
1385 | "D-Link", | ||
1386 | "USB Mass Storage", | ||
1387 | US_SC_DEVICE, US_PR_DEVICE, option_ms_init, 0), | ||
1388 | |||
1379 | /* Reported by Kevin Lloyd <linux@sierrawireless.com> | 1389 | /* Reported by Kevin Lloyd <linux@sierrawireless.com> |
1380 | * Entry is needed for the initializer function override, | 1390 | * Entry is needed for the initializer function override, |
1381 | * which instructs the device to load as a modem | 1391 | * which instructs the device to load as a modem |
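For reference, an UNUSUAL_DEV() entry such as the two touched above packs the USB IDs, a bcdDevice range, vendor and product strings, an optional initializer and a flags mask into one table entry. A hedged sketch of the shape (all IDs and strings below are placeholders):

UNUSUAL_DEV(0x1234, 0x5678, 0x0000, 0x9999,	/* VID, PID, bcdDevice min/max */
		"Example Vendor",
		"Example Mass Storage",
		US_SC_DEVICE, US_PR_DEVICE, NULL,	/* subclass, protocol, init fn */
		US_FL_IGNORE_RESIDUE | US_FL_MAX_SECTORS_64),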
diff --git a/drivers/usb/wusbcore/devconnect.c b/drivers/usb/wusbcore/devconnect.c index 386eaa22d215..4ac4300a3f9a 100644 --- a/drivers/usb/wusbcore/devconnect.c +++ b/drivers/usb/wusbcore/devconnect.c | |||
@@ -267,6 +267,8 @@ static void wusbhc_devconnect_acked_work(struct work_struct *work) | |||
267 | mutex_lock(&wusbhc->mutex); | 267 | mutex_lock(&wusbhc->mutex); |
268 | wusbhc_devconnect_acked(wusbhc, wusb_dev); | 268 | wusbhc_devconnect_acked(wusbhc, wusb_dev); |
269 | mutex_unlock(&wusbhc->mutex); | 269 | mutex_unlock(&wusbhc->mutex); |
270 | |||
271 | wusb_dev_put(wusb_dev); | ||
270 | } | 272 | } |
271 | 273 | ||
272 | /* | 274 | /* |
@@ -396,7 +398,8 @@ static void __wusbhc_dev_disconnect(struct wusbhc *wusbhc, | |||
396 | 398 | ||
397 | /* After a device disconnects, change the GTK (see [WUSB] | 399 | /* After a device disconnects, change the GTK (see [WUSB] |
398 | * section 6.2.11.2). */ | 400 | * section 6.2.11.2). */ |
399 | wusbhc_gtk_rekey(wusbhc); | 401 | if (wusbhc->active) |
402 | wusbhc_gtk_rekey(wusbhc); | ||
400 | 403 | ||
401 | /* The Wireless USB part has forgotten about the device already; now | 404 | /* The Wireless USB part has forgotten about the device already; now |
402 | * khubd's timer will pick up the disconnection and remove the USB | 405 | * khubd's timer will pick up the disconnection and remove the USB |
@@ -1084,15 +1087,21 @@ error_mmcie_set: | |||
1084 | * wusbhc_devconnect_stop - stop managing connected devices | 1087 | * wusbhc_devconnect_stop - stop managing connected devices |
1085 | * @wusbhc: the WUSB HC | 1088 | * @wusbhc: the WUSB HC |
1086 | * | 1089 | * |
1087 | * Removes the Host Info IE and stops the keep alives. | 1090 | * Disconnects any devices still connected, stops the keep alives and |
1088 | * | 1091 | * removes the Host Info IE. |
1089 | * FIXME: should this disconnect all devices? | ||
1090 | */ | 1092 | */ |
1091 | void wusbhc_devconnect_stop(struct wusbhc *wusbhc) | 1093 | void wusbhc_devconnect_stop(struct wusbhc *wusbhc) |
1092 | { | 1094 | { |
1093 | cancel_delayed_work_sync(&wusbhc->keep_alive_timer); | 1095 | int i; |
1094 | WARN_ON(!list_empty(&wusbhc->cack_list)); | ||
1095 | 1096 | ||
1097 | mutex_lock(&wusbhc->mutex); | ||
1098 | for (i = 0; i < wusbhc->ports_max; i++) { | ||
1099 | if (wusbhc->port[i].wusb_dev) | ||
1100 | __wusbhc_dev_disconnect(wusbhc, &wusbhc->port[i]); | ||
1101 | } | ||
1102 | mutex_unlock(&wusbhc->mutex); | ||
1103 | |||
1104 | cancel_delayed_work_sync(&wusbhc->keep_alive_timer); | ||
1096 | wusbhc_mmcie_rm(wusbhc, &wusbhc->wuie_host_info->hdr); | 1105 | wusbhc_mmcie_rm(wusbhc, &wusbhc->wuie_host_info->hdr); |
1097 | kfree(wusbhc->wuie_host_info); | 1106 | kfree(wusbhc->wuie_host_info); |
1098 | wusbhc->wuie_host_info = NULL; | 1107 | wusbhc->wuie_host_info = NULL; |
diff --git a/drivers/usb/wusbcore/wusbhc.c b/drivers/usb/wusbcore/wusbhc.c index 07c63a31c799..ee6256f23636 100644 --- a/drivers/usb/wusbcore/wusbhc.c +++ b/drivers/usb/wusbcore/wusbhc.c | |||
@@ -88,33 +88,31 @@ static DEVICE_ATTR(wusb_trust_timeout, 0644, wusb_trust_timeout_show, | |||
88 | wusb_trust_timeout_store); | 88 | wusb_trust_timeout_store); |
89 | 89 | ||
90 | /* | 90 | /* |
91 | * Show & store the current WUSB CHID | 91 | * Show the current WUSB CHID. |
92 | */ | 92 | */ |
93 | static ssize_t wusb_chid_show(struct device *dev, | 93 | static ssize_t wusb_chid_show(struct device *dev, |
94 | struct device_attribute *attr, char *buf) | 94 | struct device_attribute *attr, char *buf) |
95 | { | 95 | { |
96 | struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev); | 96 | struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev); |
97 | const struct wusb_ckhdid *chid; | ||
97 | ssize_t result = 0; | 98 | ssize_t result = 0; |
98 | 99 | ||
99 | if (wusbhc->wuie_host_info != NULL) | 100 | if (wusbhc->wuie_host_info != NULL) |
100 | result += ckhdid_printf(buf, PAGE_SIZE, | 101 | chid = &wusbhc->wuie_host_info->CHID; |
101 | &wusbhc->wuie_host_info->CHID); | 102 | else |
103 | chid = &wusb_ckhdid_zero; | ||
104 | |||
105 | result += ckhdid_printf(buf, PAGE_SIZE, chid); | ||
106 | result += sprintf(buf + result, "\n"); | ||
107 | |||
102 | return result; | 108 | return result; |
103 | } | 109 | } |
104 | 110 | ||
105 | /* | 111 | /* |
106 | * Store a new CHID | 112 | * Store a new CHID. |
107 | * | ||
108 | * This will (FIXME) trigger many changes. | ||
109 | * | ||
110 | * - Send an all zeros CHID and it will stop the controller | ||
111 | * - Send a non-zero CHID and it will start it | ||
112 | * (unless it was started, it will just change the CHID, | ||
113 | * diconnecting all devices first). | ||
114 | * | 113 | * |
115 | * So first we scan the MMC we are sent and then we act on it. We | 114 | * - Write an all zeros CHID and it will stop the controller |
116 | * read it in the same format as we print it, an ASCII string of 16 | 115 | * - Write a non-zero CHID and it will start it. |
117 | * hex bytes. | ||
118 | * | 116 | * |
119 | * See wusbhc_chid_set() for more info. | 117 | * See wusbhc_chid_set() for more info. |
120 | */ | 118 | */ |
@@ -339,13 +337,15 @@ void wusbhc_giveback_urb(struct wusbhc *wusbhc, struct urb *urb, int status) | |||
339 | { | 337 | { |
340 | struct wusb_dev *wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev); | 338 | struct wusb_dev *wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev); |
341 | 339 | ||
342 | if (status == 0) { | 340 | if (status == 0 && wusb_dev) { |
343 | wusb_dev->entry_ts = jiffies; | 341 | wusb_dev->entry_ts = jiffies; |
344 | 342 | ||
345 | /* wusbhc_devconnect_acked() can't be called from from | 343 | /* wusbhc_devconnect_acked() can't be called from |
346 | atomic context so defer it to a work queue. */ | 344 | atomic context so defer it to a work queue. */ |
347 | if (!list_empty(&wusb_dev->cack_node)) | 345 | if (!list_empty(&wusb_dev->cack_node)) |
348 | queue_work(wusbd, &wusb_dev->devconnect_acked_work); | 346 | queue_work(wusbd, &wusb_dev->devconnect_acked_work); |
347 | else | ||
348 | wusb_dev_put(wusb_dev); | ||
349 | } | 349 | } |
350 | 350 | ||
351 | usb_hcd_giveback_urb(&wusbhc->usb_hcd, urb, status); | 351 | usb_hcd_giveback_urb(&wusbhc->usb_hcd, urb, status); |
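The wusbhc_giveback_urb() fix is about reference ownership: the lookup helper returns wusb_dev with a reference held, and exactly one of two paths has to drop it -- the deferred acked work (which, per the devconnect.c hunk above, now calls wusb_dev_put() when it finishes) or the caller itself when no work is queued. Restating the patched logic with the ownership spelled out:

	/* __wusb_dev_get_by_usb_dev() returned wusb_dev with a reference held */
	if (status == 0 && wusb_dev) {
		wusb_dev->entry_ts = jiffies;
		if (!list_empty(&wusb_dev->cack_node))
			/* the acked work runs later and drops the reference */
			queue_work(wusbd, &wusb_dev->devconnect_acked_work);
		else
			/* nothing queued: drop the lookup reference here */
			wusb_dev_put(wusb_dev);
	}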
diff --git a/drivers/video/aty/radeon_base.c b/drivers/video/aty/radeon_base.c index 16bb7e3c0310..6c37e8ee5efe 100644 --- a/drivers/video/aty/radeon_base.c +++ b/drivers/video/aty/radeon_base.c | |||
@@ -698,8 +698,8 @@ static void __devinit radeon_get_pllinfo(struct radeonfb_info *rinfo) | |||
698 | found: | 698 | found: |
699 | /* | 699 | /* |
700 | * Some methods fail to retrieve SCLK and MCLK values, we apply default | 700 | * Some methods fail to retrieve SCLK and MCLK values, we apply default |
701 | * settings in this case (200Mhz). If that really happne often, we could | 701 | * settings in this case (200Mhz). If that really happens often, we |
702 | * fetch from registers instead... | 702 | * could fetch from registers instead... |
703 | */ | 703 | */ |
704 | if (rinfo->pll.mclk == 0) | 704 | if (rinfo->pll.mclk == 0) |
705 | rinfo->pll.mclk = 20000; | 705 | rinfo->pll.mclk = 20000; |
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c index dd37cbcaf8ce..157057c79ca3 100644 --- a/drivers/video/backlight/backlight.c +++ b/drivers/video/backlight/backlight.c | |||
@@ -35,8 +35,6 @@ static int fb_notifier_callback(struct notifier_block *self, | |||
35 | return 0; | 35 | return 0; |
36 | 36 | ||
37 | bd = container_of(self, struct backlight_device, fb_notif); | 37 | bd = container_of(self, struct backlight_device, fb_notif); |
38 | if (!lock_fb_info(evdata->info)) | ||
39 | return -ENODEV; | ||
40 | mutex_lock(&bd->ops_lock); | 38 | mutex_lock(&bd->ops_lock); |
41 | if (bd->ops) | 39 | if (bd->ops) |
42 | if (!bd->ops->check_fb || | 40 | if (!bd->ops->check_fb || |
@@ -49,7 +47,6 @@ static int fb_notifier_callback(struct notifier_block *self, | |||
49 | backlight_update_status(bd); | 47 | backlight_update_status(bd); |
50 | } | 48 | } |
51 | mutex_unlock(&bd->ops_lock); | 49 | mutex_unlock(&bd->ops_lock); |
52 | unlock_fb_info(evdata->info); | ||
53 | return 0; | 50 | return 0; |
54 | } | 51 | } |
55 | 52 | ||
diff --git a/drivers/video/backlight/lcd.c b/drivers/video/backlight/lcd.c index 0bb13df0fa89..b6449470106c 100644 --- a/drivers/video/backlight/lcd.c +++ b/drivers/video/backlight/lcd.c | |||
@@ -40,8 +40,6 @@ static int fb_notifier_callback(struct notifier_block *self, | |||
40 | if (!ld->ops) | 40 | if (!ld->ops) |
41 | return 0; | 41 | return 0; |
42 | 42 | ||
43 | if (!lock_fb_info(evdata->info)) | ||
44 | return -ENODEV; | ||
45 | mutex_lock(&ld->ops_lock); | 43 | mutex_lock(&ld->ops_lock); |
46 | if (!ld->ops->check_fb || ld->ops->check_fb(ld, evdata->info)) { | 44 | if (!ld->ops->check_fb || ld->ops->check_fb(ld, evdata->info)) { |
47 | if (event == FB_EVENT_BLANK) { | 45 | if (event == FB_EVENT_BLANK) { |
@@ -53,7 +51,6 @@ static int fb_notifier_callback(struct notifier_block *self, | |||
53 | } | 51 | } |
54 | } | 52 | } |
55 | mutex_unlock(&ld->ops_lock); | 53 | mutex_unlock(&ld->ops_lock); |
56 | unlock_fb_info(evdata->info); | ||
57 | return 0; | 54 | return 0; |
58 | } | 55 | } |
59 | 56 | ||
diff --git a/drivers/video/cirrusfb.c b/drivers/video/cirrusfb.c index d42e385f091c..4c2bf923418c 100644 --- a/drivers/video/cirrusfb.c +++ b/drivers/video/cirrusfb.c | |||
@@ -567,9 +567,7 @@ static int cirrusfb_check_var(struct fb_var_screeninfo *var, | |||
567 | default: | 567 | default: |
568 | dev_dbg(info->device, | 568 | dev_dbg(info->device, |
569 | "Unsupported bpp size: %d\n", var->bits_per_pixel); | 569 | "Unsupported bpp size: %d\n", var->bits_per_pixel); |
570 | assert(false); | 570 | return -EINVAL; |
571 | /* should never occur */ | ||
572 | break; | ||
573 | } | 571 | } |
574 | 572 | ||
575 | if (var->xres_virtual < var->xres) | 573 | if (var->xres_virtual < var->xres) |
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c index 2cd500a304f2..471a9a60376a 100644 --- a/drivers/video/console/fbcon.c +++ b/drivers/video/console/fbcon.c | |||
@@ -2263,9 +2263,12 @@ static void fbcon_generic_blank(struct vc_data *vc, struct fb_info *info, | |||
2263 | } | 2263 | } |
2264 | 2264 | ||
2265 | 2265 | ||
2266 | if (!lock_fb_info(info)) | ||
2267 | return; | ||
2266 | event.info = info; | 2268 | event.info = info; |
2267 | event.data = &blank; | 2269 | event.data = &blank; 
2268 | fb_notifier_call_chain(FB_EVENT_CONBLANK, &event); | 2270 | fb_notifier_call_chain(FB_EVENT_CONBLANK, &event); |
2271 | unlock_fb_info(info); | ||
2269 | } | 2272 | } |
2270 | 2273 | ||
2271 | static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch) | 2274 | static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch) |
@@ -2956,8 +2959,6 @@ static int fbcon_fb_unregistered(struct fb_info *info) | |||
2956 | { | 2959 | { |
2957 | int i, idx; | 2960 | int i, idx; |
2958 | 2961 | ||
2959 | if (!lock_fb_info(info)) | ||
2960 | return -ENODEV; | ||
2961 | idx = info->node; | 2962 | idx = info->node; |
2962 | for (i = first_fb_vc; i <= last_fb_vc; i++) { | 2963 | for (i = first_fb_vc; i <= last_fb_vc; i++) { |
2963 | if (con2fb_map[i] == idx) | 2964 | if (con2fb_map[i] == idx) |
@@ -2985,8 +2986,6 @@ static int fbcon_fb_unregistered(struct fb_info *info) | |||
2985 | if (primary_device == idx) | 2986 | if (primary_device == idx) |
2986 | primary_device = -1; | 2987 | primary_device = -1; |
2987 | 2988 | ||
2988 | unlock_fb_info(info); | ||
2989 | |||
2990 | if (!num_registered_fb) | 2989 | if (!num_registered_fb) |
2991 | unregister_con_driver(&fb_con); | 2990 | unregister_con_driver(&fb_con); |
2992 | 2991 | ||
@@ -3027,11 +3026,8 @@ static int fbcon_fb_registered(struct fb_info *info) | |||
3027 | { | 3026 | { |
3028 | int ret = 0, i, idx; | 3027 | int ret = 0, i, idx; |
3029 | 3028 | ||
3030 | if (!lock_fb_info(info)) | ||
3031 | return -ENODEV; | ||
3032 | idx = info->node; | 3029 | idx = info->node; |
3033 | fbcon_select_primary(info); | 3030 | fbcon_select_primary(info); |
3034 | unlock_fb_info(info); | ||
3035 | 3031 | ||
3036 | if (info_idx == -1) { | 3032 | if (info_idx == -1) { |
3037 | for (i = first_fb_vc; i <= last_fb_vc; i++) { | 3033 | for (i = first_fb_vc; i <= last_fb_vc; i++) { |
@@ -3152,53 +3148,23 @@ static int fbcon_event_notify(struct notifier_block *self, | |||
3152 | 3148 | ||
3153 | switch(action) { | 3149 | switch(action) { |
3154 | case FB_EVENT_SUSPEND: | 3150 | case FB_EVENT_SUSPEND: |
3155 | if (!lock_fb_info(info)) { | ||
3156 | ret = -ENODEV; | ||
3157 | goto done; | ||
3158 | } | ||
3159 | fbcon_suspended(info); | 3151 | fbcon_suspended(info); |
3160 | unlock_fb_info(info); | ||
3161 | break; | 3152 | break; |
3162 | case FB_EVENT_RESUME: | 3153 | case FB_EVENT_RESUME: |
3163 | if (!lock_fb_info(info)) { | ||
3164 | ret = -ENODEV; | ||
3165 | goto done; | ||
3166 | } | ||
3167 | fbcon_resumed(info); | 3154 | fbcon_resumed(info); |
3168 | unlock_fb_info(info); | ||
3169 | break; | 3155 | break; |
3170 | case FB_EVENT_MODE_CHANGE: | 3156 | case FB_EVENT_MODE_CHANGE: |
3171 | if (!lock_fb_info(info)) { | ||
3172 | ret = -ENODEV; | ||
3173 | goto done; | ||
3174 | } | ||
3175 | fbcon_modechanged(info); | 3157 | fbcon_modechanged(info); |
3176 | unlock_fb_info(info); | ||
3177 | break; | 3158 | break; |
3178 | case FB_EVENT_MODE_CHANGE_ALL: | 3159 | case FB_EVENT_MODE_CHANGE_ALL: |
3179 | if (!lock_fb_info(info)) { | ||
3180 | ret = -ENODEV; | ||
3181 | goto done; | ||
3182 | } | ||
3183 | fbcon_set_all_vcs(info); | 3160 | fbcon_set_all_vcs(info); |
3184 | unlock_fb_info(info); | ||
3185 | break; | 3161 | break; |
3186 | case FB_EVENT_MODE_DELETE: | 3162 | case FB_EVENT_MODE_DELETE: |
3187 | mode = event->data; | 3163 | mode = event->data; |
3188 | if (!lock_fb_info(info)) { | ||
3189 | ret = -ENODEV; | ||
3190 | goto done; | ||
3191 | } | ||
3192 | ret = fbcon_mode_deleted(info, mode); | 3164 | ret = fbcon_mode_deleted(info, mode); |
3193 | unlock_fb_info(info); | ||
3194 | break; | 3165 | break; |
3195 | case FB_EVENT_FB_UNBIND: | 3166 | case FB_EVENT_FB_UNBIND: |
3196 | if (!lock_fb_info(info)) { | ||
3197 | ret = -ENODEV; | ||
3198 | goto done; | ||
3199 | } | ||
3200 | idx = info->node; | 3167 | idx = info->node; |
3201 | unlock_fb_info(info); | ||
3202 | ret = fbcon_fb_unbind(idx); | 3168 | ret = fbcon_fb_unbind(idx); |
3203 | break; | 3169 | break; |
3204 | case FB_EVENT_FB_REGISTERED: | 3170 | case FB_EVENT_FB_REGISTERED: |
@@ -3217,29 +3183,14 @@ static int fbcon_event_notify(struct notifier_block *self, | |||
3217 | con2fb->framebuffer = con2fb_map[con2fb->console - 1]; | 3183 | con2fb->framebuffer = con2fb_map[con2fb->console - 1]; |
3218 | break; | 3184 | break; |
3219 | case FB_EVENT_BLANK: | 3185 | case FB_EVENT_BLANK: |
3220 | if (!lock_fb_info(info)) { | ||
3221 | ret = -ENODEV; | ||
3222 | goto done; | ||
3223 | } | ||
3224 | fbcon_fb_blanked(info, *(int *)event->data); | 3186 | fbcon_fb_blanked(info, *(int *)event->data); |
3225 | unlock_fb_info(info); | ||
3226 | break; | 3187 | break; |
3227 | case FB_EVENT_NEW_MODELIST: | 3188 | case FB_EVENT_NEW_MODELIST: |
3228 | if (!lock_fb_info(info)) { | ||
3229 | ret = -ENODEV; | ||
3230 | goto done; | ||
3231 | } | ||
3232 | fbcon_new_modelist(info); | 3189 | fbcon_new_modelist(info); |
3233 | unlock_fb_info(info); | ||
3234 | break; | 3190 | break; |
3235 | case FB_EVENT_GET_REQ: | 3191 | case FB_EVENT_GET_REQ: |
3236 | caps = event->data; | 3192 | caps = event->data; |
3237 | if (!lock_fb_info(info)) { | ||
3238 | ret = -ENODEV; | ||
3239 | goto done; | ||
3240 | } | ||
3241 | fbcon_get_requirement(info, caps); | 3193 | fbcon_get_requirement(info, caps); |
3242 | unlock_fb_info(info); | ||
3243 | break; | 3194 | break; |
3244 | } | 3195 | } |
3245 | done: | 3196 | done: |
diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c index 0c5b9a9fd56f..8dea2bc92705 100644 --- a/drivers/video/efifb.c +++ b/drivers/video/efifb.c | |||
@@ -210,12 +210,15 @@ static int __init efifb_probe(struct platform_device *dev) | |||
210 | unsigned int size_total; | 210 | unsigned int size_total; |
211 | int request_succeeded = 0; | 211 | int request_succeeded = 0; |
212 | 212 | ||
213 | printk(KERN_INFO "efifb: probing for efifb\n"); | ||
214 | |||
215 | if (!screen_info.lfb_depth) | 213 | if (!screen_info.lfb_depth) |
216 | screen_info.lfb_depth = 32; | 214 | screen_info.lfb_depth = 32; |
217 | if (!screen_info.pages) | 215 | if (!screen_info.pages) |
218 | screen_info.pages = 1; | 216 | screen_info.pages = 1; |
217 | if (!screen_info.lfb_base) { | ||
218 | printk(KERN_DEBUG "efifb: invalid framebuffer address\n"); | ||
219 | return -ENODEV; | ||
220 | } | ||
221 | printk(KERN_INFO "efifb: probing for efifb\n"); | ||
219 | 222 | ||
220 | /* just assume they're all unset if any are */ | 223 | /* just assume they're all unset if any are */ |
221 | if (!screen_info.blue_size) { | 224 | if (!screen_info.blue_size) { |
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c index 2ac32e6b5953..d412a1ddc12f 100644 --- a/drivers/video/fbmem.c +++ b/drivers/video/fbmem.c | |||
@@ -1097,8 +1097,11 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd, | |||
1097 | return -EINVAL; | 1097 | return -EINVAL; |
1098 | con2fb.framebuffer = -1; | 1098 | con2fb.framebuffer = -1; |
1099 | event.data = &con2fb; | 1099 | event.data = &con2fb; |
1100 | if (!lock_fb_info(info)) | ||
1101 | return -ENODEV; | ||
1100 | event.info = info; | 1102 | event.info = info; |
1101 | fb_notifier_call_chain(FB_EVENT_GET_CONSOLE_MAP, &event); | 1103 | fb_notifier_call_chain(FB_EVENT_GET_CONSOLE_MAP, &event); |
1104 | unlock_fb_info(info); | ||
1102 | ret = copy_to_user(argp, &con2fb, sizeof(con2fb)) ? -EFAULT : 0; | 1105 | ret = copy_to_user(argp, &con2fb, sizeof(con2fb)) ? -EFAULT : 0; |
1103 | break; | 1106 | break; |
1104 | case FBIOPUT_CON2FBMAP: | 1107 | case FBIOPUT_CON2FBMAP: |
@@ -1115,8 +1118,11 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd, | |||
1115 | break; | 1118 | break; |
1116 | } | 1119 | } |
1117 | event.data = &con2fb; | 1120 | event.data = &con2fb; |
1121 | if (!lock_fb_info(info)) | ||
1122 | return -ENODEV; | ||
1118 | event.info = info; | 1123 | event.info = info; |
1119 | ret = fb_notifier_call_chain(FB_EVENT_SET_CONSOLE_MAP, &event); | 1124 | ret = fb_notifier_call_chain(FB_EVENT_SET_CONSOLE_MAP, &event); |
1125 | unlock_fb_info(info); | ||
1120 | break; | 1126 | break; |
1121 | case FBIOBLANK: | 1127 | case FBIOBLANK: |
1122 | if (!lock_fb_info(info)) | 1128 | if (!lock_fb_info(info)) |
@@ -1521,7 +1527,10 @@ register_framebuffer(struct fb_info *fb_info) | |||
1521 | registered_fb[i] = fb_info; | 1527 | registered_fb[i] = fb_info; |
1522 | 1528 | ||
1523 | event.info = fb_info; | 1529 | event.info = fb_info; |
1530 | if (!lock_fb_info(fb_info)) | ||
1531 | return -ENODEV; | ||
1524 | fb_notifier_call_chain(FB_EVENT_FB_REGISTERED, &event); | 1532 | fb_notifier_call_chain(FB_EVENT_FB_REGISTERED, &event); |
1533 | unlock_fb_info(fb_info); | ||
1525 | return 0; | 1534 | return 0; |
1526 | } | 1535 | } |
1527 | 1536 | ||
@@ -1555,8 +1564,12 @@ unregister_framebuffer(struct fb_info *fb_info) | |||
1555 | goto done; | 1564 | goto done; |
1556 | } | 1565 | } |
1557 | 1566 | ||
1567 | |||
1568 | if (!lock_fb_info(fb_info)) | ||
1569 | return -ENODEV; | ||
1558 | event.info = fb_info; | 1570 | event.info = fb_info; |
1559 | ret = fb_notifier_call_chain(FB_EVENT_FB_UNBIND, &event); | 1571 | ret = fb_notifier_call_chain(FB_EVENT_FB_UNBIND, &event); |
1572 | unlock_fb_info(fb_info); | ||
1560 | 1573 | ||
1561 | if (ret) { | 1574 | if (ret) { |
1562 | ret = -EINVAL; | 1575 | ret = -EINVAL; |
@@ -1590,6 +1603,8 @@ void fb_set_suspend(struct fb_info *info, int state) | |||
1590 | { | 1603 | { |
1591 | struct fb_event event; | 1604 | struct fb_event event; |
1592 | 1605 | ||
1606 | if (!lock_fb_info(info)) | ||
1607 | return; | ||
1593 | event.info = info; | 1608 | event.info = info; |
1594 | if (state) { | 1609 | if (state) { |
1595 | fb_notifier_call_chain(FB_EVENT_SUSPEND, &event); | 1610 | fb_notifier_call_chain(FB_EVENT_SUSPEND, &event); |
@@ -1598,6 +1613,7 @@ void fb_set_suspend(struct fb_info *info, int state) | |||
1598 | info->state = FBINFO_STATE_RUNNING; | 1613 | info->state = FBINFO_STATE_RUNNING; |
1599 | fb_notifier_call_chain(FB_EVENT_RESUME, &event); | 1614 | fb_notifier_call_chain(FB_EVENT_RESUME, &event); |
1600 | } | 1615 | } |
1616 | unlock_fb_info(info); | ||
1601 | } | 1617 | } |
1602 | 1618 | ||
1603 | /** | 1619 | /** |
@@ -1667,8 +1683,11 @@ int fb_new_modelist(struct fb_info *info) | |||
1667 | err = 1; | 1683 | err = 1; |
1668 | 1684 | ||
1669 | if (!list_empty(&info->modelist)) { | 1685 | if (!list_empty(&info->modelist)) { |
1686 | if (!lock_fb_info(info)) | ||
1687 | return -ENODEV; | ||
1670 | event.info = info; | 1688 | event.info = info; |
1671 | err = fb_notifier_call_chain(FB_EVENT_NEW_MODELIST, &event); | 1689 | err = fb_notifier_call_chain(FB_EVENT_NEW_MODELIST, &event); |
1690 | unlock_fb_info(info); | ||
1672 | } | 1691 | } |
1673 | 1692 | ||
1674 | return err; | 1693 | return err; |
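The backlight, lcd, fbcon and fbmem hunks above are all facets of one locking change: instead of each FB_EVENT notifier callback calling lock_fb_info() itself, the caller in fbmem.c takes the lock around fb_notifier_call_chain(), so the callbacks run with the fb_info already locked. The caller-side pattern now looks roughly like this:

	struct fb_event event;
	int ret;

	if (!lock_fb_info(info))	/* fb_info already being torn down */
		return -ENODEV;
	event.info = info;
	event.data = &payload;		/* event-specific data, if any */
	ret = fb_notifier_call_chain(FB_EVENT_NEW_MODELIST, &event);
	unlock_fb_info(info);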
diff --git a/drivers/video/intelfb/intelfb.h b/drivers/video/intelfb/intelfb.h index a50bea614804..40984551c927 100644 --- a/drivers/video/intelfb/intelfb.h +++ b/drivers/video/intelfb/intelfb.h | |||
@@ -53,6 +53,7 @@ | |||
53 | #define PCI_DEVICE_ID_INTEL_830M 0x3577 | 53 | #define PCI_DEVICE_ID_INTEL_830M 0x3577 |
54 | #define PCI_DEVICE_ID_INTEL_845G 0x2562 | 54 | #define PCI_DEVICE_ID_INTEL_845G 0x2562 |
55 | #define PCI_DEVICE_ID_INTEL_85XGM 0x3582 | 55 | #define PCI_DEVICE_ID_INTEL_85XGM 0x3582 |
56 | #define PCI_DEVICE_ID_INTEL_854 0x358E | ||
56 | #define PCI_DEVICE_ID_INTEL_865G 0x2572 | 57 | #define PCI_DEVICE_ID_INTEL_865G 0x2572 |
57 | #define PCI_DEVICE_ID_INTEL_915G 0x2582 | 58 | #define PCI_DEVICE_ID_INTEL_915G 0x2582 |
58 | #define PCI_DEVICE_ID_INTEL_915GM 0x2592 | 59 | #define PCI_DEVICE_ID_INTEL_915GM 0x2592 |
@@ -154,6 +155,7 @@ enum intel_chips { | |||
154 | INTEL_85XGM, | 155 | INTEL_85XGM, |
155 | INTEL_852GM, | 156 | INTEL_852GM, |
156 | INTEL_852GME, | 157 | INTEL_852GME, |
158 | INTEL_854, | ||
157 | INTEL_855GM, | 159 | INTEL_855GM, |
158 | INTEL_855GME, | 160 | INTEL_855GME, |
159 | INTEL_865G, | 161 | INTEL_865G, |
diff --git a/drivers/video/intelfb/intelfb_i2c.c b/drivers/video/intelfb/intelfb_i2c.c index b3065492bb20..487f2be47460 100644 --- a/drivers/video/intelfb/intelfb_i2c.c +++ b/drivers/video/intelfb/intelfb_i2c.c | |||
@@ -156,6 +156,7 @@ void intelfb_create_i2c_busses(struct intelfb_info *dinfo) | |||
156 | switch(dinfo->chipset) { | 156 | switch(dinfo->chipset) { |
157 | case INTEL_830M: | 157 | case INTEL_830M: |
158 | case INTEL_845G: | 158 | case INTEL_845G: |
159 | case INTEL_854: | ||
159 | case INTEL_855GM: | 160 | case INTEL_855GM: |
160 | case INTEL_865G: | 161 | case INTEL_865G: |
161 | dinfo->output[i].type = INTELFB_OUTPUT_DVO; | 162 | dinfo->output[i].type = INTELFB_OUTPUT_DVO; |
diff --git a/drivers/video/intelfb/intelfbdrv.c b/drivers/video/intelfb/intelfbdrv.c index 6d8e5415c809..ace14fe02fc4 100644 --- a/drivers/video/intelfb/intelfbdrv.c +++ b/drivers/video/intelfb/intelfbdrv.c | |||
@@ -182,6 +182,7 @@ static struct pci_device_id intelfb_pci_table[] __devinitdata = { | |||
182 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_845G, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_845G }, | 182 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_845G, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_845G }, |
183 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_85XGM, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_85XGM }, | 183 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_85XGM, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_85XGM }, |
184 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_865G, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_865G }, | 184 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_865G, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_865G }, |
185 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_854, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_854 }, | ||
185 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_915G, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_915G }, | 186 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_915G, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_915G }, |
186 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_915GM, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_915GM }, | 187 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_915GM, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_915GM }, |
187 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_945G, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_945G }, | 188 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_945G, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_945G }, |
diff --git a/drivers/video/intelfb/intelfbhw.c b/drivers/video/intelfb/intelfbhw.c index 8b26b27c2db6..0689f97c5238 100644 --- a/drivers/video/intelfb/intelfbhw.c +++ b/drivers/video/intelfb/intelfbhw.c | |||
@@ -84,6 +84,11 @@ int intelfbhw_get_chipset(struct pci_dev *pdev, struct intelfb_info *dinfo) | |||
84 | dinfo->mobile = 0; | 84 | dinfo->mobile = 0; |
85 | dinfo->pll_index = PLLS_I8xx; | 85 | dinfo->pll_index = PLLS_I8xx; |
86 | return 0; | 86 | return 0; |
87 | case PCI_DEVICE_ID_INTEL_854: | ||
88 | dinfo->mobile = 1; | ||
89 | dinfo->name = "Intel(R) 854"; | ||
90 | dinfo->chipset = INTEL_854; | ||
91 | return 0; | ||
87 | case PCI_DEVICE_ID_INTEL_85XGM: | 92 | case PCI_DEVICE_ID_INTEL_85XGM: |
88 | tmp = 0; | 93 | tmp = 0; |
89 | dinfo->mobile = 1; | 94 | dinfo->mobile = 1; |
diff --git a/drivers/video/s3fb.c b/drivers/video/s3fb.c index 4dcec48a1d78..c3fad34309ed 100644 --- a/drivers/video/s3fb.c +++ b/drivers/video/s3fb.c | |||
@@ -45,11 +45,11 @@ struct s3fb_info { | |||
45 | static const struct svga_fb_format s3fb_formats[] = { | 45 | static const struct svga_fb_format s3fb_formats[] = { |
46 | { 0, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0, | 46 | { 0, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0, |
47 | FB_TYPE_TEXT, FB_AUX_TEXT_SVGA_STEP4, FB_VISUAL_PSEUDOCOLOR, 8, 16}, | 47 | FB_TYPE_TEXT, FB_AUX_TEXT_SVGA_STEP4, FB_VISUAL_PSEUDOCOLOR, 8, 16}, |
48 | { 4, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0, | 48 | { 4, {0, 4, 0}, {0, 4, 0}, {0, 4, 0}, {0, 0, 0}, 0, |
49 | FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_PSEUDOCOLOR, 8, 16}, | 49 | FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_PSEUDOCOLOR, 8, 16}, |
50 | { 4, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 1, | 50 | { 4, {0, 4, 0}, {0, 4, 0}, {0, 4, 0}, {0, 0, 0}, 1, |
51 | FB_TYPE_INTERLEAVED_PLANES, 1, FB_VISUAL_PSEUDOCOLOR, 8, 16}, | 51 | FB_TYPE_INTERLEAVED_PLANES, 1, FB_VISUAL_PSEUDOCOLOR, 8, 16}, |
52 | { 8, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0, | 52 | { 8, {0, 8, 0}, {0, 8, 0}, {0, 8, 0}, {0, 0, 0}, 0, |
53 | FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_PSEUDOCOLOR, 4, 8}, | 53 | FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_PSEUDOCOLOR, 4, 8}, |
54 | {16, {10, 5, 0}, {5, 5, 0}, {0, 5, 0}, {0, 0, 0}, 0, | 54 | {16, {10, 5, 0}, {5, 5, 0}, {0, 5, 0}, {0, 0, 0}, 0, |
55 | FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_TRUECOLOR, 2, 4}, | 55 | FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_TRUECOLOR, 2, 4}, |
diff --git a/drivers/video/sa1100fb.c b/drivers/video/sa1100fb.c index fad58cf9ef73..10ddad8e17d6 100644 --- a/drivers/video/sa1100fb.c +++ b/drivers/video/sa1100fb.c | |||
@@ -199,16 +199,20 @@ | |||
199 | extern void (*sa1100fb_backlight_power)(int on); | 199 | extern void (*sa1100fb_backlight_power)(int on); |
200 | extern void (*sa1100fb_lcd_power)(int on); | 200 | extern void (*sa1100fb_lcd_power)(int on); |
201 | 201 | ||
202 | /* | 202 | static struct sa1100fb_rgb rgb_4 = { |
203 | * IMHO this looks wrong. In 8BPP, length should be 8. | ||
204 | */ | ||
205 | static struct sa1100fb_rgb rgb_8 = { | ||
206 | .red = { .offset = 0, .length = 4, }, | 203 | .red = { .offset = 0, .length = 4, }, |
207 | .green = { .offset = 0, .length = 4, }, | 204 | .green = { .offset = 0, .length = 4, }, |
208 | .blue = { .offset = 0, .length = 4, }, | 205 | .blue = { .offset = 0, .length = 4, }, |
209 | .transp = { .offset = 0, .length = 0, }, | 206 | .transp = { .offset = 0, .length = 0, }, |
210 | }; | 207 | }; |
211 | 208 | ||
209 | static struct sa1100fb_rgb rgb_8 = { | ||
210 | .red = { .offset = 0, .length = 8, }, | ||
211 | .green = { .offset = 0, .length = 8, }, | ||
212 | .blue = { .offset = 0, .length = 8, }, | ||
213 | .transp = { .offset = 0, .length = 0, }, | ||
214 | }; | ||
215 | |||
212 | static struct sa1100fb_rgb def_rgb_16 = { | 216 | static struct sa1100fb_rgb def_rgb_16 = { |
213 | .red = { .offset = 11, .length = 5, }, | 217 | .red = { .offset = 11, .length = 5, }, |
214 | .green = { .offset = 5, .length = 6, }, | 218 | .green = { .offset = 5, .length = 6, }, |
@@ -613,7 +617,7 @@ sa1100fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) | |||
613 | DPRINTK("var->bits_per_pixel=%d\n", var->bits_per_pixel); | 617 | DPRINTK("var->bits_per_pixel=%d\n", var->bits_per_pixel); |
614 | switch (var->bits_per_pixel) { | 618 | switch (var->bits_per_pixel) { |
615 | case 4: | 619 | case 4: |
616 | rgbidx = RGB_8; | 620 | rgbidx = RGB_4; |
617 | break; | 621 | break; |
618 | case 8: | 622 | case 8: |
619 | rgbidx = RGB_8; | 623 | rgbidx = RGB_8; |
@@ -1382,6 +1386,7 @@ static struct sa1100fb_info * __init sa1100fb_init_fbinfo(struct device *dev) | |||
1382 | fbi->fb.monspecs = monspecs; | 1386 | fbi->fb.monspecs = monspecs; |
1383 | fbi->fb.pseudo_palette = (fbi + 1); | 1387 | fbi->fb.pseudo_palette = (fbi + 1); |
1384 | 1388 | ||
1389 | fbi->rgb[RGB_4] = &rgb_4; | ||
1385 | fbi->rgb[RGB_8] = &rgb_8; | 1390 | fbi->rgb[RGB_8] = &rgb_8; |
1386 | fbi->rgb[RGB_16] = &def_rgb_16; | 1391 | fbi->rgb[RGB_16] = &def_rgb_16; |
1387 | 1392 | ||
diff --git a/drivers/video/sa1100fb.h b/drivers/video/sa1100fb.h index 86831db9a042..1c3b459865d8 100644 --- a/drivers/video/sa1100fb.h +++ b/drivers/video/sa1100fb.h | |||
@@ -57,9 +57,10 @@ struct sa1100fb_lcd_reg { | |||
57 | unsigned long lccr3; | 57 | unsigned long lccr3; |
58 | }; | 58 | }; |
59 | 59 | ||
60 | #define RGB_8 (0) | 60 | #define RGB_4 (0) |
61 | #define RGB_16 (1) | 61 | #define RGB_8 (1) |
62 | #define NR_RGB 2 | 62 | #define RGB_16 (2) |
63 | #define NR_RGB 3 | ||
63 | 64 | ||
64 | struct sa1100fb_info { | 65 | struct sa1100fb_info { |
65 | struct fb_info fb; | 66 | struct fb_info fb; |
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c index 346d6458cf76..7e17ee95a97a 100644 --- a/drivers/video/sis/sis_main.c +++ b/drivers/video/sis/sis_main.c | |||
@@ -1129,7 +1129,7 @@ sisfb_bpp_to_var(struct sis_video_info *ivideo, struct fb_var_screeninfo *var) | |||
1129 | switch(var->bits_per_pixel) { | 1129 | switch(var->bits_per_pixel) { |
1130 | case 8: | 1130 | case 8: |
1131 | var->red.offset = var->green.offset = var->blue.offset = 0; | 1131 | var->red.offset = var->green.offset = var->blue.offset = 0; |
1132 | var->red.length = var->green.length = var->blue.length = 6; | 1132 | var->red.length = var->green.length = var->blue.length = 8; |
1133 | break; | 1133 | break; |
1134 | case 16: | 1134 | case 16: |
1135 | var->red.offset = 11; | 1135 | var->red.offset = 11; |
diff --git a/drivers/video/skeletonfb.c b/drivers/video/skeletonfb.c index a439159204a8..89158bc71da2 100644 --- a/drivers/video/skeletonfb.c +++ b/drivers/video/skeletonfb.c | |||
@@ -308,9 +308,11 @@ static int xxxfb_setcolreg(unsigned regno, unsigned red, unsigned green, | |||
308 | * color depth = SUM(var->{color}.length) | 308 | * color depth = SUM(var->{color}.length) |
309 | * | 309 | * |
310 | * Pseudocolor: | 310 | * Pseudocolor: |
311 | * var->{color}.offset is 0 | 311 | * var->{color}.offset is 0 unless the palette index takes less than |
312 | * var->{color}.length contains width of DAC or the number of unique | 312 | * bits_per_pixel bits and is stored in the upper |
313 | * colors available (color depth) | 313 | * bits of the pixel value |
314 | * var->{color}.length is set so that 1 << length is the number of | ||
315 | * available palette entries | ||
314 | * pseudo_palette is not used | 316 | * pseudo_palette is not used |
315 | * RAMDAC[X] is programmed to (red, green, blue) | 317 | * RAMDAC[X] is programmed to (red, green, blue) |
316 | * color depth = var->{color}.length | 318 | * color depth = var->{color}.length |
diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c index 0b370aebdbfd..421770b5e6ab 100644 --- a/drivers/video/uvesafb.c +++ b/drivers/video/uvesafb.c | |||
@@ -55,6 +55,7 @@ static u16 maxvf __devinitdata; /* maximum vertical frequency */ | |||
55 | static u16 maxhf __devinitdata; /* maximum horizontal frequency */ | 55 | static u16 maxhf __devinitdata; /* maximum horizontal frequency */ |
56 | static u16 vbemode __devinitdata; /* force use of a specific VBE mode */ | 56 | static u16 vbemode __devinitdata; /* force use of a specific VBE mode */ |
57 | static char *mode_option __devinitdata; | 57 | static char *mode_option __devinitdata; |
58 | static u8 dac_width = 6; | ||
58 | 59 | ||
59 | static struct uvesafb_ktask *uvfb_tasks[UVESAFB_TASKS_MAX]; | 60 | static struct uvesafb_ktask *uvfb_tasks[UVESAFB_TASKS_MAX]; |
60 | static DEFINE_MUTEX(uvfb_lock); | 61 | static DEFINE_MUTEX(uvfb_lock); |
@@ -303,22 +304,10 @@ static void uvesafb_setup_var(struct fb_var_screeninfo *var, | |||
303 | var->blue.offset = 0; | 304 | var->blue.offset = 0; |
304 | var->transp.offset = 0; | 305 | var->transp.offset = 0; |
305 | 306 | ||
306 | /* | 307 | var->red.length = 8; |
307 | * We're assuming that we can switch the DAC to 8 bits. If | 308 | var->green.length = 8; |
308 | * this proves to be incorrect, we'll update the fields | 309 | var->blue.length = 8; |
309 | * later in set_par(). | 310 | var->transp.length = 0; |
310 | */ | ||
311 | if (par->vbe_ib.capabilities & VBE_CAP_CAN_SWITCH_DAC) { | ||
312 | var->red.length = 8; | ||
313 | var->green.length = 8; | ||
314 | var->blue.length = 8; | ||
315 | var->transp.length = 0; | ||
316 | } else { | ||
317 | var->red.length = 6; | ||
318 | var->green.length = 6; | ||
319 | var->blue.length = 6; | ||
320 | var->transp.length = 0; | ||
321 | } | ||
322 | } | 311 | } |
323 | } | 312 | } |
324 | 313 | ||
@@ -1006,7 +995,7 @@ static int uvesafb_setcolreg(unsigned regno, unsigned red, unsigned green, | |||
1006 | struct fb_info *info) | 995 | struct fb_info *info) |
1007 | { | 996 | { |
1008 | struct uvesafb_pal_entry entry; | 997 | struct uvesafb_pal_entry entry; |
1009 | int shift = 16 - info->var.green.length; | 998 | int shift = 16 - dac_width; |
1010 | int err = 0; | 999 | int err = 0; |
1011 | 1000 | ||
1012 | if (regno >= info->cmap.len) | 1001 | if (regno >= info->cmap.len) |
@@ -1055,7 +1044,7 @@ static int uvesafb_setcolreg(unsigned regno, unsigned red, unsigned green, | |||
1055 | static int uvesafb_setcmap(struct fb_cmap *cmap, struct fb_info *info) | 1044 | static int uvesafb_setcmap(struct fb_cmap *cmap, struct fb_info *info) |
1056 | { | 1045 | { |
1057 | struct uvesafb_pal_entry *entries; | 1046 | struct uvesafb_pal_entry *entries; |
1058 | int shift = 16 - info->var.green.length; | 1047 | int shift = 16 - dac_width; |
1059 | int i, err = 0; | 1048 | int i, err = 0; |
1060 | 1049 | ||
1061 | if (info->var.bits_per_pixel == 8) { | 1050 | if (info->var.bits_per_pixel == 8) { |
@@ -1317,13 +1306,9 @@ setmode: | |||
1317 | err = uvesafb_exec(task); | 1306 | err = uvesafb_exec(task); |
1318 | if (err || (task->t.regs.eax & 0xffff) != 0x004f || | 1307 | if (err || (task->t.regs.eax & 0xffff) != 0x004f || |
1319 | ((task->t.regs.ebx & 0xff00) >> 8) != 8) { | 1308 | ((task->t.regs.ebx & 0xff00) >> 8) != 8) { |
1320 | /* | 1309 | dac_width = 6; |
1321 | * We've failed to set the DAC palette format - | 1310 | } else { |
1322 | * time to correct var. | 1311 | dac_width = 8; |
1323 | */ | ||
1324 | info->var.red.length = 6; | ||
1325 | info->var.green.length = 6; | ||
1326 | info->var.blue.length = 6; | ||
1327 | } | 1312 | } |
1328 | } | 1313 | } |
1329 | 1314 | ||
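The uvesafb rework stops guessing the DAC width through var->{color}.length and records it once, in a module-level dac_width, after the VBE call that tries to switch the DAC to 8 bits. Palette writes then scale the 16-bit fb_cmap components down to whatever the DAC accepts; in sketch form (same semantics as the driver, simplified):

static u8 dac_width = 6;	/* becomes 8 only if the VBE switch succeeded */

static u16 scale_cmap_component(u16 val)
{
	/* fb_cmap components are 16 bit; keep only the DAC's top bits */
	return val >> (16 - dac_width);
}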
diff --git a/drivers/video/vfb.c b/drivers/video/vfb.c index cc919ae46571..050d432c7d95 100644 --- a/drivers/video/vfb.c +++ b/drivers/video/vfb.c | |||
@@ -318,13 +318,16 @@ static int vfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, | |||
318 | * {hardwarespecific} contains width of RAMDAC | 318 | * {hardwarespecific} contains width of RAMDAC |
319 | * cmap[X] is programmed to (X << red.offset) | (X << green.offset) | (X << blue.offset) | 319 | * cmap[X] is programmed to (X << red.offset) | (X << green.offset) | (X << blue.offset) |
320 | * RAMDAC[X] is programmed to (red, green, blue) | 320 | * RAMDAC[X] is programmed to (red, green, blue) |
321 | * | 321 | * |
322 | * Pseudocolor: | 322 | * Pseudocolor: |
323 | * uses offset = 0 && length = RAMDAC register width. | 323 | * var->{color}.offset is 0 unless the palette index takes less than |
324 | * var->{color}.offset is 0 | 324 | * bits_per_pixel bits and is stored in the upper |
325 | * var->{color}.length contains widht of DAC | 325 | * bits of the pixel value |
326 | * var->{color}.length is set so that 1 << length is the number of available | ||
327 | * palette entries | ||
326 | * cmap is not used | 328 | * cmap is not used |
327 | * RAMDAC[X] is programmed to (red, green, blue) | 329 | * RAMDAC[X] is programmed to (red, green, blue) |
330 | * | ||
328 | * Truecolor: | 331 | * Truecolor: |
329 | * does not use DAC. Usually 3 are present. | 332 | * does not use DAC. Usually 3 are present. |
330 | * var->{color}.offset contains start of bitfield | 333 | * var->{color}.offset contains start of bitfield |
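The skeletonfb and vfb comment updates (and the s3fb and sis changes earlier) all encode the same convention: for pseudocolor visuals the bitfield length describes the number of palette entries (1 << length), not the RAMDAC width. For a plain 8bpp pseudocolor mode, a driver's check_var()/set_par() would therefore fill var as in this sketch:

	/* 8bpp pseudocolor: 256 palette entries, the index uses the whole byte */
	var->bits_per_pixel = 8;
	var->red.offset    = var->green.offset = var->blue.offset = 0;
	var->red.length    = var->green.length = var->blue.length = 8;
	var->transp.offset = 0;
	var->transp.length = 0;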
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 59268266b79a..9c76a061a04d 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c | |||
@@ -190,7 +190,8 @@ static int balloon(void *_vballoon) | |||
190 | try_to_freeze(); | 190 | try_to_freeze(); |
191 | wait_event_interruptible(vb->config_change, | 191 | wait_event_interruptible(vb->config_change, |
192 | (diff = towards_target(vb)) != 0 | 192 | (diff = towards_target(vb)) != 0 |
193 | || kthread_should_stop()); | 193 | || kthread_should_stop() |
194 | || freezing(current)); | ||
194 | if (diff > 0) | 195 | if (diff > 0) |
195 | fill_balloon(vb, diff); | 196 | fill_balloon(vb, diff); |
196 | else if (diff < 0) | 197 | else if (diff < 0) |
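The virtio_balloon fix adds freezing(current) to the wait condition so the kthread actually wakes up when the freezer wants it and reaches the try_to_freeze() at the top of its loop. The canonical shape of a freezable kthread loop, sketched with hypothetical have_work()/do_work() placeholders:

#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(example_wq);
static bool have_work(void);	/* placeholder predicate */
static void do_work(void);	/* placeholder worker */

static int example_thread(void *data)
{
	set_freezable();		/* kthreads are not freezable by default */
	while (!kthread_should_stop()) {
		try_to_freeze();
		wait_event_interruptible(example_wq,
					 have_work() ||
					 kthread_should_stop() ||
					 freezing(current));
		if (have_work())
			do_work();
	}
	return 0;
}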
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 63024145215d..5eb8f21da82e 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig | |||
@@ -240,8 +240,6 @@ config ORION5X_WATCHDOG | |||
240 | To compile this driver as a module, choose M here: the | 240 | To compile this driver as a module, choose M here: the |
241 | module will be called orion5x_wdt. | 241 | module will be called orion5x_wdt. |
242 | 242 | ||
243 | # ARM26 Architecture | ||
244 | |||
245 | # AVR32 Architecture | 243 | # AVR32 Architecture |
246 | 244 | ||
247 | config AT32AP700X_WDT | 245 | config AT32AP700X_WDT |
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile index 806b3eb08536..7f8c56b14f58 100644 --- a/drivers/watchdog/Makefile +++ b/drivers/watchdog/Makefile | |||
@@ -42,8 +42,6 @@ obj-$(CONFIG_IOP_WATCHDOG) += iop_wdt.o | |||
42 | obj-$(CONFIG_DAVINCI_WATCHDOG) += davinci_wdt.o | 42 | obj-$(CONFIG_DAVINCI_WATCHDOG) += davinci_wdt.o |
43 | obj-$(CONFIG_ORION5X_WATCHDOG) += orion5x_wdt.o | 43 | obj-$(CONFIG_ORION5X_WATCHDOG) += orion5x_wdt.o |
44 | 44 | ||
45 | # ARM26 Architecture | ||
46 | |||
47 | # AVR32 Architecture | 45 | # AVR32 Architecture |
48 | obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o | 46 | obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o |
49 | 47 | ||
diff --git a/drivers/watchdog/at91rm9200_wdt.c b/drivers/watchdog/at91rm9200_wdt.c index e35d54589232..29e52c237a3b 100644 --- a/drivers/watchdog/at91rm9200_wdt.c +++ b/drivers/watchdog/at91rm9200_wdt.c | |||
@@ -197,7 +197,7 @@ static struct miscdevice at91wdt_miscdev = { | |||
197 | .fops = &at91wdt_fops, | 197 | .fops = &at91wdt_fops, |
198 | }; | 198 | }; |
199 | 199 | ||
200 | static int __init at91wdt_probe(struct platform_device *pdev) | 200 | static int __devinit at91wdt_probe(struct platform_device *pdev) |
201 | { | 201 | { |
202 | int res; | 202 | int res; |
203 | 203 | ||
@@ -214,7 +214,7 @@ static int __init at91wdt_probe(struct platform_device *pdev) | |||
214 | return 0; | 214 | return 0; |
215 | } | 215 | } |
216 | 216 | ||
217 | static int __exit at91wdt_remove(struct platform_device *pdev) | 217 | static int __devexit at91wdt_remove(struct platform_device *pdev) |
218 | { | 218 | { |
219 | int res; | 219 | int res; |
220 | 220 | ||
@@ -252,7 +252,7 @@ static int at91wdt_resume(struct platform_device *pdev) | |||
252 | 252 | ||
253 | static struct platform_driver at91wdt_driver = { | 253 | static struct platform_driver at91wdt_driver = { |
254 | .probe = at91wdt_probe, | 254 | .probe = at91wdt_probe, |
255 | .remove = __exit_p(at91wdt_remove), | 255 | .remove = __devexit_p(at91wdt_remove), |
256 | .shutdown = at91wdt_shutdown, | 256 | .shutdown = at91wdt_shutdown, |
257 | .suspend = at91wdt_suspend, | 257 | .suspend = at91wdt_suspend, |
258 | .resume = at91wdt_resume, | 258 | .resume = at91wdt_resume, |
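The at91rm9200_wdt hunks are the standard annotation fix for platform drivers whose devices may show up after boot: probe and remove become __devinit/__devexit and the remove pointer goes through __devexit_p() so it compiles to NULL when hotplug support is configured out. In outline (names below are placeholders):

#include <linux/init.h>
#include <linux/platform_device.h>

static int __devinit example_probe(struct platform_device *pdev)
{
	return 0;	/* claim resources, register the misc device, ... */
}

static int __devexit example_remove(struct platform_device *pdev)
{
	return 0;	/* undo whatever probe set up */
}

static struct platform_driver example_driver = {
	.probe	= example_probe,
	.remove	= __devexit_p(example_remove),
	.driver	= {
		.name = "example-wdt",	/* placeholder name */
	},
};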
diff --git a/drivers/watchdog/i6300esb.c b/drivers/watchdog/i6300esb.c index 2dbe83570d65..7ba0b11ec525 100644 --- a/drivers/watchdog/i6300esb.c +++ b/drivers/watchdog/i6300esb.c | |||
@@ -52,10 +52,10 @@ | |||
52 | #define ESB_LOCK_REG 0x68 /* WDT lock register */ | 52 | #define ESB_LOCK_REG 0x68 /* WDT lock register */ |
53 | 53 | ||
54 | /* Memory mapped registers */ | 54 | /* Memory mapped registers */ |
55 | #define ESB_TIMER1_REG BASEADDR + 0x00 /* Timer1 value after each reset */ | 55 | #define ESB_TIMER1_REG (BASEADDR + 0x00)/* Timer1 value after each reset */ |
56 | #define ESB_TIMER2_REG BASEADDR + 0x04 /* Timer2 value after each reset */ | 56 | #define ESB_TIMER2_REG (BASEADDR + 0x04)/* Timer2 value after each reset */ |
57 | #define ESB_GINTSR_REG BASEADDR + 0x08 /* General Interrupt Status Register */ | 57 | #define ESB_GINTSR_REG (BASEADDR + 0x08)/* General Interrupt Status Register */ |
58 | #define ESB_RELOAD_REG BASEADDR + 0x0c /* Reload register */ | 58 | #define ESB_RELOAD_REG (BASEADDR + 0x0c)/* Reload register */ |
59 | 59 | ||
60 | /* Lock register bits */ | 60 | /* Lock register bits */ |
61 | #define ESB_WDT_FUNC (0x01 << 2) /* Watchdog functionality */ | 61 | #define ESB_WDT_FUNC (0x01 << 2) /* Watchdog functionality */ |
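The first i6300esb hunk is macro hygiene: without the parentheses, an expression built around ESB_TIMER2_REG and friends can associate against the bare "BASEADDR + offset" text in surprising ways. A tiny illustration of the difference (here 'base' simply stands in for the driver's ioremapped BASEADDR):

#define REG_NO_PARENS	base + 0x04		/* how the old macros expanded */
#define REG_WITH_PARENS	(base + 0x04)		/* how they expand after the fix */

	unsigned long base = 0x1000;
	unsigned long a = REG_NO_PARENS * 2;	/* base + (0x04 * 2) == 0x1008 */
	unsigned long b = REG_WITH_PARENS * 2;	/* (base + 0x04) * 2 == 0x2008 */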
@@ -68,6 +68,7 @@ | |||
68 | #define ESB_WDT_INTTYPE (0x11 << 0) /* Interrupt type on timer1 timeout */ | 68 | #define ESB_WDT_INTTYPE (0x11 << 0) /* Interrupt type on timer1 timeout */ |
69 | 69 | ||
70 | /* Reload register bits */ | 70 | /* Reload register bits */ |
71 | #define ESB_WDT_TIMEOUT (0x01 << 9) /* Watchdog timed out */ | ||
71 | #define ESB_WDT_RELOAD (0x01 << 8) /* prevent timeout */ | 72 | #define ESB_WDT_RELOAD (0x01 << 8) /* prevent timeout */ |
72 | 73 | ||
73 | /* Magic constants */ | 74 | /* Magic constants */ |
@@ -87,7 +88,6 @@ static struct platform_device *esb_platform_device; | |||
87 | /* 30 sec default heartbeat (1 < heartbeat < 2*1023) */ | 88 | /* 30 sec default heartbeat (1 < heartbeat < 2*1023) */ |
88 | #define WATCHDOG_HEARTBEAT 30 | 89 | #define WATCHDOG_HEARTBEAT 30 |
89 | static int heartbeat = WATCHDOG_HEARTBEAT; /* in seconds */ | 90 | static int heartbeat = WATCHDOG_HEARTBEAT; /* in seconds */ |
90 | |||
91 | module_param(heartbeat, int, 0); | 91 | module_param(heartbeat, int, 0); |
92 | MODULE_PARM_DESC(heartbeat, | 92 | MODULE_PARM_DESC(heartbeat, |
93 | "Watchdog heartbeat in seconds. (1<heartbeat<2046, default=" | 93 | "Watchdog heartbeat in seconds. (1<heartbeat<2046, default=" |
@@ -123,7 +123,7 @@ static int esb_timer_start(void) | |||
123 | esb_unlock_registers(); | 123 | esb_unlock_registers(); |
124 | writew(ESB_WDT_RELOAD, ESB_RELOAD_REG); | 124 | writew(ESB_WDT_RELOAD, ESB_RELOAD_REG); |
125 | /* Enable or Enable + Lock? */ | 125 | /* Enable or Enable + Lock? */ |
126 | val = 0x02 | (nowayout ? 0x01 : 0x00); | 126 | val = ESB_WDT_ENABLE | (nowayout ? ESB_WDT_LOCK : 0x00); |
127 | pci_write_config_byte(esb_pci, ESB_LOCK_REG, val); | 127 | pci_write_config_byte(esb_pci, ESB_LOCK_REG, val); |
128 | spin_unlock(&esb_lock); | 128 | spin_unlock(&esb_lock); |
129 | return 0; | 129 | return 0; |
@@ -143,7 +143,7 @@ static int esb_timer_stop(void) | |||
143 | spin_unlock(&esb_lock); | 143 | spin_unlock(&esb_lock); |
144 | 144 | ||
145 | /* Returns 0 if the timer was disabled, non-zero otherwise */ | 145 | /* Returns 0 if the timer was disabled, non-zero otherwise */ |
146 | return (val & 0x01); | 146 | return val & ESB_WDT_ENABLE; |
147 | } | 147 | } |
148 | 148 | ||
149 | static void esb_timer_keepalive(void) | 149 | static void esb_timer_keepalive(void) |
@@ -190,18 +190,6 @@ static int esb_timer_set_heartbeat(int time) | |||
190 | return 0; | 190 | return 0; |
191 | } | 191 | } |
192 | 192 | ||
193 | static int esb_timer_read(void) | ||
194 | { | ||
195 | u32 count; | ||
196 | |||
197 | /* This isn't documented, and doesn't take into | ||
198 | * acount which stage is running, but it looks | ||
199 | * like a 20 bit count down, so we might as well report it. | ||
200 | */ | ||
201 | pci_read_config_dword(esb_pci, 0x64, &count); | ||
202 | return (int)count; | ||
203 | } | ||
204 | |||
205 | /* | 193 | /* |
206 | * /dev/watchdog handling | 194 | * /dev/watchdog handling |
207 | */ | 195 | */ |
@@ -282,7 +270,7 @@ static long esb_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
282 | sizeof(ident)) ? -EFAULT : 0; | 270 | sizeof(ident)) ? -EFAULT : 0; |
283 | 271 | ||
284 | case WDIOC_GETSTATUS: | 272 | case WDIOC_GETSTATUS: |
285 | return put_user(esb_timer_read(), p); | 273 | return put_user(0, p); |
286 | 274 | ||
287 | case WDIOC_GETBOOTSTATUS: | 275 | case WDIOC_GETBOOTSTATUS: |
288 | return put_user(triggered, p); | 276 | return put_user(triggered, p); |
@@ -362,8 +350,6 @@ MODULE_DEVICE_TABLE(pci, esb_pci_tbl); | |||
362 | 350 | ||
363 | static unsigned char __devinit esb_getdevice(void) | 351 | static unsigned char __devinit esb_getdevice(void) |
364 | { | 352 | { |
365 | u8 val1; | ||
366 | unsigned short val2; | ||
367 | /* | 353 | /* |
368 | * Find the PCI device | 354 | * Find the PCI device |
369 | */ | 355 | */ |
@@ -371,66 +357,79 @@ static unsigned char __devinit esb_getdevice(void) | |||
371 | esb_pci = pci_get_device(PCI_VENDOR_ID_INTEL, | 357 | esb_pci = pci_get_device(PCI_VENDOR_ID_INTEL, |
372 | PCI_DEVICE_ID_INTEL_ESB_9, NULL); | 358 | PCI_DEVICE_ID_INTEL_ESB_9, NULL); |
373 | 359 | ||
374 | if (esb_pci) { | 360 | if (!esb_pci) |
375 | if (pci_enable_device(esb_pci)) { | 361 | return 0; |
376 | printk(KERN_ERR PFX "failed to enable device\n"); | ||
377 | goto err_devput; | ||
378 | } | ||
379 | 362 | ||
380 | if (pci_request_region(esb_pci, 0, ESB_MODULE_NAME)) { | 363 | if (pci_enable_device(esb_pci)) { |
381 | printk(KERN_ERR PFX "failed to request region\n"); | 364 | printk(KERN_ERR PFX "failed to enable device\n"); |
382 | goto err_disable; | 365 | goto err_devput; |
383 | } | 366 | } |
384 | 367 | ||
385 | BASEADDR = pci_ioremap_bar(esb_pci, 0); | 368 | if (pci_request_region(esb_pci, 0, ESB_MODULE_NAME)) { |
386 | if (BASEADDR == NULL) { | 369 | printk(KERN_ERR PFX "failed to request region\n"); |
387 | /* Something's wrong here, BASEADDR has to be set */ | 370 | goto err_disable; |
388 | printk(KERN_ERR PFX "failed to get BASEADDR\n"); | 371 | } |
389 | goto err_release; | ||
390 | } | ||
391 | 372 | ||
392 | /* | 373 | BASEADDR = pci_ioremap_bar(esb_pci, 0); |
393 | * The watchdog has two timers, it can be setup so that the | 374 | if (BASEADDR == NULL) { |
394 | * expiry of timer1 results in an interrupt and the expiry of | 375 | /* Something's wrong here, BASEADDR has to be set */ |
395 | * timer2 results in a reboot. We set it to not generate | 376 | printk(KERN_ERR PFX "failed to get BASEADDR\n"); |
396 | * any interrupts as there is not much we can do with it | 377 | goto err_release; |
397 | * right now. | 378 | } |
398 | * | 379 | |
399 | * We also enable reboots and set the timer frequency to | 380 | /* Done */ |
400 | * the PCI clock divided by 2^15 (approx 1KHz). | 381 | return 1; |
401 | */ | ||
402 | pci_write_config_word(esb_pci, ESB_CONFIG_REG, 0x0003); | ||
403 | |||
404 | /* Check that the WDT isn't already locked */ | ||
405 | pci_read_config_byte(esb_pci, ESB_LOCK_REG, &val1); | ||
406 | if (val1 & ESB_WDT_LOCK) | ||
407 | printk(KERN_WARNING PFX "nowayout already set\n"); | ||
408 | |||
409 | /* Set the timer to watchdog mode and disable it for now */ | ||
410 | pci_write_config_byte(esb_pci, ESB_LOCK_REG, 0x00); | ||
411 | |||
412 | /* Check if the watchdog was previously triggered */ | ||
413 | esb_unlock_registers(); | ||
414 | val2 = readw(ESB_RELOAD_REG); | ||
415 | triggered = (val2 & (0x01 << 9) >> 9); | ||
416 | |||
417 | /* Reset trigger flag and timers */ | ||
418 | esb_unlock_registers(); | ||
419 | writew((0x11 << 8), ESB_RELOAD_REG); | ||
420 | |||
421 | /* Done */ | ||
422 | return 1; | ||
423 | 382 | ||
424 | err_release: | 383 | err_release: |
425 | pci_release_region(esb_pci, 0); | 384 | pci_release_region(esb_pci, 0); |
426 | err_disable: | 385 | err_disable: |
427 | pci_disable_device(esb_pci); | 386 | pci_disable_device(esb_pci); |
428 | err_devput: | 387 | err_devput: |
429 | pci_dev_put(esb_pci); | 388 | pci_dev_put(esb_pci); |
430 | } | ||
431 | return 0; | 389 | return 0; |
432 | } | 390 | } |
433 | 391 | ||
392 | static void __devinit esb_initdevice(void) | ||
393 | { | ||
394 | u8 val1; | ||
395 | u16 val2; | ||
396 | |||
397 | /* | ||
398 | * Config register: | ||
399 | * Bit 5 : 0 = Enable WDT_OUTPUT | ||
400 | * Bit 2 : 0 = set the timer frequency to the PCI clock | ||
401 | * divided by 2^15 (approx 1KHz). | ||
402 | * Bits 1:0 : 11 = WDT_INT_TYPE Disabled. | ||
403 | * The watchdog has two timers, it can be setup so that the | ||
404 | * expiry of timer1 results in an interrupt and the expiry of | ||
405 | * timer2 results in a reboot. We set it to not generate | ||
406 | * any interrupts as there is not much we can do with it | ||
407 | * right now. | ||
408 | */ | ||
409 | pci_write_config_word(esb_pci, ESB_CONFIG_REG, 0x0003); | ||
410 | |||
411 | /* Check that the WDT isn't already locked */ | ||
412 | pci_read_config_byte(esb_pci, ESB_LOCK_REG, &val1); | ||
413 | if (val1 & ESB_WDT_LOCK) | ||
414 | printk(KERN_WARNING PFX "nowayout already set\n"); | ||
415 | |||
416 | /* Set the timer to watchdog mode and disable it for now */ | ||
417 | pci_write_config_byte(esb_pci, ESB_LOCK_REG, 0x00); | ||
418 | |||
419 | /* Check if the watchdog was previously triggered */ | ||
420 | esb_unlock_registers(); | ||
421 | val2 = readw(ESB_RELOAD_REG); | ||
422 | if (val2 & ESB_WDT_TIMEOUT) | ||
423 | triggered = WDIOF_CARDRESET; | ||
424 | |||
425 | /* Reset WDT_TIMEOUT flag and timers */ | ||
426 | esb_unlock_registers(); | ||
427 | writew((ESB_WDT_TIMEOUT | ESB_WDT_RELOAD), ESB_RELOAD_REG); | ||
428 | |||
429 | /* And set the correct timeout value */ | ||
430 | esb_timer_set_heartbeat(heartbeat); | ||
431 | } | ||
432 | |||
434 | static int __devinit esb_probe(struct platform_device *dev) | 433 | static int __devinit esb_probe(struct platform_device *dev) |
435 | { | 434 | { |
436 | int ret; | 435 | int ret; |
@@ -441,13 +440,17 @@ static int __devinit esb_probe(struct platform_device *dev) | |||
441 | 440 | ||
442 | /* Check that the heartbeat value is within its range; | 441 | /* Check that the heartbeat value is within its range; |
443 | if not reset to the default */ | 442 | if not reset to the default */ |
444 | if (esb_timer_set_heartbeat(heartbeat)) { | 443 | if (heartbeat < 0x1 || heartbeat > 2 * 0x03ff) { |
445 | esb_timer_set_heartbeat(WATCHDOG_HEARTBEAT); | 444 | heartbeat = WATCHDOG_HEARTBEAT; |
446 | printk(KERN_INFO PFX | 445 | printk(KERN_INFO PFX |
447 | "heartbeat value must be 1<heartbeat<2046, using %d\n", | 446 | "heartbeat value must be 1<heartbeat<2046, using %d\n", |
448 | heartbeat); | 447 | heartbeat); |
449 | } | 448 | } |
450 | 449 | ||
450 | /* Initialize the watchdog and make sure it does not run */ | ||
451 | esb_initdevice(); | ||
452 | |||
453 | /* Register the watchdog so that userspace has access to it */ | ||
451 | ret = misc_register(&esb_miscdev); | 454 | ret = misc_register(&esb_miscdev); |
452 | if (ret != 0) { | 455 | if (ret != 0) { |
453 | printk(KERN_ERR PFX | 456 | printk(KERN_ERR PFX |
@@ -455,7 +458,6 @@ static int __devinit esb_probe(struct platform_device *dev) | |||
455 | WATCHDOG_MINOR, ret); | 458 | WATCHDOG_MINOR, ret); |
456 | goto err_unmap; | 459 | goto err_unmap; |
457 | } | 460 | } |
458 | esb_timer_stop(); | ||
459 | printk(KERN_INFO PFX | 461 | printk(KERN_INFO PFX |
460 | "initialized (0x%p). heartbeat=%d sec (nowayout=%d)\n", | 462 | "initialized (0x%p). heartbeat=%d sec (nowayout=%d)\n", |
461 | BASEADDR, heartbeat, nowayout); | 463 | BASEADDR, heartbeat, nowayout); |
@@ -463,11 +465,8 @@ static int __devinit esb_probe(struct platform_device *dev) | |||
463 | 465 | ||
464 | err_unmap: | 466 | err_unmap: |
465 | iounmap(BASEADDR); | 467 | iounmap(BASEADDR); |
466 | /* err_release: */ | ||
467 | pci_release_region(esb_pci, 0); | 468 | pci_release_region(esb_pci, 0); |
468 | /* err_disable: */ | ||
469 | pci_disable_device(esb_pci); | 469 | pci_disable_device(esb_pci); |
470 | /* err_devput: */ | ||
471 | pci_dev_put(esb_pci); | 470 | pci_dev_put(esb_pci); |
472 | return ret; | 471 | return ret; |
473 | } | 472 | } |
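Note: the i6300esb change above records a watchdog-caused reboot as WDIOF_CARDRESET in `triggered`. Watchdog drivers conventionally hand that flag back to userspace through the WDIOC_GETBOOTSTATUS ioctl; the sketch below is illustrative only (device path, error handling, and the assumption that this driver reports `triggered` via that ioctl are mine, not taken from the patch).

/* check_bootstatus.c - ask whether the last reboot was caused by the watchdog */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
	int fd = open("/dev/watchdog", O_RDWR);
	int bootstatus = 0;

	if (fd < 0) {
		perror("open /dev/watchdog");
		return 1;
	}

	/* WDIOC_GETBOOTSTATUS returns the driver's boot status bits */
	if (ioctl(fd, WDIOC_GETBOOTSTATUS, &bootstatus) == 0)
		printf("last reboot %s caused by the watchdog\n",
		       (bootstatus & WDIOF_CARDRESET) ? "was" : "was not");
	else
		perror("WDIOC_GETBOOTSTATUS");

	/* magic close: 'V' tells the driver it is OK to stop the timer */
	if (write(fd, "V", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}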
diff --git a/drivers/watchdog/ks8695_wdt.c b/drivers/watchdog/ks8695_wdt.c index 74c92d384112..ae3832110acb 100644 --- a/drivers/watchdog/ks8695_wdt.c +++ b/drivers/watchdog/ks8695_wdt.c | |||
@@ -221,7 +221,7 @@ static struct miscdevice ks8695wdt_miscdev = { | |||
221 | .fops = &ks8695wdt_fops, | 221 | .fops = &ks8695wdt_fops, |
222 | }; | 222 | }; |
223 | 223 | ||
224 | static int __init ks8695wdt_probe(struct platform_device *pdev) | 224 | static int __devinit ks8695wdt_probe(struct platform_device *pdev) |
225 | { | 225 | { |
226 | int res; | 226 | int res; |
227 | 227 | ||
@@ -238,7 +238,7 @@ static int __init ks8695wdt_probe(struct platform_device *pdev) | |||
238 | return 0; | 238 | return 0; |
239 | } | 239 | } |
240 | 240 | ||
241 | static int __exit ks8695wdt_remove(struct platform_device *pdev) | 241 | static int __devexit ks8695wdt_remove(struct platform_device *pdev) |
242 | { | 242 | { |
243 | int res; | 243 | int res; |
244 | 244 | ||
@@ -276,7 +276,7 @@ static int ks8695wdt_resume(struct platform_device *pdev) | |||
276 | 276 | ||
277 | static struct platform_driver ks8695wdt_driver = { | 277 | static struct platform_driver ks8695wdt_driver = { |
278 | .probe = ks8695wdt_probe, | 278 | .probe = ks8695wdt_probe, |
279 | .remove = __exit_p(ks8695wdt_remove), | 279 | .remove = __devexit_p(ks8695wdt_remove), |
280 | .shutdown = ks8695wdt_shutdown, | 280 | .shutdown = ks8695wdt_shutdown, |
281 | .suspend = ks8695wdt_suspend, | 281 | .suspend = ks8695wdt_suspend, |
282 | .resume = ks8695wdt_resume, | 282 | .resume = ks8695wdt_resume, |
diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c index aa5ad6e33f02..f2713851aaab 100644 --- a/drivers/watchdog/omap_wdt.c +++ b/drivers/watchdog/omap_wdt.c | |||
@@ -258,7 +258,7 @@ static const struct file_operations omap_wdt_fops = { | |||
258 | .release = omap_wdt_release, | 258 | .release = omap_wdt_release, |
259 | }; | 259 | }; |
260 | 260 | ||
261 | static int __init omap_wdt_probe(struct platform_device *pdev) | 261 | static int __devinit omap_wdt_probe(struct platform_device *pdev) |
262 | { | 262 | { |
263 | struct resource *res, *mem; | 263 | struct resource *res, *mem; |
264 | struct omap_wdt_dev *wdev; | 264 | struct omap_wdt_dev *wdev; |
@@ -367,7 +367,7 @@ static void omap_wdt_shutdown(struct platform_device *pdev) | |||
367 | omap_wdt_disable(wdev); | 367 | omap_wdt_disable(wdev); |
368 | } | 368 | } |
369 | 369 | ||
370 | static int omap_wdt_remove(struct platform_device *pdev) | 370 | static int __devexit omap_wdt_remove(struct platform_device *pdev) |
371 | { | 371 | { |
372 | struct omap_wdt_dev *wdev = platform_get_drvdata(pdev); | 372 | struct omap_wdt_dev *wdev = platform_get_drvdata(pdev); |
373 | struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 373 | struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
@@ -426,7 +426,7 @@ static int omap_wdt_resume(struct platform_device *pdev) | |||
426 | 426 | ||
427 | static struct platform_driver omap_wdt_driver = { | 427 | static struct platform_driver omap_wdt_driver = { |
428 | .probe = omap_wdt_probe, | 428 | .probe = omap_wdt_probe, |
429 | .remove = omap_wdt_remove, | 429 | .remove = __devexit_p(omap_wdt_remove), |
430 | .shutdown = omap_wdt_shutdown, | 430 | .shutdown = omap_wdt_shutdown, |
431 | .suspend = omap_wdt_suspend, | 431 | .suspend = omap_wdt_suspend, |
432 | .resume = omap_wdt_resume, | 432 | .resume = omap_wdt_resume, |
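Note: the ks8695 and omap hunks above move probe/remove to the hotplug-aware section annotations; __devexit_p() evaluates to the function pointer when the __devexit section is kept and to NULL when it is discarded, so the driver never references code that was dropped at link time. A minimal skeleton of the pattern, using hypothetical foo_wdt_* names (driver registration is omitted):

/* Hotplug-aware platform driver skeleton (hypothetical example) */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int __devinit foo_wdt_probe(struct platform_device *pdev)
{
	/* claim resources, register the misc device, etc. */
	return 0;
}

static int __devexit foo_wdt_remove(struct platform_device *pdev)
{
	/* undo everything done in probe */
	return 0;
}

static struct platform_driver foo_wdt_driver = {
	.probe	= foo_wdt_probe,
	/* becomes NULL if __devexit code is configured out */
	.remove	= __devexit_p(foo_wdt_remove),
	.driver	= {
		.owner	= THIS_MODULE,
		.name	= "foo_wdt",
	},
};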
diff --git a/drivers/watchdog/orion5x_wdt.c b/drivers/watchdog/orion5x_wdt.c index e81441f103dd..7529616739d2 100644 --- a/drivers/watchdog/orion5x_wdt.c +++ b/drivers/watchdog/orion5x_wdt.c | |||
@@ -42,7 +42,17 @@ static unsigned int wdt_tclk; | |||
42 | static unsigned long wdt_status; | 42 | static unsigned long wdt_status; |
43 | static spinlock_t wdt_lock; | 43 | static spinlock_t wdt_lock; |
44 | 44 | ||
45 | static void wdt_enable(void) | 45 | static void orion5x_wdt_ping(void) |
46 | { | ||
47 | spin_lock(&wdt_lock); | ||
48 | |||
49 | /* Reload watchdog duration */ | ||
50 | writel(wdt_tclk * heartbeat, WDT_VAL); | ||
51 | |||
52 | spin_unlock(&wdt_lock); | ||
53 | } | ||
54 | |||
55 | static void orion5x_wdt_enable(void) | ||
46 | { | 56 | { |
47 | u32 reg; | 57 | u32 reg; |
48 | 58 | ||
@@ -69,7 +79,7 @@ static void wdt_enable(void) | |||
69 | spin_unlock(&wdt_lock); | 79 | spin_unlock(&wdt_lock); |
70 | } | 80 | } |
71 | 81 | ||
72 | static void wdt_disable(void) | 82 | static void orion5x_wdt_disable(void) |
73 | { | 83 | { |
74 | u32 reg; | 84 | u32 reg; |
75 | 85 | ||
@@ -101,7 +111,7 @@ static int orion5x_wdt_open(struct inode *inode, struct file *file) | |||
101 | if (test_and_set_bit(WDT_IN_USE, &wdt_status)) | 111 | if (test_and_set_bit(WDT_IN_USE, &wdt_status)) |
102 | return -EBUSY; | 112 | return -EBUSY; |
103 | clear_bit(WDT_OK_TO_CLOSE, &wdt_status); | 113 | clear_bit(WDT_OK_TO_CLOSE, &wdt_status); |
104 | wdt_enable(); | 114 | orion5x_wdt_enable(); |
105 | return nonseekable_open(inode, file); | 115 | return nonseekable_open(inode, file); |
106 | } | 116 | } |
107 | 117 | ||
@@ -122,18 +132,28 @@ static ssize_t orion5x_wdt_write(struct file *file, const char *data, | |||
122 | set_bit(WDT_OK_TO_CLOSE, &wdt_status); | 132 | set_bit(WDT_OK_TO_CLOSE, &wdt_status); |
123 | } | 133 | } |
124 | } | 134 | } |
125 | wdt_enable(); | 135 | orion5x_wdt_ping(); |
126 | } | 136 | } |
127 | return len; | 137 | return len; |
128 | } | 138 | } |
129 | 139 | ||
130 | static struct watchdog_info ident = { | 140 | static int orion5x_wdt_settimeout(int new_time) |
141 | { | ||
142 | if ((new_time <= 0) || (new_time > wdt_max_duration)) | ||
143 | return -EINVAL; | ||
144 | |||
145 | /* Set new watchdog time to be used when | ||
146 | * orion5x_wdt_enable() or orion5x_wdt_ping() is called. */ | ||
147 | heartbeat = new_time; | ||
148 | return 0; | ||
149 | } | ||
150 | |||
151 | static const struct watchdog_info ident = { | ||
131 | .options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT | | 152 | .options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT | |
132 | WDIOF_KEEPALIVEPING, | 153 | WDIOF_KEEPALIVEPING, |
133 | .identity = "Orion5x Watchdog", | 154 | .identity = "Orion5x Watchdog", |
134 | }; | 155 | }; |
135 | 156 | ||
136 | |||
137 | static long orion5x_wdt_ioctl(struct file *file, unsigned int cmd, | 157 | static long orion5x_wdt_ioctl(struct file *file, unsigned int cmd, |
138 | unsigned long arg) | 158 | unsigned long arg) |
139 | { | 159 | { |
@@ -152,7 +172,7 @@ static long orion5x_wdt_ioctl(struct file *file, unsigned int cmd, | |||
152 | break; | 172 | break; |
153 | 173 | ||
154 | case WDIOC_KEEPALIVE: | 174 | case WDIOC_KEEPALIVE: |
155 | wdt_enable(); | 175 | orion5x_wdt_ping(); |
156 | ret = 0; | 176 | ret = 0; |
157 | break; | 177 | break; |
158 | 178 | ||
@@ -161,12 +181,11 @@ static long orion5x_wdt_ioctl(struct file *file, unsigned int cmd, | |||
161 | if (ret) | 181 | if (ret) |
162 | break; | 182 | break; |
163 | 183 | ||
164 | if (time <= 0 || time > wdt_max_duration) { | 184 | if (orion5x_wdt_settimeout(time)) { |
165 | ret = -EINVAL; | 185 | ret = -EINVAL; |
166 | break; | 186 | break; |
167 | } | 187 | } |
168 | heartbeat = time; | 188 | orion5x_wdt_ping(); |
169 | wdt_enable(); | ||
170 | /* Fall through */ | 189 | /* Fall through */ |
171 | 190 | ||
172 | case WDIOC_GETTIMEOUT: | 191 | case WDIOC_GETTIMEOUT: |
@@ -187,7 +206,7 @@ static long orion5x_wdt_ioctl(struct file *file, unsigned int cmd, | |||
187 | static int orion5x_wdt_release(struct inode *inode, struct file *file) | 206 | static int orion5x_wdt_release(struct inode *inode, struct file *file) |
188 | { | 207 | { |
189 | if (test_bit(WDT_OK_TO_CLOSE, &wdt_status)) | 208 | if (test_bit(WDT_OK_TO_CLOSE, &wdt_status)) |
190 | wdt_disable(); | 209 | orion5x_wdt_disable(); |
191 | else | 210 | else |
192 | printk(KERN_CRIT "WATCHDOG: Device closed unexpectedly - " | 211 | printk(KERN_CRIT "WATCHDOG: Device closed unexpectedly - " |
193 | "timer will not stop\n"); | 212 | "timer will not stop\n"); |
@@ -230,7 +249,7 @@ static int __devinit orion5x_wdt_probe(struct platform_device *pdev) | |||
230 | orion5x_wdt_miscdev.parent = &pdev->dev; | 249 | orion5x_wdt_miscdev.parent = &pdev->dev; |
231 | 250 | ||
232 | wdt_max_duration = WDT_MAX_CYCLE_COUNT / wdt_tclk; | 251 | wdt_max_duration = WDT_MAX_CYCLE_COUNT / wdt_tclk; |
233 | if (heartbeat <= 0 || heartbeat > wdt_max_duration) | 252 | if (orion5x_wdt_settimeout(heartbeat)) |
234 | heartbeat = wdt_max_duration; | 253 | heartbeat = wdt_max_duration; |
235 | 254 | ||
236 | ret = misc_register(&orion5x_wdt_miscdev); | 255 | ret = misc_register(&orion5x_wdt_miscdev); |
@@ -247,7 +266,7 @@ static int __devexit orion5x_wdt_remove(struct platform_device *pdev) | |||
247 | int ret; | 266 | int ret; |
248 | 267 | ||
249 | if (test_bit(WDT_IN_USE, &wdt_status)) { | 268 | if (test_bit(WDT_IN_USE, &wdt_status)) { |
250 | wdt_disable(); | 269 | orion5x_wdt_disable(); |
251 | clear_bit(WDT_IN_USE, &wdt_status); | 270 | clear_bit(WDT_IN_USE, &wdt_status); |
252 | } | 271 | } |
253 | 272 | ||
@@ -258,9 +277,16 @@ static int __devexit orion5x_wdt_remove(struct platform_device *pdev) | |||
258 | return ret; | 277 | return ret; |
259 | } | 278 | } |
260 | 279 | ||
280 | static void orion5x_wdt_shutdown(struct platform_device *pdev) | ||
281 | { | ||
282 | if (test_bit(WDT_IN_USE, &wdt_status)) | ||
283 | orion5x_wdt_disable(); | ||
284 | } | ||
285 | |||
261 | static struct platform_driver orion5x_wdt_driver = { | 286 | static struct platform_driver orion5x_wdt_driver = { |
262 | .probe = orion5x_wdt_probe, | 287 | .probe = orion5x_wdt_probe, |
263 | .remove = __devexit_p(orion5x_wdt_remove), | 288 | .remove = __devexit_p(orion5x_wdt_remove), |
289 | .shutdown = orion5x_wdt_shutdown, | ||
264 | .driver = { | 290 | .driver = { |
265 | .owner = THIS_MODULE, | 291 | .owner = THIS_MODULE, |
266 | .name = "orion5x_wdt", | 292 | .name = "orion5x_wdt", |
@@ -285,10 +311,11 @@ MODULE_AUTHOR("Sylver Bruneau <sylver.bruneau@googlemail.com>"); | |||
285 | MODULE_DESCRIPTION("Orion5x Processor Watchdog"); | 311 | MODULE_DESCRIPTION("Orion5x Processor Watchdog"); |
286 | 312 | ||
287 | module_param(heartbeat, int, 0); | 313 | module_param(heartbeat, int, 0); |
288 | MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds"); | 314 | MODULE_PARM_DESC(heartbeat, "Initial watchdog heartbeat in seconds"); |
289 | 315 | ||
290 | module_param(nowayout, int, 0); | 316 | module_param(nowayout, int, 0); |
291 | MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started"); | 317 | MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" |
318 | __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); | ||
292 | 319 | ||
293 | MODULE_LICENSE("GPL"); | 320 | MODULE_LICENSE("GPL"); |
294 | MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); | 321 | MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); |
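Note: the orion5x split between orion5x_wdt_ping() (reload only) and orion5x_wdt_enable() is exercised through the standard watchdog ioctls shown in the hunks above: WDIOC_SETTIMEOUT now goes through orion5x_wdt_settimeout() and WDIOC_KEEPALIVE only reloads the counter. A minimal userspace sketch, assuming the usual /dev/watchdog node; the 30-second timeout and loop are illustrative values, not from the patch:

/* wdt_ping.c - set a timeout and keep the watchdog alive from userspace */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
	int fd = open("/dev/watchdog", O_RDWR);
	int timeout = 30;	/* seconds; must be within the driver's range */
	int i;

	if (fd < 0) {
		perror("open /dev/watchdog");
		return 1;
	}

	/* rejected with -EINVAL if out of range (cf. orion5x_wdt_settimeout) */
	if (ioctl(fd, WDIOC_SETTIMEOUT, &timeout) == 0)
		printf("timeout set to %d seconds\n", timeout);

	/* each keepalive reloads the counter without touching the enable bits */
	for (i = 0; i < 5; i++) {
		ioctl(fd, WDIOC_KEEPALIVE, NULL);
		sleep(10);
	}

	/* magic close so the driver stops the timer on release */
	if (write(fd, "V", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}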
diff --git a/drivers/xen/cpu_hotplug.c b/drivers/xen/cpu_hotplug.c index 5f54c01c1568..bdfd584ad853 100644 --- a/drivers/xen/cpu_hotplug.c +++ b/drivers/xen/cpu_hotplug.c | |||
@@ -21,29 +21,41 @@ static void disable_hotplug_cpu(int cpu) | |||
21 | set_cpu_present(cpu, false); | 21 | set_cpu_present(cpu, false); |
22 | } | 22 | } |
23 | 23 | ||
24 | static void vcpu_hotplug(unsigned int cpu) | 24 | static int vcpu_online(unsigned int cpu) |
25 | { | 25 | { |
26 | int err; | 26 | int err; |
27 | char dir[32], state[32]; | 27 | char dir[32], state[32]; |
28 | 28 | ||
29 | if (!cpu_possible(cpu)) | ||
30 | return; | ||
31 | |||
32 | sprintf(dir, "cpu/%u", cpu); | 29 | sprintf(dir, "cpu/%u", cpu); |
33 | err = xenbus_scanf(XBT_NIL, dir, "availability", "%s", state); | 30 | err = xenbus_scanf(XBT_NIL, dir, "availability", "%s", state); |
34 | if (err != 1) { | 31 | if (err != 1) { |
35 | printk(KERN_ERR "XENBUS: Unable to read cpu state\n"); | 32 | printk(KERN_ERR "XENBUS: Unable to read cpu state\n"); |
36 | return; | 33 | return err; |
37 | } | 34 | } |
38 | 35 | ||
39 | if (strcmp(state, "online") == 0) { | 36 | if (strcmp(state, "online") == 0) |
37 | return 1; | ||
38 | else if (strcmp(state, "offline") == 0) | ||
39 | return 0; | ||
40 | |||
41 | printk(KERN_ERR "XENBUS: unknown state(%s) on CPU%d\n", state, cpu); | ||
42 | return -EINVAL; | ||
43 | } | ||
44 | static void vcpu_hotplug(unsigned int cpu) | ||
45 | { | ||
46 | if (!cpu_possible(cpu)) | ||
47 | return; | ||
48 | |||
49 | switch (vcpu_online(cpu)) { | ||
50 | case 1: | ||
40 | enable_hotplug_cpu(cpu); | 51 | enable_hotplug_cpu(cpu); |
41 | } else if (strcmp(state, "offline") == 0) { | 52 | break; |
53 | case 0: | ||
42 | (void)cpu_down(cpu); | 54 | (void)cpu_down(cpu); |
43 | disable_hotplug_cpu(cpu); | 55 | disable_hotplug_cpu(cpu); |
44 | } else { | 56 | break; |
45 | printk(KERN_ERR "XENBUS: unknown state(%s) on CPU%d\n", | 57 | default: |
46 | state, cpu); | 58 | break; |
47 | } | 59 | } |
48 | } | 60 | } |
49 | 61 | ||
@@ -64,12 +76,20 @@ static void handle_vcpu_hotplug_event(struct xenbus_watch *watch, | |||
64 | static int setup_cpu_watcher(struct notifier_block *notifier, | 76 | static int setup_cpu_watcher(struct notifier_block *notifier, |
65 | unsigned long event, void *data) | 77 | unsigned long event, void *data) |
66 | { | 78 | { |
79 | int cpu; | ||
67 | static struct xenbus_watch cpu_watch = { | 80 | static struct xenbus_watch cpu_watch = { |
68 | .node = "cpu", | 81 | .node = "cpu", |
69 | .callback = handle_vcpu_hotplug_event}; | 82 | .callback = handle_vcpu_hotplug_event}; |
70 | 83 | ||
71 | (void)register_xenbus_watch(&cpu_watch); | 84 | (void)register_xenbus_watch(&cpu_watch); |
72 | 85 | ||
86 | for_each_possible_cpu(cpu) { | ||
87 | if (vcpu_online(cpu) == 0) { | ||
88 | (void)cpu_down(cpu); | ||
89 | cpu_clear(cpu, cpu_present_map); | ||
90 | } | ||
91 | } | ||
92 | |||
73 | return NOTIFY_DONE; | 93 | return NOTIFY_DONE; |
74 | } | 94 | } |
75 | 95 | ||
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c index 0d61db1e7b49..4b5b84837ee1 100644 --- a/drivers/xen/manage.c +++ b/drivers/xen/manage.c | |||
@@ -62,14 +62,15 @@ static int xen_suspend(void *data) | |||
62 | gnttab_resume(); | 62 | gnttab_resume(); |
63 | xen_mm_unpin_all(); | 63 | xen_mm_unpin_all(); |
64 | 64 | ||
65 | sysdev_resume(); | ||
66 | |||
67 | if (!*cancelled) { | 65 | if (!*cancelled) { |
68 | xen_irq_resume(); | 66 | xen_irq_resume(); |
69 | xen_console_resume(); | 67 | xen_console_resume(); |
70 | xen_timer_resume(); | 68 | xen_timer_resume(); |
71 | } | 69 | } |
72 | 70 | ||
71 | sysdev_resume(); | ||
72 | device_power_up(PMSG_RESUME); | ||
73 | |||
73 | return 0; | 74 | return 0; |
74 | } | 75 | } |
75 | 76 | ||