Diffstat (limited to 'drivers')
183 files changed, 3195 insertions, 2837 deletions
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 43a95e5640de..5b73f6a2cd86 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -92,6 +92,7 @@ struct acpi_ac {
 
 #ifdef CONFIG_ACPI_PROCFS_POWER
 static const struct file_operations acpi_ac_fops = {
+	.owner = THIS_MODULE,
 	.open = acpi_ac_open_fs,
 	.read = seq_read,
 	.llseek = seq_lseek,
@@ -195,16 +196,11 @@ static int acpi_ac_add_fs(struct acpi_device *device)
 	}
 
 	/* 'state' [R] */
-	entry = create_proc_entry(ACPI_AC_FILE_STATE,
-				  S_IRUGO, acpi_device_dir(device));
+	entry = proc_create_data(ACPI_AC_FILE_STATE,
+				 S_IRUGO, acpi_device_dir(device),
+				 &acpi_ac_fops, acpi_driver_data(device));
 	if (!entry)
 		return -ENODEV;
-	else {
-		entry->proc_fops = &acpi_ac_fops;
-		entry->data = acpi_driver_data(device);
-		entry->owner = THIS_MODULE;
-	}
-
 	return 0;
 }
 
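The pattern in this hunk recurs in every ACPI file below: create_proc_entry() made the /proc entry visible before proc_fops and data were filled in, so a concurrent open could race with those assignments, while proc_create_data() attaches the file_operations and private data before publication and the module reference moves into .owner of the file_operations. A minimal sketch of the conversion, using placeholder example_fops/example_data rather than any particular driver's objects:

	#include <linux/module.h>
	#include <linux/proc_fs.h>

	static const struct file_operations example_fops;	/* placeholder */
	static void *example_data;				/* placeholder */

	static int example_add_proc(struct proc_dir_entry *parent)
	{
		struct proc_dir_entry *entry;

		/*
		 * Old, racy form removed by this series:
		 *
		 *	entry = create_proc_entry("state", S_IRUGO, parent);
		 *	if (!entry)
		 *		return -ENODEV;
		 *	entry->proc_fops = &example_fops;
		 *	entry->data = example_data;
		 *	entry->owner = THIS_MODULE;
		 */

		/* New form: fops and data are set before the entry is visible. */
		entry = proc_create_data("state", S_IRUGO, parent,
					 &example_fops, example_data);
		if (!entry)
			return -ENODEV;
		return 0;
	}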
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index d5729d5dc190..b1c723f9f58d 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -741,15 +741,13 @@ static int acpi_battery_add_fs(struct acpi_device *device)
 	}
 
 	for (i = 0; i < ACPI_BATTERY_NUMFILES; ++i) {
-		entry = create_proc_entry(acpi_battery_file[i].name,
-				  acpi_battery_file[i].mode, acpi_device_dir(device));
+		entry = proc_create_data(acpi_battery_file[i].name,
+					 acpi_battery_file[i].mode,
+					 acpi_device_dir(device),
+					 &acpi_battery_file[i].ops,
+					 acpi_driver_data(device));
 		if (!entry)
 			return -ENODEV;
-		else {
-			entry->proc_fops = &acpi_battery_file[i].ops;
-			entry->data = acpi_driver_data(device);
-			entry->owner = THIS_MODULE;
-		}
 	}
 	return 0;
 }
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 6c5da83cdb68..1dfec413588c 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -102,6 +102,7 @@ struct acpi_button {
 };
 
 static const struct file_operations acpi_button_info_fops = {
+	.owner = THIS_MODULE,
 	.open = acpi_button_info_open_fs,
 	.read = seq_read,
 	.llseek = seq_lseek,
@@ -109,6 +110,7 @@ static const struct file_operations acpi_button_info_fops = {
 };
 
 static const struct file_operations acpi_button_state_fops = {
+	.owner = THIS_MODULE,
 	.open = acpi_button_state_open_fs,
 	.read = seq_read,
 	.llseek = seq_lseek,
@@ -207,27 +209,21 @@ static int acpi_button_add_fs(struct acpi_device *device)
 	acpi_device_dir(device)->owner = THIS_MODULE;
 
 	/* 'info' [R] */
-	entry = create_proc_entry(ACPI_BUTTON_FILE_INFO,
-				  S_IRUGO, acpi_device_dir(device));
+	entry = proc_create_data(ACPI_BUTTON_FILE_INFO,
+				 S_IRUGO, acpi_device_dir(device),
+				 &acpi_button_info_fops,
+				 acpi_driver_data(device));
 	if (!entry)
 		return -ENODEV;
-	else {
-		entry->proc_fops = &acpi_button_info_fops;
-		entry->data = acpi_driver_data(device);
-		entry->owner = THIS_MODULE;
-	}
 
 	/* show lid state [R] */
 	if (button->type == ACPI_BUTTON_TYPE_LID) {
-		entry = create_proc_entry(ACPI_BUTTON_FILE_STATE,
-					  S_IRUGO, acpi_device_dir(device));
+		entry = proc_create_data(ACPI_BUTTON_FILE_STATE,
+					 S_IRUGO, acpi_device_dir(device),
+					 &acpi_button_state_fops,
+					 acpi_driver_data(device));
 		if (!entry)
 			return -ENODEV;
-		else {
-			entry->proc_fops = &acpi_button_state_fops;
-			entry->data = acpi_driver_data(device);
-			entry->owner = THIS_MODULE;
-		}
 	}
 
 	return 0;
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 7222a18a0319..e3f04b272f3f 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -669,16 +669,11 @@ static int acpi_ec_add_fs(struct acpi_device *device)
 		return -ENODEV;
 	}
 
-	entry = create_proc_entry(ACPI_EC_FILE_INFO, S_IRUGO,
-				  acpi_device_dir(device));
+	entry = proc_create_data(ACPI_EC_FILE_INFO, S_IRUGO,
+				 acpi_device_dir(device),
+				 &acpi_ec_info_ops, acpi_driver_data(device));
 	if (!entry)
 		return -ENODEV;
-	else {
-		entry->proc_fops = &acpi_ec_info_ops;
-		entry->data = acpi_driver_data(device);
-		entry->owner = THIS_MODULE;
-	}
-
 	return 0;
 }
 
diff --git a/drivers/acpi/event.c b/drivers/acpi/event.c
index abec1ca94cf4..0c24bd4d6562 100644
--- a/drivers/acpi/event.c
+++ b/drivers/acpi/event.c
@@ -102,6 +102,7 @@ static unsigned int acpi_system_poll_event(struct file *file, poll_table * wait)
 }
 
 static const struct file_operations acpi_system_event_ops = {
+	.owner = THIS_MODULE,
 	.open = acpi_system_open_event,
 	.read = acpi_system_read_event,
 	.release = acpi_system_close_event,
@@ -294,10 +295,9 @@ static int __init acpi_event_init(void)
 
 #ifdef CONFIG_ACPI_PROC_EVENT
 	/* 'event' [R] */
-	entry = create_proc_entry("event", S_IRUSR, acpi_root_dir);
-	if (entry)
-		entry->proc_fops = &acpi_system_event_ops;
-	else
+	entry = proc_create("event", S_IRUSR, acpi_root_dir,
+			    &acpi_system_event_ops);
+	if (!entry)
 		return -ENODEV;
 #endif
 
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index c8e3cba423ef..194077ab9b85 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -192,17 +192,13 @@ static int acpi_fan_add_fs(struct acpi_device *device)
 	}
 
 	/* 'status' [R/W] */
-	entry = create_proc_entry(ACPI_FAN_FILE_STATE,
-				  S_IFREG | S_IRUGO | S_IWUSR,
-				  acpi_device_dir(device));
+	entry = proc_create_data(ACPI_FAN_FILE_STATE,
+				 S_IFREG | S_IRUGO | S_IWUSR,
+				 acpi_device_dir(device),
+				 &acpi_fan_state_ops,
+				 device);
 	if (!entry)
 		return -ENODEV;
-	else {
-		entry->proc_fops = &acpi_fan_state_ops;
-		entry->data = device;
-		entry->owner = THIS_MODULE;
-	}
-
 	return 0;
 }
 
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 76bf6d90c700..21fc8bf0d31f 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -93,6 +93,7 @@ struct acpi_power_resource {
 static struct list_head acpi_power_resource_list;
 
 static const struct file_operations acpi_power_fops = {
+	.owner = THIS_MODULE,
 	.open = acpi_power_open_fs,
 	.read = seq_read,
 	.llseek = seq_lseek,
@@ -543,15 +544,11 @@ static int acpi_power_add_fs(struct acpi_device *device)
 	}
 
 	/* 'status' [R] */
-	entry = create_proc_entry(ACPI_POWER_FILE_STATUS,
-				  S_IRUGO, acpi_device_dir(device));
+	entry = proc_create_data(ACPI_POWER_FILE_STATUS,
+				 S_IRUGO, acpi_device_dir(device),
+				 &acpi_power_fops, acpi_driver_data(device));
 	if (!entry)
 		return -EIO;
-	else {
-		entry->proc_fops = &acpi_power_fops;
-		entry->data = acpi_driver_data(device);
-	}
-
 	return 0;
 }
 
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index a825b431b64f..dd28c912e84f 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -112,6 +112,7 @@ static struct acpi_driver acpi_processor_driver = {
 #define UNINSTALL_NOTIFY_HANDLER	2
 
 static const struct file_operations acpi_processor_info_fops = {
+	.owner = THIS_MODULE,
 	.open = acpi_processor_info_open_fs,
 	.read = seq_read,
 	.llseek = seq_lseek,
@@ -326,40 +327,30 @@ static int acpi_processor_add_fs(struct acpi_device *device)
 	acpi_device_dir(device)->owner = THIS_MODULE;
 
 	/* 'info' [R] */
-	entry = create_proc_entry(ACPI_PROCESSOR_FILE_INFO,
-				  S_IRUGO, acpi_device_dir(device));
+	entry = proc_create_data(ACPI_PROCESSOR_FILE_INFO,
+				 S_IRUGO, acpi_device_dir(device),
+				 &acpi_processor_info_fops,
+				 acpi_driver_data(device));
 	if (!entry)
 		return -EIO;
-	else {
-		entry->proc_fops = &acpi_processor_info_fops;
-		entry->data = acpi_driver_data(device);
-		entry->owner = THIS_MODULE;
-	}
 
 	/* 'throttling' [R/W] */
-	entry = create_proc_entry(ACPI_PROCESSOR_FILE_THROTTLING,
-				  S_IFREG | S_IRUGO | S_IWUSR,
-				  acpi_device_dir(device));
+	entry = proc_create_data(ACPI_PROCESSOR_FILE_THROTTLING,
+				 S_IFREG | S_IRUGO | S_IWUSR,
+				 acpi_device_dir(device),
+				 &acpi_processor_throttling_fops,
+				 acpi_driver_data(device));
 	if (!entry)
 		return -EIO;
-	else {
-		entry->proc_fops = &acpi_processor_throttling_fops;
-		entry->data = acpi_driver_data(device);
-		entry->owner = THIS_MODULE;
-	}
 
 	/* 'limit' [R/W] */
-	entry = create_proc_entry(ACPI_PROCESSOR_FILE_LIMIT,
-				  S_IFREG | S_IRUGO | S_IWUSR,
-				  acpi_device_dir(device));
+	entry = proc_create_data(ACPI_PROCESSOR_FILE_LIMIT,
+				 S_IFREG | S_IRUGO | S_IWUSR,
+				 acpi_device_dir(device),
+				 &acpi_processor_limit_fops,
+				 acpi_driver_data(device));
 	if (!entry)
 		return -EIO;
-	else {
-		entry->proc_fops = &acpi_processor_limit_fops;
-		entry->data = acpi_driver_data(device);
-		entry->owner = THIS_MODULE;
-	}
-
 	return 0;
 }
 
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 0d90ff5fd117..789d4947ed31 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -1282,6 +1282,7 @@ static int acpi_processor_power_open_fs(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations acpi_processor_power_fops = {
+	.owner = THIS_MODULE,
 	.open = acpi_processor_power_open_fs,
 	.read = seq_read,
 	.llseek = seq_lseek,
@@ -1822,16 +1823,12 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
 	}
 
 	/* 'power' [R] */
-	entry = create_proc_entry(ACPI_PROCESSOR_FILE_POWER,
-				  S_IRUGO, acpi_device_dir(device));
+	entry = proc_create_data(ACPI_PROCESSOR_FILE_POWER,
+				 S_IRUGO, acpi_device_dir(device),
+				 &acpi_processor_power_fops,
+				 acpi_driver_data(device));
 	if (!entry)
 		return -EIO;
-	else {
-		entry->proc_fops = &acpi_processor_power_fops;
-		entry->data = acpi_driver_data(device);
-		entry->owner = THIS_MODULE;
-	}
-
 	return 0;
 }
 
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index b477a4be8a69..d80b2d1441af 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -411,6 +411,7 @@ EXPORT_SYMBOL(acpi_processor_notify_smm);
 
 static int acpi_processor_perf_open_fs(struct inode *inode, struct file *file);
 static struct file_operations acpi_processor_perf_fops = {
+	.owner = THIS_MODULE,
 	.open = acpi_processor_perf_open_fs,
 	.read = seq_read,
 	.llseek = seq_lseek,
@@ -456,7 +457,6 @@ static int acpi_processor_perf_open_fs(struct inode *inode, struct file *file)
 
 static void acpi_cpufreq_add_file(struct acpi_processor *pr)
 {
-	struct proc_dir_entry *entry = NULL;
 	struct acpi_device *device = NULL;
 
 
@@ -464,14 +464,9 @@ static void acpi_cpufreq_add_file(struct acpi_processor *pr)
 		return;
 
 	/* add file 'performance' [R/W] */
-	entry = create_proc_entry(ACPI_PROCESSOR_FILE_PERFORMANCE,
-				  S_IFREG | S_IRUGO,
-				  acpi_device_dir(device));
-	if (entry){
-		entry->proc_fops = &acpi_processor_perf_fops;
-		entry->data = acpi_driver_data(device);
-		entry->owner = THIS_MODULE;
-	}
+	proc_create_data(ACPI_PROCESSOR_FILE_PERFORMANCE, S_IFREG | S_IRUGO,
+			 acpi_device_dir(device),
+			 &acpi_processor_perf_fops, acpi_driver_data(device));
 	return;
 }
 
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index 649ae99b9216..ef34b18f95ca 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -509,6 +509,7 @@ static ssize_t acpi_processor_write_limit(struct file * file,
 }
 
 struct file_operations acpi_processor_limit_fops = {
+	.owner = THIS_MODULE,
 	.open = acpi_processor_limit_open_fs,
 	.read = seq_read,
 	.write = acpi_processor_write_limit,
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index 0bba3a914e86..bb06738860c4 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -1252,6 +1252,7 @@ static ssize_t acpi_processor_write_throttling(struct file *file,
 }
 
 struct file_operations acpi_processor_throttling_fops = {
+	.owner = THIS_MODULE,
 	.open = acpi_processor_throttling_open_fs,
 	.read = seq_read,
 	.write = acpi_processor_write_throttling,
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index 585ae3c9c8ea..10a36512647c 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -483,8 +483,6 @@ acpi_sbs_add_fs(struct proc_dir_entry **dir,
 		struct file_operations *state_fops,
 		struct file_operations *alarm_fops, void *data)
 {
-	struct proc_dir_entry *entry = NULL;
-
 	if (!*dir) {
 		*dir = proc_mkdir(dir_name, parent_dir);
 		if (!*dir) {
@@ -494,34 +492,19 @@ acpi_sbs_add_fs(struct proc_dir_entry **dir,
 	}
 
 	/* 'info' [R] */
-	if (info_fops) {
-		entry = create_proc_entry(ACPI_SBS_FILE_INFO, S_IRUGO, *dir);
-		if (entry) {
-			entry->proc_fops = info_fops;
-			entry->data = data;
-			entry->owner = THIS_MODULE;
-		}
-	}
+	if (info_fops)
+		proc_create_data(ACPI_SBS_FILE_INFO, S_IRUGO, *dir,
+				 info_fops, data);
 
 	/* 'state' [R] */
-	if (state_fops) {
-		entry = create_proc_entry(ACPI_SBS_FILE_STATE, S_IRUGO, *dir);
-		if (entry) {
-			entry->proc_fops = state_fops;
-			entry->data = data;
-			entry->owner = THIS_MODULE;
-		}
-	}
+	if (state_fops)
+		proc_create_data(ACPI_SBS_FILE_STATE, S_IRUGO, *dir,
+				 state_fops, data);
 
 	/* 'alarm' [R/W] */
-	if (alarm_fops) {
-		entry = create_proc_entry(ACPI_SBS_FILE_ALARM, S_IRUGO, *dir);
-		if (entry) {
-			entry->proc_fops = alarm_fops;
-			entry->data = data;
-			entry->owner = THIS_MODULE;
-		}
-	}
+	if (alarm_fops)
+		proc_create_data(ACPI_SBS_FILE_ALARM, S_IRUGO, *dir,
+				 alarm_fops, data);
 	return 0;
 }
 
diff --git a/drivers/acpi/sleep/proc.c b/drivers/acpi/sleep/proc.c
index f8df5217d477..8a5fe8710513 100644
--- a/drivers/acpi/sleep/proc.c
+++ b/drivers/acpi/sleep/proc.c
@@ -440,6 +440,7 @@ acpi_system_wakeup_device_open_fs(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations acpi_system_wakeup_device_fops = {
+	.owner = THIS_MODULE,
 	.open = acpi_system_wakeup_device_open_fs,
 	.read = seq_read,
 	.write = acpi_system_write_wakeup_device,
@@ -449,6 +450,7 @@ static const struct file_operations acpi_system_wakeup_device_fops = {
 
 #ifdef	CONFIG_ACPI_PROCFS
 static const struct file_operations acpi_system_sleep_fops = {
+	.owner = THIS_MODULE,
 	.open = acpi_system_sleep_open_fs,
 	.read = seq_read,
 	.write = acpi_system_write_sleep,
@@ -459,6 +461,7 @@ static const struct file_operations acpi_system_sleep_fops = {
 
 #ifdef	HAVE_ACPI_LEGACY_ALARM
 static const struct file_operations acpi_system_alarm_fops = {
+	.owner = THIS_MODULE,
 	.open = acpi_system_alarm_open_fs,
 	.read = seq_read,
 	.write = acpi_system_write_alarm,
@@ -477,37 +480,26 @@ static u32 rtc_handler(void *context)
 
 static int __init acpi_sleep_proc_init(void)
 {
-	struct proc_dir_entry *entry = NULL;
-
 	if (acpi_disabled)
 		return 0;
 
 #ifdef	CONFIG_ACPI_PROCFS
 	/* 'sleep' [R/W] */
-	entry =
-	    create_proc_entry("sleep", S_IFREG | S_IRUGO | S_IWUSR,
-			      acpi_root_dir);
-	if (entry)
-		entry->proc_fops = &acpi_system_sleep_fops;
+	proc_create("sleep", S_IFREG | S_IRUGO | S_IWUSR,
+		    acpi_root_dir, &acpi_system_sleep_fops);
 #endif				/* CONFIG_ACPI_PROCFS */
 
 #ifdef	HAVE_ACPI_LEGACY_ALARM
 	/* 'alarm' [R/W] */
-	entry =
-	    create_proc_entry("alarm", S_IFREG | S_IRUGO | S_IWUSR,
-			      acpi_root_dir);
-	if (entry)
-		entry->proc_fops = &acpi_system_alarm_fops;
+	proc_create("alarm", S_IFREG | S_IRUGO | S_IWUSR,
+		    acpi_root_dir, &acpi_system_alarm_fops);
 
 	acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, NULL);
 #endif				/* HAVE_ACPI_LEGACY_ALARM */
 
 	/* 'wakeup device' [R/W] */
-	entry =
-	    create_proc_entry("wakeup", S_IFREG | S_IRUGO | S_IWUSR,
-			      acpi_root_dir);
-	if (entry)
-		entry->proc_fops = &acpi_system_wakeup_device_fops;
+	proc_create("wakeup", S_IFREG | S_IRUGO | S_IWUSR,
+		    acpi_root_dir, &acpi_system_wakeup_device_fops);
 
 	return 0;
 }
diff --git a/drivers/acpi/system.c b/drivers/acpi/system.c
index 4749f379a915..769f24855eb6 100644
--- a/drivers/acpi/system.c
+++ b/drivers/acpi/system.c
@@ -396,6 +396,7 @@ static int acpi_system_info_open_fs(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations acpi_system_info_ops = {
+	.owner = THIS_MODULE,
 	.open = acpi_system_info_open_fs,
 	.read = seq_read,
 	.llseek = seq_lseek,
@@ -406,6 +407,7 @@ static ssize_t acpi_system_read_dsdt(struct file *, char __user *, size_t,
 				     loff_t *);
 
 static const struct file_operations acpi_system_dsdt_ops = {
+	.owner = THIS_MODULE,
 	.read = acpi_system_read_dsdt,
 };
 
@@ -430,6 +432,7 @@ static ssize_t acpi_system_read_fadt(struct file *, char __user *, size_t,
 				     loff_t *);
 
 static const struct file_operations acpi_system_fadt_ops = {
+	.owner = THIS_MODULE,
 	.read = acpi_system_read_fadt,
 };
 
@@ -454,31 +457,23 @@ static int acpi_system_procfs_init(void)
 {
 	struct proc_dir_entry *entry;
 	int error = 0;
-	char *name;
 
 	/* 'info' [R] */
-	name = ACPI_SYSTEM_FILE_INFO;
-	entry = create_proc_entry(name, S_IRUGO, acpi_root_dir);
+	entry = proc_create(ACPI_SYSTEM_FILE_INFO, S_IRUGO, acpi_root_dir,
+			    &acpi_system_info_ops);
 	if (!entry)
 		goto Error;
-	else {
-		entry->proc_fops = &acpi_system_info_ops;
-	}
 
 	/* 'dsdt' [R] */
-	name = ACPI_SYSTEM_FILE_DSDT;
-	entry = create_proc_entry(name, S_IRUSR, acpi_root_dir);
-	if (entry)
-		entry->proc_fops = &acpi_system_dsdt_ops;
-	else
+	entry = proc_create(ACPI_SYSTEM_FILE_DSDT, S_IRUSR, acpi_root_dir,
+			    &acpi_system_dsdt_ops);
+	if (!entry)
 		goto Error;
 
 	/* 'fadt' [R] */
-	name = ACPI_SYSTEM_FILE_FADT;
-	entry = create_proc_entry(name, S_IRUSR, acpi_root_dir);
-	if (entry)
-		entry->proc_fops = &acpi_system_fadt_ops;
-	else
+	entry = proc_create(ACPI_SYSTEM_FILE_FADT, S_IRUSR, acpi_root_dir,
+			    &acpi_system_fadt_ops);
+	if (!entry)
 		goto Error;
 
 Done:
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 766bd25d3376..0815ac3ae3d6 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -198,6 +198,7 @@ struct acpi_thermal {
 };
 
 static const struct file_operations acpi_thermal_state_fops = {
+	.owner = THIS_MODULE,
 	.open = acpi_thermal_state_open_fs,
 	.read = seq_read,
 	.llseek = seq_lseek,
@@ -205,6 +206,7 @@ static const struct file_operations acpi_thermal_state_fops = {
 };
 
 static const struct file_operations acpi_thermal_temp_fops = {
+	.owner = THIS_MODULE,
 	.open = acpi_thermal_temp_open_fs,
 	.read = seq_read,
 	.llseek = seq_lseek,
@@ -212,6 +214,7 @@ static const struct file_operations acpi_thermal_temp_fops = {
 };
 
 static const struct file_operations acpi_thermal_trip_fops = {
+	.owner = THIS_MODULE,
 	.open = acpi_thermal_trip_open_fs,
 	.read = seq_read,
 	.llseek = seq_lseek,
@@ -219,6 +222,7 @@ static const struct file_operations acpi_thermal_trip_fops = {
 };
 
 static const struct file_operations acpi_thermal_cooling_fops = {
+	.owner = THIS_MODULE,
 	.open = acpi_thermal_cooling_open_fs,
 	.read = seq_read,
 	.write = acpi_thermal_write_cooling_mode,
@@ -227,6 +231,7 @@ static const struct file_operations acpi_thermal_cooling_fops = {
 };
 
 static const struct file_operations acpi_thermal_polling_fops = {
+	.owner = THIS_MODULE,
 	.open = acpi_thermal_polling_open_fs,
 	.read = seq_read,
 	.write = acpi_thermal_write_polling,
@@ -1419,63 +1424,47 @@ static int acpi_thermal_add_fs(struct acpi_device *device)
 	}
 
 	/* 'state' [R] */
-	entry = create_proc_entry(ACPI_THERMAL_FILE_STATE,
-				  S_IRUGO, acpi_device_dir(device));
+	entry = proc_create_data(ACPI_THERMAL_FILE_STATE,
+				 S_IRUGO, acpi_device_dir(device),
+				 &acpi_thermal_state_fops,
+				 acpi_driver_data(device));
 	if (!entry)
 		return -ENODEV;
-	else {
-		entry->proc_fops = &acpi_thermal_state_fops;
-		entry->data = acpi_driver_data(device);
-		entry->owner = THIS_MODULE;
-	}
 
 	/* 'temperature' [R] */
-	entry = create_proc_entry(ACPI_THERMAL_FILE_TEMPERATURE,
-				  S_IRUGO, acpi_device_dir(device));
+	entry = proc_create_data(ACPI_THERMAL_FILE_TEMPERATURE,
+				 S_IRUGO, acpi_device_dir(device),
+				 &acpi_thermal_temp_fops,
+				 acpi_driver_data(device));
 	if (!entry)
 		return -ENODEV;
-	else {
-		entry->proc_fops = &acpi_thermal_temp_fops;
-		entry->data = acpi_driver_data(device);
-		entry->owner = THIS_MODULE;
-	}
 
 	/* 'trip_points' [R] */
-	entry = create_proc_entry(ACPI_THERMAL_FILE_TRIP_POINTS,
-				  S_IRUGO,
-				  acpi_device_dir(device));
+	entry = proc_create_data(ACPI_THERMAL_FILE_TRIP_POINTS,
+				 S_IRUGO,
+				 acpi_device_dir(device),
+				 &acpi_thermal_trip_fops,
+				 acpi_driver_data(device));
 	if (!entry)
 		return -ENODEV;
-	else {
-		entry->proc_fops = &acpi_thermal_trip_fops;
-		entry->data = acpi_driver_data(device);
-		entry->owner = THIS_MODULE;
-	}
 
 	/* 'cooling_mode' [R/W] */
-	entry = create_proc_entry(ACPI_THERMAL_FILE_COOLING_MODE,
-				  S_IFREG | S_IRUGO | S_IWUSR,
-				  acpi_device_dir(device));
+	entry = proc_create_data(ACPI_THERMAL_FILE_COOLING_MODE,
+				 S_IFREG | S_IRUGO | S_IWUSR,
+				 acpi_device_dir(device),
+				 &acpi_thermal_cooling_fops,
+				 acpi_driver_data(device));
 	if (!entry)
 		return -ENODEV;
-	else {
-		entry->proc_fops = &acpi_thermal_cooling_fops;
-		entry->data = acpi_driver_data(device);
-		entry->owner = THIS_MODULE;
-	}
 
 	/* 'polling_frequency' [R/W] */
-	entry = create_proc_entry(ACPI_THERMAL_FILE_POLLING_FREQ,
-				  S_IFREG | S_IRUGO | S_IWUSR,
-				  acpi_device_dir(device));
+	entry = proc_create_data(ACPI_THERMAL_FILE_POLLING_FREQ,
+				 S_IFREG | S_IRUGO | S_IWUSR,
+				 acpi_device_dir(device),
+				 &acpi_thermal_polling_fops,
+				 acpi_driver_data(device));
 	if (!entry)
 		return -ENODEV;
-	else {
-		entry->proc_fops = &acpi_thermal_polling_fops;
-		entry->data = acpi_driver_data(device);
-		entry->owner = THIS_MODULE;
-	}
-
 	return 0;
 }
 
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 980a74188781..43b228314a86 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -192,6 +192,7 @@ struct acpi_video_device {
 /* bus */
 static int acpi_video_bus_info_open_fs(struct inode *inode, struct file *file);
 static struct file_operations acpi_video_bus_info_fops = {
+	.owner = THIS_MODULE,
 	.open = acpi_video_bus_info_open_fs,
 	.read = seq_read,
 	.llseek = seq_lseek,
@@ -200,6 +201,7 @@ static struct file_operations acpi_video_bus_info_fops = {
 
 static int acpi_video_bus_ROM_open_fs(struct inode *inode, struct file *file);
 static struct file_operations acpi_video_bus_ROM_fops = {
+	.owner = THIS_MODULE,
 	.open = acpi_video_bus_ROM_open_fs,
 	.read = seq_read,
 	.llseek = seq_lseek,
@@ -209,6 +211,7 @@ static struct file_operations acpi_video_bus_ROM_fops = {
 static int acpi_video_bus_POST_info_open_fs(struct inode *inode,
 					    struct file *file);
 static struct file_operations acpi_video_bus_POST_info_fops = {
+	.owner = THIS_MODULE,
 	.open = acpi_video_bus_POST_info_open_fs,
 	.read = seq_read,
 	.llseek = seq_lseek,
@@ -217,6 +220,7 @@ static struct file_operations acpi_video_bus_POST_info_fops = {
 
 static int acpi_video_bus_POST_open_fs(struct inode *inode, struct file *file);
 static struct file_operations acpi_video_bus_POST_fops = {
+	.owner = THIS_MODULE,
 	.open = acpi_video_bus_POST_open_fs,
 	.read = seq_read,
 	.llseek = seq_lseek,
@@ -225,6 +229,7 @@ static struct file_operations acpi_video_bus_POST_fops = {
 
 static int acpi_video_bus_DOS_open_fs(struct inode *inode, struct file *file);
 static struct file_operations acpi_video_bus_DOS_fops = {
+	.owner = THIS_MODULE,
 	.open = acpi_video_bus_DOS_open_fs,
 	.read = seq_read,
 	.llseek = seq_lseek,
@@ -235,6 +240,7 @@ static struct file_operations acpi_video_bus_DOS_fops = {
 static int acpi_video_device_info_open_fs(struct inode *inode,
 					  struct file *file);
 static struct file_operations acpi_video_device_info_fops = {
+	.owner = THIS_MODULE,
 	.open = acpi_video_device_info_open_fs,
 	.read = seq_read,
 	.llseek = seq_lseek,
@@ -244,6 +250,7 @@ static struct file_operations acpi_video_device_info_fops = {
 static int acpi_video_device_state_open_fs(struct inode *inode,
 					   struct file *file);
 static struct file_operations acpi_video_device_state_fops = {
+	.owner = THIS_MODULE,
 	.open = acpi_video_device_state_open_fs,
 	.read = seq_read,
 	.llseek = seq_lseek,
@@ -253,6 +260,7 @@ static struct file_operations acpi_video_device_state_fops = {
 static int acpi_video_device_brightness_open_fs(struct inode *inode,
 						struct file *file);
 static struct file_operations acpi_video_device_brightness_fops = {
+	.owner = THIS_MODULE,
 	.open = acpi_video_device_brightness_open_fs,
 	.read = seq_read,
 	.llseek = seq_lseek,
@@ -262,6 +270,7 @@ static struct file_operations acpi_video_device_brightness_fops = {
 static int acpi_video_device_EDID_open_fs(struct inode *inode,
 					  struct file *file);
 static struct file_operations acpi_video_device_EDID_fops = {
+	.owner = THIS_MODULE,
 	.open = acpi_video_device_EDID_open_fs,
 	.read = seq_read,
 	.llseek = seq_lseek,
@@ -1070,51 +1079,36 @@ static int acpi_video_device_add_fs(struct acpi_device *device)
 	}
 
 	/* 'info' [R] */
-	entry = create_proc_entry("info", S_IRUGO, acpi_device_dir(device));
+	entry = proc_create_data("info", S_IRUGO, acpi_device_dir(device),
+			&acpi_video_device_info_fops, acpi_driver_data(device));
 	if (!entry)
 		return -ENODEV;
-	else {
-		entry->proc_fops = &acpi_video_device_info_fops;
-		entry->data = acpi_driver_data(device);
-		entry->owner = THIS_MODULE;
-	}
 
 	/* 'state' [R/W] */
-	entry =
-	    create_proc_entry("state", S_IFREG | S_IRUGO | S_IWUSR,
-			      acpi_device_dir(device));
+	acpi_video_device_state_fops.write = acpi_video_device_write_state;
+	entry = proc_create_data("state", S_IFREG | S_IRUGO | S_IWUSR,
+				 acpi_device_dir(device),
+				 &acpi_video_device_state_fops,
+				 acpi_driver_data(device));
 	if (!entry)
 		return -ENODEV;
-	else {
-		acpi_video_device_state_fops.write = acpi_video_device_write_state;
-		entry->proc_fops = &acpi_video_device_state_fops;
-		entry->data = acpi_driver_data(device);
-		entry->owner = THIS_MODULE;
-	}
 
 	/* 'brightness' [R/W] */
-	entry =
-	    create_proc_entry("brightness", S_IFREG | S_IRUGO | S_IWUSR,
-			      acpi_device_dir(device));
+	acpi_video_device_brightness_fops.write =
+		acpi_video_device_write_brightness;
+	entry = proc_create_data("brightness", S_IFREG | S_IRUGO | S_IWUSR,
+				 acpi_device_dir(device),
+				 &acpi_video_device_brightness_fops,
+				 acpi_driver_data(device));
 	if (!entry)
 		return -ENODEV;
-	else {
-		acpi_video_device_brightness_fops.write = acpi_video_device_write_brightness;
-		entry->proc_fops = &acpi_video_device_brightness_fops;
-		entry->data = acpi_driver_data(device);
-		entry->owner = THIS_MODULE;
-	}
 
 	/* 'EDID' [R] */
-	entry = create_proc_entry("EDID", S_IRUGO, acpi_device_dir(device));
+	entry = proc_create_data("EDID", S_IRUGO, acpi_device_dir(device),
+				 &acpi_video_device_EDID_fops,
+				 acpi_driver_data(device));
 	if (!entry)
 		return -ENODEV;
-	else {
-		entry->proc_fops = &acpi_video_device_EDID_fops;
-		entry->data = acpi_driver_data(device);
-		entry->owner = THIS_MODULE;
-	}
-
 	return 0;
 }
 
@@ -1353,61 +1347,43 @@ static int acpi_video_bus_add_fs(struct acpi_device *device)
 	}
 
 	/* 'info' [R] */
-	entry = create_proc_entry("info", S_IRUGO, acpi_device_dir(device));
+	entry = proc_create_data("info", S_IRUGO, acpi_device_dir(device),
+				 &acpi_video_bus_info_fops,
+				 acpi_driver_data(device));
 	if (!entry)
 		return -ENODEV;
-	else {
-		entry->proc_fops = &acpi_video_bus_info_fops;
-		entry->data = acpi_driver_data(device);
-		entry->owner = THIS_MODULE;
-	}
 
 	/* 'ROM' [R] */
-	entry = create_proc_entry("ROM", S_IRUGO, acpi_device_dir(device));
+	entry = proc_create_data("ROM", S_IRUGO, acpi_device_dir(device),
+				 &acpi_video_bus_ROM_fops,
+				 acpi_driver_data(device));
 	if (!entry)
 		return -ENODEV;
-	else {
-		entry->proc_fops = &acpi_video_bus_ROM_fops;
-		entry->data = acpi_driver_data(device);
-		entry->owner = THIS_MODULE;
-	}
 
 	/* 'POST_info' [R] */
-	entry =
-	    create_proc_entry("POST_info", S_IRUGO, acpi_device_dir(device));
+	entry = proc_create_data("POST_info", S_IRUGO, acpi_device_dir(device),
+				 &acpi_video_bus_POST_info_fops,
+				 acpi_driver_data(device));
 	if (!entry)
 		return -ENODEV;
-	else {
-		entry->proc_fops = &acpi_video_bus_POST_info_fops;
-		entry->data = acpi_driver_data(device);
-		entry->owner = THIS_MODULE;
-	}
 
 	/* 'POST' [R/W] */
-	entry =
-	    create_proc_entry("POST", S_IFREG | S_IRUGO | S_IRUSR,
-			      acpi_device_dir(device));
+	acpi_video_bus_POST_fops.write = acpi_video_bus_write_POST;
+	entry = proc_create_data("POST", S_IFREG | S_IRUGO | S_IRUSR,
+				 acpi_device_dir(device),
+				 &acpi_video_bus_POST_fops,
+				 acpi_driver_data(device));
 	if (!entry)
 		return -ENODEV;
-	else {
-		acpi_video_bus_POST_fops.write = acpi_video_bus_write_POST;
-		entry->proc_fops = &acpi_video_bus_POST_fops;
-		entry->data = acpi_driver_data(device);
-		entry->owner = THIS_MODULE;
-	}
 
 	/* 'DOS' [R/W] */
-	entry =
-	    create_proc_entry("DOS", S_IFREG | S_IRUGO | S_IRUSR,
-			      acpi_device_dir(device));
+	acpi_video_bus_DOS_fops.write = acpi_video_bus_write_DOS;
+	entry = proc_create_data("DOS", S_IFREG | S_IRUGO | S_IRUSR,
+				 acpi_device_dir(device),
+				 &acpi_video_bus_DOS_fops,
+				 acpi_driver_data(device));
 	if (!entry)
 		return -ENODEV;
-	else {
-		acpi_video_bus_DOS_fops.write = acpi_video_bus_write_DOS;
-		entry->proc_fops = &acpi_video_bus_DOS_fops;
-		entry->data = acpi_driver_data(device);
-		entry->owner = THIS_MODULE;
-	}
 
 	return 0;
 }
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 1fef7df8c9d6..9fd4a8534146 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -396,6 +396,8 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
 	if (!firmware_p)
 		return -EINVAL;
 
+	printk(KERN_INFO "firmware: requesting %s\n", name);
+
 	*firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
 	if (!firmware) {
 		printk(KERN_ERR "%s: kmalloc(struct firmware) failed\n",
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
index 280e71ee744c..5b4c6e649c11 100644
--- a/drivers/block/aoe/aoe.h
+++ b/drivers/block/aoe/aoe.h
@@ -195,7 +195,6 @@ void aoedev_exit(void);
 struct aoedev *aoedev_by_aoeaddr(int maj, int min);
 struct aoedev *aoedev_by_sysminor_m(ulong sysminor);
 void aoedev_downdev(struct aoedev *d);
-int aoedev_isbusy(struct aoedev *d);
 int aoedev_flush(const char __user *str, size_t size);
 
 int aoenet_init(void);
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index d00293ba3b45..8fc429cf82b6 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -668,16 +668,16 @@ ataid_complete(struct aoedev *d, struct aoetgt *t, unsigned char *id)
 	u16 n;
 
 	/* word 83: command set supported */
-	n = le16_to_cpu(get_unaligned((__le16 *) &id[83<<1]));
+	n = get_unaligned_le16(&id[83 << 1]);
 
 	/* word 86: command set/feature enabled */
-	n |= le16_to_cpu(get_unaligned((__le16 *) &id[86<<1]));
+	n |= get_unaligned_le16(&id[86 << 1]);
 
 	if (n & (1<<10)) {	/* bit 10: LBA 48 */
 		d->flags |= DEVFL_EXT;
 
 		/* word 100: number lba48 sectors */
-		ssize = le64_to_cpu(get_unaligned((__le64 *) &id[100<<1]));
+		ssize = get_unaligned_le64(&id[100 << 1]);
 
 		/* set as in ide-disk.c:init_idedisk_capacity */
 		d->geo.cylinders = ssize;
@@ -688,12 +688,12 @@ ataid_complete(struct aoedev *d, struct aoetgt *t, unsigned char *id)
 		d->flags &= ~DEVFL_EXT;
 
 		/* number lba28 sectors */
-		ssize = le32_to_cpu(get_unaligned((__le32 *) &id[60<<1]));
+		ssize = get_unaligned_le32(&id[60 << 1]);
 
 		/* NOTE: obsolete in ATA 6 */
-		d->geo.cylinders = le16_to_cpu(get_unaligned((__le16 *) &id[54<<1]));
-		d->geo.heads = le16_to_cpu(get_unaligned((__le16 *) &id[55<<1]));
-		d->geo.sectors = le16_to_cpu(get_unaligned((__le16 *) &id[56<<1]));
+		d->geo.cylinders = get_unaligned_le16(&id[54 << 1]);
+		d->geo.heads = get_unaligned_le16(&id[55 << 1]);
+		d->geo.sectors = get_unaligned_le16(&id[56 << 1]);
 	}
 
 	if (d->ssize != ssize)
@@ -779,7 +779,7 @@ aoecmd_ata_rsp(struct sk_buff *skb)
 	u16 aoemajor;
 
 	hin = (struct aoe_hdr *) skb_mac_header(skb);
-	aoemajor = be16_to_cpu(get_unaligned(&hin->major));
+	aoemajor = get_unaligned_be16(&hin->major);
 	d = aoedev_by_aoeaddr(aoemajor, hin->minor);
 	if (d == NULL) {
 		snprintf(ebuf, sizeof ebuf, "aoecmd_ata_rsp: ata response "
@@ -791,7 +791,7 @@ aoecmd_ata_rsp(struct sk_buff *skb)
 
 	spin_lock_irqsave(&d->lock, flags);
 
-	n = be32_to_cpu(get_unaligned(&hin->tag));
+	n = get_unaligned_be32(&hin->tag);
 	t = gettgt(d, hin->src);
 	if (t == NULL) {
 		printk(KERN_INFO "aoe: can't find target e%ld.%d:%012llx\n",
@@ -806,9 +806,9 @@ aoecmd_ata_rsp(struct sk_buff *skb)
 		snprintf(ebuf, sizeof ebuf,
 			"%15s e%d.%d tag=%08x@%08lx\n",
 			"unexpected rsp",
-			be16_to_cpu(get_unaligned(&hin->major)),
+			get_unaligned_be16(&hin->major),
 			hin->minor,
-			be32_to_cpu(get_unaligned(&hin->tag)),
+			get_unaligned_be32(&hin->tag),
 			jiffies);
 		aoechr_error(ebuf);
 		return;
@@ -873,7 +873,7 @@ aoecmd_ata_rsp(struct sk_buff *skb)
 			printk(KERN_INFO
 				"aoe: unrecognized ata command %2.2Xh for %d.%d\n",
 				ahout->cmdstat,
-				be16_to_cpu(get_unaligned(&hin->major)),
+				get_unaligned_be16(&hin->major),
 				hin->minor);
 	}
 }
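The aoe hunks above swap the open-coded endian reads for the combined unaligned helpers: get_unaligned_be16/be32 and get_unaligned_le16/le32/le64 read a possibly misaligned buffer and convert byte order in one call. A small sketch of the equivalence, using a hypothetical helper name rather than anything in the aoe code:

	#include <linux/kernel.h>
	#include <asm/unaligned.h>

	/* Both forms read a big-endian 16-bit field at an arbitrary offset. */
	static u16 read_be16_example(const unsigned char *buf)
	{
		u16 old_style = be16_to_cpu(get_unaligned((const __be16 *)buf));
		u16 new_style = get_unaligned_be16(buf);

		/* The two values are identical; the helper is just the fused form. */
		return new_style == old_style ? new_style : 0;
	}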
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index f9a1cd9edb77..a1d813ab0d6b 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -18,24 +18,6 @@ static void skbpoolfree(struct aoedev *d);
 static struct aoedev *devlist;
 static DEFINE_SPINLOCK(devlist_lock);
 
-int
-aoedev_isbusy(struct aoedev *d)
-{
-	struct aoetgt **t, **te;
-	struct frame *f, *e;
-
-	t = d->targets;
-	te = t + NTARGETS;
-	for (; t < te && *t; t++) {
-		f = (*t)->frames;
-		e = f + (*t)->nframes;
-		for (; f < e; f++)
-			if (f->tag != FREETAG)
-				return 1;
-	}
-	return 0;
-}
-
 struct aoedev *
 aoedev_by_aoeaddr(int maj, int min)
 {
diff --git a/drivers/block/aoe/aoenet.c b/drivers/block/aoe/aoenet.c
index 18d243c73eee..d625169c8e48 100644
--- a/drivers/block/aoe/aoenet.c
+++ b/drivers/block/aoe/aoenet.c
@@ -128,7 +128,7 @@ aoenet_rcv(struct sk_buff *skb, struct net_device *ifp, struct packet_type *pt,
 	skb_push(skb, ETH_HLEN);	/* (1) */
 
 	h = (struct aoe_hdr *) skb_mac_header(skb);
-	n = be32_to_cpu(get_unaligned(&h->tag));
+	n = get_unaligned_be32(&h->tag);
 	if ((h->verfl & AOEFL_RSP) == 0 || (n & 1<<31))
 		goto exit;
 
@@ -140,7 +140,7 @@ aoenet_rcv(struct sk_buff *skb, struct net_device *ifp, struct packet_type *pt,
 		printk(KERN_ERR
 			"%s%d.%d@%s; ecode=%d '%s'\n",
 			"aoe: error packet from ",
-			be16_to_cpu(get_unaligned(&h->major)),
+			get_unaligned_be16(&h->major),
 			h->minor, skb->dev->name,
 			h->err, aoe_errlist[n]);
 		goto exit;
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index cf6083a1f928..e539be5750dc 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -425,7 +425,7 @@ static void __devinit cciss_procinit(int i)
 	struct proc_dir_entry *pde;
 
 	if (proc_cciss == NULL)
-		proc_cciss = proc_mkdir("cciss", proc_root_driver);
+		proc_cciss = proc_mkdir("driver/cciss", NULL);
 	if (!proc_cciss)
 		return;
 	pde = proc_create(hba[i]->devname, S_IWUSR | S_IRUSR | S_IRGRP |
@@ -3700,7 +3700,7 @@ static void __exit cciss_cleanup(void)
 			cciss_remove_one(hba[i]->pdev);
 		}
 	}
-	remove_proc_entry("cciss", proc_root_driver);
+	remove_proc_entry("driver/cciss", NULL);
 }
 
 static void fail_all_cmds(unsigned long ctlr)
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index 69199185ff4b..09c14341e6e3 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -214,7 +214,7 @@ static struct proc_dir_entry *proc_array;
 static void __init ida_procinit(int i)
 {
 	if (proc_array == NULL) {
-		proc_array = proc_mkdir("cpqarray", proc_root_driver);
+		proc_array = proc_mkdir("driver/cpqarray", NULL);
 		if (!proc_array) return;
 	}
 
@@ -1796,7 +1796,7 @@ static void __exit cpqarray_exit(void)
 		}
 	}
 
-	remove_proc_entry("cpqarray", proc_root_driver);
+	remove_proc_entry("driver/cpqarray", NULL);
 }
 
 module_init(cpqarray_init)
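The cciss and cpqarray hunks stop dereferencing the exported proc_root_driver pointer; creating and removing the entry by the relative path "driver/<name>" with a NULL parent lands in the same place under /proc/driver. A hedged sketch with a placeholder directory name:

	#include <linux/proc_fs.h>

	static struct proc_dir_entry *example_dir;

	static void example_proc_register(void)
	{
		/* was: example_dir = proc_mkdir("example", proc_root_driver); */
		example_dir = proc_mkdir("driver/example", NULL);
	}

	static void example_proc_unregister(void)
	{
		/* The matching removal also takes the path form with a NULL parent. */
		remove_proc_entry("driver/example", NULL);
	}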
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 7652e87d60c5..395f8ea7981c 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c | |||
@@ -4526,14 +4526,15 @@ static void __init parse_floppy_cfg_string(char *cfg) | |||
4526 | } | 4526 | } |
4527 | } | 4527 | } |
4528 | 4528 | ||
4529 | int __init init_module(void) | 4529 | static int __init floppy_module_init(void) |
4530 | { | 4530 | { |
4531 | if (floppy) | 4531 | if (floppy) |
4532 | parse_floppy_cfg_string(floppy); | 4532 | parse_floppy_cfg_string(floppy); |
4533 | return floppy_init(); | 4533 | return floppy_init(); |
4534 | } | 4534 | } |
4535 | module_init(floppy_module_init); | ||
4535 | 4536 | ||
4536 | void cleanup_module(void) | 4537 | static void __exit floppy_module_exit(void) |
4537 | { | 4538 | { |
4538 | int drive; | 4539 | int drive; |
4539 | 4540 | ||
@@ -4562,6 +4563,7 @@ void cleanup_module(void) | |||
4562 | /* eject disk, if any */ | 4563 | /* eject disk, if any */ |
4563 | fd_eject(0); | 4564 | fd_eject(0); |
4564 | } | 4565 | } |
4566 | module_exit(floppy_module_exit); | ||
4565 | 4567 | ||
4566 | module_param(floppy, charp, 0); | 4568 | module_param(floppy, charp, 0); |
4567 | module_param(FLOPPY_IRQ, int, 0); | 4569 | module_param(FLOPPY_IRQ, int, 0); |
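
The floppy hunks above swap the legacy init_module()/cleanup_module() entry points for module_init()/module_exit(), which lets the functions be static and carry the __init/__exit section annotations. The bare-bones pattern, as a sketch:

#include <linux/init.h>
#include <linux/module.h>

static int __init demo_init(void)
{
	/* probe hardware, register devices, ... */
	return 0;
}

static void __exit demo_exit(void)
{
	/* undo everything demo_init() did */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
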
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index f75bda16a1fc..ad98dda6037d 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/kernel.h> | 29 | #include <linux/kernel.h> |
30 | #include <net/sock.h> | 30 | #include <net/sock.h> |
31 | #include <linux/net.h> | 31 | #include <linux/net.h> |
32 | #include <linux/kthread.h> | ||
32 | 33 | ||
33 | #include <asm/uaccess.h> | 34 | #include <asm/uaccess.h> |
34 | #include <asm/system.h> | 35 | #include <asm/system.h> |
@@ -55,6 +56,7 @@ static unsigned int debugflags; | |||
55 | 56 | ||
56 | static unsigned int nbds_max = 16; | 57 | static unsigned int nbds_max = 16; |
57 | static struct nbd_device *nbd_dev; | 58 | static struct nbd_device *nbd_dev; |
59 | static int max_part; | ||
58 | 60 | ||
59 | /* | 61 | /* |
60 | * Use just one lock (or at most 1 per NIC). Two arguments for this: | 62 | * Use just one lock (or at most 1 per NIC). Two arguments for this: |
@@ -337,7 +339,7 @@ static struct request *nbd_read_stat(struct nbd_device *lo) | |||
337 | } | 339 | } |
338 | 340 | ||
339 | req = nbd_find_request(lo, *(struct request **)reply.handle); | 341 | req = nbd_find_request(lo, *(struct request **)reply.handle); |
340 | if (unlikely(IS_ERR(req))) { | 342 | if (IS_ERR(req)) { |
341 | result = PTR_ERR(req); | 343 | result = PTR_ERR(req); |
342 | if (result != -ENOENT) | 344 | if (result != -ENOENT) |
343 | goto harderror; | 345 | goto harderror; |
@@ -441,6 +443,85 @@ static void nbd_clear_que(struct nbd_device *lo) | |||
441 | } | 443 | } |
442 | 444 | ||
443 | 445 | ||
446 | static void nbd_handle_req(struct nbd_device *lo, struct request *req) | ||
447 | { | ||
448 | if (!blk_fs_request(req)) | ||
449 | goto error_out; | ||
450 | |||
451 | nbd_cmd(req) = NBD_CMD_READ; | ||
452 | if (rq_data_dir(req) == WRITE) { | ||
453 | nbd_cmd(req) = NBD_CMD_WRITE; | ||
454 | if (lo->flags & NBD_READ_ONLY) { | ||
455 | printk(KERN_ERR "%s: Write on read-only\n", | ||
456 | lo->disk->disk_name); | ||
457 | goto error_out; | ||
458 | } | ||
459 | } | ||
460 | |||
461 | req->errors = 0; | ||
462 | |||
463 | mutex_lock(&lo->tx_lock); | ||
464 | if (unlikely(!lo->sock)) { | ||
465 | mutex_unlock(&lo->tx_lock); | ||
466 | printk(KERN_ERR "%s: Attempted send on closed socket\n", | ||
467 | lo->disk->disk_name); | ||
468 | req->errors++; | ||
469 | nbd_end_request(req); | ||
470 | return; | ||
471 | } | ||
472 | |||
473 | lo->active_req = req; | ||
474 | |||
475 | if (nbd_send_req(lo, req) != 0) { | ||
476 | printk(KERN_ERR "%s: Request send failed\n", | ||
477 | lo->disk->disk_name); | ||
478 | req->errors++; | ||
479 | nbd_end_request(req); | ||
480 | } else { | ||
481 | spin_lock(&lo->queue_lock); | ||
482 | list_add(&req->queuelist, &lo->queue_head); | ||
483 | spin_unlock(&lo->queue_lock); | ||
484 | } | ||
485 | |||
486 | lo->active_req = NULL; | ||
487 | mutex_unlock(&lo->tx_lock); | ||
488 | wake_up_all(&lo->active_wq); | ||
489 | |||
490 | return; | ||
491 | |||
492 | error_out: | ||
493 | req->errors++; | ||
494 | nbd_end_request(req); | ||
495 | } | ||
496 | |||
497 | static int nbd_thread(void *data) | ||
498 | { | ||
499 | struct nbd_device *lo = data; | ||
500 | struct request *req; | ||
501 | |||
502 | set_user_nice(current, -20); | ||
503 | while (!kthread_should_stop() || !list_empty(&lo->waiting_queue)) { | ||
504 | /* wait for something to do */ | ||
505 | wait_event_interruptible(lo->waiting_wq, | ||
506 | kthread_should_stop() || | ||
507 | !list_empty(&lo->waiting_queue)); | ||
508 | |||
509 | /* extract request */ | ||
510 | if (list_empty(&lo->waiting_queue)) | ||
511 | continue; | ||
512 | |||
513 | spin_lock_irq(&lo->queue_lock); | ||
514 | req = list_entry(lo->waiting_queue.next, struct request, | ||
515 | queuelist); | ||
516 | list_del_init(&req->queuelist); | ||
517 | spin_unlock_irq(&lo->queue_lock); | ||
518 | |||
519 | /* handle request */ | ||
520 | nbd_handle_req(lo, req); | ||
521 | } | ||
522 | return 0; | ||
523 | } | ||
524 | |||
444 | /* | 525 | /* |
445 | * We always wait for result of write, for now. It would be nice to make it optional | 526 | * We always wait for result of write, for now. It would be nice to make it optional |
446 | * in future | 527 | * in future |
@@ -456,65 +537,23 @@ static void do_nbd_request(struct request_queue * q) | |||
456 | struct nbd_device *lo; | 537 | struct nbd_device *lo; |
457 | 538 | ||
458 | blkdev_dequeue_request(req); | 539 | blkdev_dequeue_request(req); |
540 | |||
541 | spin_unlock_irq(q->queue_lock); | ||
542 | |||
459 | dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%x)\n", | 543 | dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%x)\n", |
460 | req->rq_disk->disk_name, req, req->cmd_type); | 544 | req->rq_disk->disk_name, req, req->cmd_type); |
461 | 545 | ||
462 | if (!blk_fs_request(req)) | ||
463 | goto error_out; | ||
464 | |||
465 | lo = req->rq_disk->private_data; | 546 | lo = req->rq_disk->private_data; |
466 | 547 | ||
467 | BUG_ON(lo->magic != LO_MAGIC); | 548 | BUG_ON(lo->magic != LO_MAGIC); |
468 | 549 | ||
469 | nbd_cmd(req) = NBD_CMD_READ; | 550 | spin_lock_irq(&lo->queue_lock); |
470 | if (rq_data_dir(req) == WRITE) { | 551 | list_add_tail(&req->queuelist, &lo->waiting_queue); |
471 | nbd_cmd(req) = NBD_CMD_WRITE; | 552 | spin_unlock_irq(&lo->queue_lock); |
472 | if (lo->flags & NBD_READ_ONLY) { | ||
473 | printk(KERN_ERR "%s: Write on read-only\n", | ||
474 | lo->disk->disk_name); | ||
475 | goto error_out; | ||
476 | } | ||
477 | } | ||
478 | |||
479 | req->errors = 0; | ||
480 | spin_unlock_irq(q->queue_lock); | ||
481 | |||
482 | mutex_lock(&lo->tx_lock); | ||
483 | if (unlikely(!lo->sock)) { | ||
484 | mutex_unlock(&lo->tx_lock); | ||
485 | printk(KERN_ERR "%s: Attempted send on closed socket\n", | ||
486 | lo->disk->disk_name); | ||
487 | req->errors++; | ||
488 | nbd_end_request(req); | ||
489 | spin_lock_irq(q->queue_lock); | ||
490 | continue; | ||
491 | } | ||
492 | |||
493 | lo->active_req = req; | ||
494 | 553 | ||
495 | if (nbd_send_req(lo, req) != 0) { | 554 | wake_up(&lo->waiting_wq); |
496 | printk(KERN_ERR "%s: Request send failed\n", | ||
497 | lo->disk->disk_name); | ||
498 | req->errors++; | ||
499 | nbd_end_request(req); | ||
500 | } else { | ||
501 | spin_lock(&lo->queue_lock); | ||
502 | list_add(&req->queuelist, &lo->queue_head); | ||
503 | spin_unlock(&lo->queue_lock); | ||
504 | } | ||
505 | |||
506 | lo->active_req = NULL; | ||
507 | mutex_unlock(&lo->tx_lock); | ||
508 | wake_up_all(&lo->active_wq); | ||
509 | 555 | ||
510 | spin_lock_irq(q->queue_lock); | 556 | spin_lock_irq(q->queue_lock); |
511 | continue; | ||
512 | |||
513 | error_out: | ||
514 | req->errors++; | ||
515 | spin_unlock(q->queue_lock); | ||
516 | nbd_end_request(req); | ||
517 | spin_lock(q->queue_lock); | ||
518 | } | 557 | } |
519 | } | 558 | } |
520 | 559 | ||
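
With the rework above, do_nbd_request() no longer performs socket I/O under q->queue_lock: it only dequeues the request, parks it on lo->waiting_queue, and wakes the worker thread, since nbd_send_req() may sleep. A stripped-down sketch of that hand-off, assuming this kernel generation's elv_next_request()/blkdev_dequeue_request() helpers and a hypothetical demo_dev in place of struct nbd_device:

#include <linux/blkdev.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct demo_dev {
	spinlock_t		queue_lock;
	struct list_head	waiting_queue;
	wait_queue_head_t	waiting_wq;
};

/* Called by the block layer with q->queue_lock held. */
static void demo_request_fn(struct request_queue *q)
{
	struct request *req;

	while ((req = elv_next_request(q)) != NULL) {
		struct demo_dev *dev = req->rq_disk->private_data;

		blkdev_dequeue_request(req);

		/* Drop the queue lock before doing anything that may sleep. */
		spin_unlock_irq(q->queue_lock);

		spin_lock_irq(&dev->queue_lock);
		list_add_tail(&req->queuelist, &dev->waiting_queue);
		spin_unlock_irq(&dev->queue_lock);

		wake_up(&dev->waiting_wq);

		spin_lock_irq(q->queue_lock);
	}
}
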
@@ -524,6 +563,7 @@ static int nbd_ioctl(struct inode *inode, struct file *file, | |||
524 | struct nbd_device *lo = inode->i_bdev->bd_disk->private_data; | 563 | struct nbd_device *lo = inode->i_bdev->bd_disk->private_data; |
525 | int error; | 564 | int error; |
526 | struct request sreq ; | 565 | struct request sreq ; |
566 | struct task_struct *thread; | ||
527 | 567 | ||
528 | if (!capable(CAP_SYS_ADMIN)) | 568 | if (!capable(CAP_SYS_ADMIN)) |
529 | return -EPERM; | 569 | return -EPERM; |
@@ -572,10 +612,13 @@ static int nbd_ioctl(struct inode *inode, struct file *file, | |||
572 | error = -EINVAL; | 612 | error = -EINVAL; |
573 | file = fget(arg); | 613 | file = fget(arg); |
574 | if (file) { | 614 | if (file) { |
615 | struct block_device *bdev = inode->i_bdev; | ||
575 | inode = file->f_path.dentry->d_inode; | 616 | inode = file->f_path.dentry->d_inode; |
576 | if (S_ISSOCK(inode->i_mode)) { | 617 | if (S_ISSOCK(inode->i_mode)) { |
577 | lo->file = file; | 618 | lo->file = file; |
578 | lo->sock = SOCKET_I(inode); | 619 | lo->sock = SOCKET_I(inode); |
620 | if (max_part > 0) | ||
621 | bdev->bd_invalidated = 1; | ||
579 | error = 0; | 622 | error = 0; |
580 | } else { | 623 | } else { |
581 | fput(file); | 624 | fput(file); |
@@ -607,7 +650,12 @@ static int nbd_ioctl(struct inode *inode, struct file *file, | |||
607 | case NBD_DO_IT: | 650 | case NBD_DO_IT: |
608 | if (!lo->file) | 651 | if (!lo->file) |
609 | return -EINVAL; | 652 | return -EINVAL; |
653 | thread = kthread_create(nbd_thread, lo, lo->disk->disk_name); | ||
654 | if (IS_ERR(thread)) | ||
655 | return PTR_ERR(thread); | ||
656 | wake_up_process(thread); | ||
610 | error = nbd_do_it(lo); | 657 | error = nbd_do_it(lo); |
658 | kthread_stop(thread); | ||
611 | if (error) | 659 | if (error) |
612 | return error; | 660 | return error; |
613 | sock_shutdown(lo, 1); | 661 | sock_shutdown(lo, 1); |
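
The NBD_DO_IT case now brackets nbd_do_it() with a per-device worker: kthread_create() plus wake_up_process() to start it, kthread_stop() to tear it down. A minimal sketch of that lifecycle; the worker's loop condition keeps draining the list even after a stop has been requested, mirroring the !kthread_should_stop() || !list_empty(...) test in nbd_thread() above (demo_dev is the hypothetical structure from the earlier sketch):

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/wait.h>

struct demo_dev {			/* same shape as before */
	struct list_head	waiting_queue;
	wait_queue_head_t	waiting_wq;
};

static int demo_worker(void *data)
{
	struct demo_dev *dev = data;

	while (!kthread_should_stop() || !list_empty(&dev->waiting_queue)) {
		wait_event_interruptible(dev->waiting_wq,
					 kthread_should_stop() ||
					 !list_empty(&dev->waiting_queue));
		/* ... pop one queued request and service it ... */
	}
	return 0;
}

static int demo_run(struct demo_dev *dev)
{
	struct task_struct *thread;

	thread = kthread_create(demo_worker, dev, "demo_worker");
	if (IS_ERR(thread))
		return PTR_ERR(thread);
	wake_up_process(thread);

	/* ... block here doing the main work (nbd_do_it() in the driver) ... */

	kthread_stop(thread);	/* wakes the worker and waits for it to exit */
	return 0;
}
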
@@ -620,6 +668,8 @@ static int nbd_ioctl(struct inode *inode, struct file *file, | |||
620 | lo->bytesize = 0; | 668 | lo->bytesize = 0; |
621 | inode->i_bdev->bd_inode->i_size = 0; | 669 | inode->i_bdev->bd_inode->i_size = 0; |
622 | set_capacity(lo->disk, 0); | 670 | set_capacity(lo->disk, 0); |
671 | if (max_part > 0) | ||
672 | ioctl_by_bdev(inode->i_bdev, BLKRRPART, 0); | ||
623 | return lo->harderror; | 673 | return lo->harderror; |
624 | case NBD_CLEAR_QUE: | 674 | case NBD_CLEAR_QUE: |
625 | /* | 675 | /* |
@@ -653,6 +703,7 @@ static int __init nbd_init(void) | |||
653 | { | 703 | { |
654 | int err = -ENOMEM; | 704 | int err = -ENOMEM; |
655 | int i; | 705 | int i; |
706 | int part_shift; | ||
656 | 707 | ||
657 | BUILD_BUG_ON(sizeof(struct nbd_request) != 28); | 708 | BUILD_BUG_ON(sizeof(struct nbd_request) != 28); |
658 | 709 | ||
@@ -660,8 +711,17 @@ static int __init nbd_init(void) | |||
660 | if (!nbd_dev) | 711 | if (!nbd_dev) |
661 | return -ENOMEM; | 712 | return -ENOMEM; |
662 | 713 | ||
714 | if (max_part < 0) { | ||
715 | printk(KERN_CRIT "nbd: max_part must be >= 0\n"); | ||
716 | return -EINVAL; | ||
717 | } | ||
718 | |||
719 | part_shift = 0; | ||
720 | if (max_part > 0) | ||
721 | part_shift = fls(max_part); | ||
722 | |||
663 | for (i = 0; i < nbds_max; i++) { | 723 | for (i = 0; i < nbds_max; i++) { |
664 | struct gendisk *disk = alloc_disk(1); | 724 | struct gendisk *disk = alloc_disk(1 << part_shift); |
665 | elevator_t *old_e; | 725 | elevator_t *old_e; |
666 | if (!disk) | 726 | if (!disk) |
667 | goto out; | 727 | goto out; |
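
The new max_part parameter reserves a block of minor numbers per device: part_shift = fls(max_part), each gendisk is allocated 1 << part_shift minors, and device i starts at minor i << part_shift (see the first_minor assignment in the next hunk). For example, max_part=15 gives fls(15) = 4, so 16 minors per device: the whole disk plus up to 15 partitions, with nbd0 at minor 0 and nbd1 at minor 16. A small sketch of the arithmetic:

#include <linux/bitops.h>	/* fls() */

/* Sketch: how a max_part module parameter maps onto minor numbers. */
static int demo_part_shift(int max_part)
{
	int part_shift = 0;

	if (max_part > 0)
		part_shift = fls(max_part);	/* e.g. fls(15) == 4 */
	return part_shift;
}

/*
 * With part_shift == 4:
 *   minors per disk      = 1 << 4 = 16   (alloc_disk(16))
 *   first minor of dev i = i << 4        (device 1 starts at minor 16)
 */
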
@@ -696,17 +756,18 @@ static int __init nbd_init(void) | |||
696 | nbd_dev[i].file = NULL; | 756 | nbd_dev[i].file = NULL; |
697 | nbd_dev[i].magic = LO_MAGIC; | 757 | nbd_dev[i].magic = LO_MAGIC; |
698 | nbd_dev[i].flags = 0; | 758 | nbd_dev[i].flags = 0; |
759 | INIT_LIST_HEAD(&nbd_dev[i].waiting_queue); | ||
699 | spin_lock_init(&nbd_dev[i].queue_lock); | 760 | spin_lock_init(&nbd_dev[i].queue_lock); |
700 | INIT_LIST_HEAD(&nbd_dev[i].queue_head); | 761 | INIT_LIST_HEAD(&nbd_dev[i].queue_head); |
701 | mutex_init(&nbd_dev[i].tx_lock); | 762 | mutex_init(&nbd_dev[i].tx_lock); |
702 | init_waitqueue_head(&nbd_dev[i].active_wq); | 763 | init_waitqueue_head(&nbd_dev[i].active_wq); |
764 | init_waitqueue_head(&nbd_dev[i].waiting_wq); | ||
703 | nbd_dev[i].blksize = 1024; | 765 | nbd_dev[i].blksize = 1024; |
704 | nbd_dev[i].bytesize = 0; | 766 | nbd_dev[i].bytesize = 0; |
705 | disk->major = NBD_MAJOR; | 767 | disk->major = NBD_MAJOR; |
706 | disk->first_minor = i; | 768 | disk->first_minor = i << part_shift; |
707 | disk->fops = &nbd_fops; | 769 | disk->fops = &nbd_fops; |
708 | disk->private_data = &nbd_dev[i]; | 770 | disk->private_data = &nbd_dev[i]; |
709 | disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO; | ||
710 | sprintf(disk->disk_name, "nbd%d", i); | 771 | sprintf(disk->disk_name, "nbd%d", i); |
711 | set_capacity(disk, 0); | 772 | set_capacity(disk, 0); |
712 | add_disk(disk); | 773 | add_disk(disk); |
@@ -744,7 +805,9 @@ MODULE_DESCRIPTION("Network Block Device"); | |||
744 | MODULE_LICENSE("GPL"); | 805 | MODULE_LICENSE("GPL"); |
745 | 806 | ||
746 | module_param(nbds_max, int, 0444); | 807 | module_param(nbds_max, int, 0444); |
747 | MODULE_PARM_DESC(nbds_max, "How many network block devices to initialize."); | 808 | MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)"); |
809 | module_param(max_part, int, 0444); | ||
810 | MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)"); | ||
748 | #ifndef NDEBUG | 811 | #ifndef NDEBUG |
749 | module_param(debugflags, int, 0644); | 812 | module_param(debugflags, int, 0644); |
750 | MODULE_PARM_DESC(debugflags, "flags for controlling debug output"); | 813 | MODULE_PARM_DESC(debugflags, "flags for controlling debug output"); |
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 3b806c9fb005..3ba1df93e9e3 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c | |||
@@ -2742,7 +2742,6 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev) | |||
2742 | int i; | 2742 | int i; |
2743 | int ret = 0; | 2743 | int ret = 0; |
2744 | char b[BDEVNAME_SIZE]; | 2744 | char b[BDEVNAME_SIZE]; |
2745 | struct proc_dir_entry *proc; | ||
2746 | struct block_device *bdev; | 2745 | struct block_device *bdev; |
2747 | 2746 | ||
2748 | if (pd->pkt_dev == dev) { | 2747 | if (pd->pkt_dev == dev) { |
@@ -2786,11 +2785,7 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev) | |||
2786 | goto out_mem; | 2785 | goto out_mem; |
2787 | } | 2786 | } |
2788 | 2787 | ||
2789 | proc = create_proc_entry(pd->name, 0, pkt_proc); | 2788 | proc_create_data(pd->name, 0, pkt_proc, &pkt_proc_fops, pd); |
2790 | if (proc) { | ||
2791 | proc->data = pd; | ||
2792 | proc->proc_fops = &pkt_proc_fops; | ||
2793 | } | ||
2794 | DPRINTK(DRIVER_NAME": writer %s mapped to %s\n", pd->name, bdevname(bdev, b)); | 2789 | DPRINTK(DRIVER_NAME": writer %s mapped to %s\n", pd->name, bdevname(bdev, b)); |
2795 | return 0; | 2790 | return 0; |
2796 | 2791 | ||
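
pkt_new_dev() now registers the per-writer proc entry and its private pointer in a single proc_create_data() call instead of patching proc_fops and data into the entry afterwards. A sketch of how the stored pointer typically flows back into a seq_file show routine; the PDE(inode)->data lookup is an assumption about this kernel generation's proc API, not something shown in the hunk:

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

struct demo_dev {
	int state;
};

static int demo_show(struct seq_file *m, void *v)
{
	struct demo_dev *dev = m->private;	/* set by single_open() below */

	seq_printf(m, "state: %d\n", dev->state);
	return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
	/* Hand the pointer stored by proc_create_data() to the seq_file. */
	return single_open(file, demo_show, PDE(inode)->data);
}

static const struct file_operations demo_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= demo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Registration, e.g. from a probe path (parent_dir and dev assumed): */
/*	proc_create_data("demo", 0, parent_dir, &demo_proc_fops, dev);  */
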
@@ -3099,7 +3094,7 @@ static int __init pkt_init(void) | |||
3099 | goto out_misc; | 3094 | goto out_misc; |
3100 | } | 3095 | } |
3101 | 3096 | ||
3102 | pkt_proc = proc_mkdir(DRIVER_NAME, proc_root_driver); | 3097 | pkt_proc = proc_mkdir("driver/"DRIVER_NAME, NULL); |
3103 | 3098 | ||
3104 | return 0; | 3099 | return 0; |
3105 | 3100 | ||
@@ -3115,7 +3110,7 @@ out2: | |||
3115 | 3110 | ||
3116 | static void __exit pkt_exit(void) | 3111 | static void __exit pkt_exit(void) |
3117 | { | 3112 | { |
3118 | remove_proc_entry(DRIVER_NAME, proc_root_driver); | 3113 | remove_proc_entry("driver/"DRIVER_NAME, NULL); |
3119 | misc_deregister(&pkt_misc); | 3114 | misc_deregister(&pkt_misc); |
3120 | 3115 | ||
3121 | pkt_debugfs_cleanup(); | 3116 | pkt_debugfs_cleanup(); |
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index d771da816d95..f2fff5799ddf 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c | |||
@@ -137,7 +137,7 @@ static void blkif_restart_queue_callback(void *arg) | |||
137 | schedule_work(&info->work); | 137 | schedule_work(&info->work); |
138 | } | 138 | } |
139 | 139 | ||
140 | int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg) | 140 | static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg) |
141 | { | 141 | { |
142 | /* We don't have real geometry info, but let's at least return | 142 | /* We don't have real geometry info, but let's at least return |
143 | values consistent with the size of the device */ | 143 | values consistent with the size of the device */ |
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c index b74b6c2768a8..5245a4a0ba74 100644 --- a/drivers/cdrom/viocd.c +++ b/drivers/cdrom/viocd.c | |||
@@ -144,6 +144,7 @@ static int proc_viocd_open(struct inode *inode, struct file *file) | |||
144 | } | 144 | } |
145 | 145 | ||
146 | static const struct file_operations proc_viocd_operations = { | 146 | static const struct file_operations proc_viocd_operations = { |
147 | .owner = THIS_MODULE, | ||
147 | .open = proc_viocd_open, | 148 | .open = proc_viocd_open, |
148 | .read = seq_read, | 149 | .read = seq_read, |
149 | .llseek = seq_lseek, | 150 | .llseek = seq_lseek, |
@@ -679,7 +680,6 @@ static struct vio_driver viocd_driver = { | |||
679 | 680 | ||
680 | static int __init viocd_init(void) | 681 | static int __init viocd_init(void) |
681 | { | 682 | { |
682 | struct proc_dir_entry *e; | ||
683 | int ret = 0; | 683 | int ret = 0; |
684 | 684 | ||
685 | if (!firmware_has_feature(FW_FEATURE_ISERIES)) | 685 | if (!firmware_has_feature(FW_FEATURE_ISERIES)) |
@@ -719,12 +719,8 @@ static int __init viocd_init(void) | |||
719 | if (ret) | 719 | if (ret) |
720 | goto out_free_info; | 720 | goto out_free_info; |
721 | 721 | ||
722 | e = create_proc_entry("iSeries/viocd", S_IFREG|S_IRUGO, NULL); | 722 | proc_create("iSeries/viocd", S_IFREG|S_IRUGO, NULL, |
723 | if (e) { | 723 | &proc_viocd_operations); |
724 | e->owner = THIS_MODULE; | ||
725 | e->proc_fops = &proc_viocd_operations; | ||
726 | } | ||
727 | |||
728 | return 0; | 724 | return 0; |
729 | 725 | ||
730 | out_free_info: | 726 | out_free_info: |
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index 929d4fa73fd9..5dce3877eee5 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig | |||
@@ -80,6 +80,15 @@ config VT_HW_CONSOLE_BINDING | |||
80 | information. For framebuffer console users, please refer to | 80 | information. For framebuffer console users, please refer to |
81 | <file:Documentation/fb/fbcon.txt>. | 81 | <file:Documentation/fb/fbcon.txt>. |
82 | 82 | ||
83 | config DEVKMEM | ||
84 | bool "/dev/kmem virtual device support" | ||
85 | default y | ||
86 | help | ||
87 | Say Y here if you want to support the /dev/kmem device. The | ||
88 | /dev/kmem device is rarely used, but can be used for certain | ||
89 | kinds of kernel debugging operations. | ||
90 | When in doubt, say "N". | ||
91 | |||
83 | config SERIAL_NONSTANDARD | 92 | config SERIAL_NONSTANDARD |
84 | bool "Non-standard serial port support" | 93 | bool "Non-standard serial port support" |
85 | depends on HAS_IOMEM | 94 | depends on HAS_IOMEM |
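
The new DEVKMEM option makes /dev/kmem support compile-time optional. A hedged sketch of how such a Kconfig symbol usually gates code; the guard below is purely illustrative, since the corresponding drivers/char/mem.c change is not part of this hunk:

#ifdef CONFIG_DEVKMEM
static int demo_kmem_enabled(void)
{
	return 1;	/* built only when CONFIG_DEVKMEM=y */
}
#else
static inline int demo_kmem_enabled(void)
{
	return 0;	/* option off: the kmem paths drop out of the build */
}
#endif
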
diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c index 17d54315e146..cdd876dbb2b0 100644 --- a/drivers/char/apm-emulation.c +++ b/drivers/char/apm-emulation.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/poll.h> | 14 | #include <linux/poll.h> |
15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
16 | #include <linux/proc_fs.h> | 16 | #include <linux/proc_fs.h> |
17 | #include <linux/seq_file.h> | ||
17 | #include <linux/miscdevice.h> | 18 | #include <linux/miscdevice.h> |
18 | #include <linux/apm_bios.h> | 19 | #include <linux/apm_bios.h> |
19 | #include <linux/capability.h> | 20 | #include <linux/capability.h> |
@@ -493,11 +494,10 @@ static struct miscdevice apm_device = { | |||
493 | * -1: Unknown | 494 | * -1: Unknown |
494 | * 8) min = minutes; sec = seconds | 495 | * 8) min = minutes; sec = seconds |
495 | */ | 496 | */ |
496 | static int apm_get_info(char *buf, char **start, off_t fpos, int length) | 497 | static int proc_apm_show(struct seq_file *m, void *v) |
497 | { | 498 | { |
498 | struct apm_power_info info; | 499 | struct apm_power_info info; |
499 | char *units; | 500 | char *units; |
500 | int ret; | ||
501 | 501 | ||
502 | info.ac_line_status = 0xff; | 502 | info.ac_line_status = 0xff; |
503 | info.battery_status = 0xff; | 503 | info.battery_status = 0xff; |
@@ -515,14 +515,27 @@ static int apm_get_info(char *buf, char **start, off_t fpos, int length) | |||
515 | case 1: units = "sec"; break; | 515 | case 1: units = "sec"; break; |
516 | } | 516 | } |
517 | 517 | ||
518 | ret = sprintf(buf, "%s 1.2 0x%02x 0x%02x 0x%02x 0x%02x %d%% %d %s\n", | 518 | seq_printf(m, "%s 1.2 0x%02x 0x%02x 0x%02x 0x%02x %d%% %d %s\n", |
519 | driver_version, APM_32_BIT_SUPPORT, | 519 | driver_version, APM_32_BIT_SUPPORT, |
520 | info.ac_line_status, info.battery_status, | 520 | info.ac_line_status, info.battery_status, |
521 | info.battery_flag, info.battery_life, | 521 | info.battery_flag, info.battery_life, |
522 | info.time, units); | 522 | info.time, units); |
523 | 523 | ||
524 | return ret; | 524 | return 0; |
525 | } | 525 | } |
526 | |||
527 | static int proc_apm_open(struct inode *inode, struct file *file) | ||
528 | { | ||
529 | return single_open(file, proc_apm_show, NULL); | ||
530 | } | ||
531 | |||
532 | static const struct file_operations apm_proc_fops = { | ||
533 | .owner = THIS_MODULE, | ||
534 | .open = proc_apm_open, | ||
535 | .read = seq_read, | ||
536 | .llseek = seq_lseek, | ||
537 | .release = single_release, | ||
538 | }; | ||
526 | #endif | 539 | #endif |
527 | 540 | ||
528 | static int kapmd(void *arg) | 541 | static int kapmd(void *arg) |
@@ -593,7 +606,7 @@ static int __init apm_init(void) | |||
593 | wake_up_process(kapmd_tsk); | 606 | wake_up_process(kapmd_tsk); |
594 | 607 | ||
595 | #ifdef CONFIG_PROC_FS | 608 | #ifdef CONFIG_PROC_FS |
596 | create_proc_info_entry("apm", 0, NULL, apm_get_info); | 609 | proc_create("apm", 0, NULL, &apm_proc_fops); |
597 | #endif | 610 | #endif |
598 | 611 | ||
599 | ret = misc_register(&apm_device); | 612 | ret = misc_register(&apm_device); |
diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c index 8609b8236c67..f49037b744f9 100644 --- a/drivers/char/i8k.c +++ b/drivers/char/i8k.c | |||
@@ -82,6 +82,7 @@ static int i8k_ioctl(struct inode *, struct file *, unsigned int, | |||
82 | unsigned long); | 82 | unsigned long); |
83 | 83 | ||
84 | static const struct file_operations i8k_fops = { | 84 | static const struct file_operations i8k_fops = { |
85 | .owner = THIS_MODULE, | ||
85 | .open = i8k_open_fs, | 86 | .open = i8k_open_fs, |
86 | .read = seq_read, | 87 | .read = seq_read, |
87 | .llseek = seq_lseek, | 88 | .llseek = seq_lseek, |
@@ -554,13 +555,10 @@ static int __init i8k_init(void) | |||
554 | return -ENODEV; | 555 | return -ENODEV; |
555 | 556 | ||
556 | /* Register the proc entry */ | 557 | /* Register the proc entry */ |
557 | proc_i8k = create_proc_entry("i8k", 0, NULL); | 558 | proc_i8k = proc_create("i8k", 0, NULL, &i8k_fops); |
558 | if (!proc_i8k) | 559 | if (!proc_i8k) |
559 | return -ENOENT; | 560 | return -ENOENT; |
560 | 561 | ||
561 | proc_i8k->proc_fops = &i8k_fops; | ||
562 | proc_i8k->owner = THIS_MODULE; | ||
563 | |||
564 | printk(KERN_INFO | 562 | printk(KERN_INFO |
565 | "Dell laptop SMM driver v%s Massimo Dal Zotto (dz@debian.org)\n", | 563 | "Dell laptop SMM driver v%s Massimo Dal Zotto (dz@debian.org)\n", |
566 | I8K_VERSION); | 564 | I8K_VERSION); |
diff --git a/drivers/char/ip2/ip2main.c b/drivers/char/ip2/ip2main.c index b1d6cad84282..0a61856c631f 100644 --- a/drivers/char/ip2/ip2main.c +++ b/drivers/char/ip2/ip2main.c | |||
@@ -133,8 +133,9 @@ | |||
133 | *****************/ | 133 | *****************/ |
134 | 134 | ||
135 | #include <linux/proc_fs.h> | 135 | #include <linux/proc_fs.h> |
136 | #include <linux/seq_file.h> | ||
136 | 137 | ||
137 | static int ip2_read_procmem(char *, char **, off_t, int); | 138 | static const struct file_operations ip2mem_proc_fops; |
138 | static int ip2_read_proc(char *, char **, off_t, int, int *, void * ); | 139 | static int ip2_read_proc(char *, char **, off_t, int, int *, void * ); |
139 | 140 | ||
140 | /********************/ | 141 | /********************/ |
@@ -423,7 +424,7 @@ cleanup_module(void) | |||
423 | } | 424 | } |
424 | put_tty_driver(ip2_tty_driver); | 425 | put_tty_driver(ip2_tty_driver); |
425 | unregister_chrdev(IP2_IPL_MAJOR, pcIpl); | 426 | unregister_chrdev(IP2_IPL_MAJOR, pcIpl); |
426 | remove_proc_entry("ip2mem", &proc_root); | 427 | remove_proc_entry("ip2mem", NULL); |
427 | 428 | ||
428 | // free memory | 429 | // free memory |
429 | for (i = 0; i < IP2_MAX_BOARDS; i++) { | 430 | for (i = 0; i < IP2_MAX_BOARDS; i++) { |
@@ -695,7 +696,7 @@ ip2_loadmain(int *iop, int *irqp, unsigned char *firmware, int firmsize) | |||
695 | } | 696 | } |
696 | } | 697 | } |
697 | /* Register the read_procmem thing */ | 698 | /* Register the read_procmem thing */ |
698 | if (!create_proc_info_entry("ip2mem",0,&proc_root,ip2_read_procmem)) { | 699 | if (!proc_create("ip2mem",0,NULL,&ip2mem_proc_fops)) { |
699 | printk(KERN_ERR "IP2: failed to register read_procmem\n"); | 700 | printk(KERN_ERR "IP2: failed to register read_procmem\n"); |
700 | } else { | 701 | } else { |
701 | 702 | ||
@@ -2967,65 +2968,61 @@ ip2_ipl_open( struct inode *pInode, struct file *pFile ) | |||
2967 | } | 2968 | } |
2968 | return 0; | 2969 | return 0; |
2969 | } | 2970 | } |
2970 | /******************************************************************************/ | ||
2971 | /* Function: ip2_read_procmem */ | ||
2972 | /* Parameters: */ | ||
2973 | /* */ | ||
2974 | /* Returns: Length of output */ | ||
2975 | /* */ | ||
2976 | /* Description: */ | ||
2977 | /* Supplies some driver operating parameters */ | ||
2978 | /* Not real useful unless your debugging the fifo */ | ||
2979 | /* */ | ||
2980 | /******************************************************************************/ | ||
2981 | |||
2982 | #define LIMIT (PAGE_SIZE - 120) | ||
2983 | 2971 | ||
2984 | static int | 2972 | static int |
2985 | ip2_read_procmem(char *buf, char **start, off_t offset, int len) | 2973 | proc_ip2mem_show(struct seq_file *m, void *v) |
2986 | { | 2974 | { |
2987 | i2eBordStrPtr pB; | 2975 | i2eBordStrPtr pB; |
2988 | i2ChanStrPtr pCh; | 2976 | i2ChanStrPtr pCh; |
2989 | PTTY tty; | 2977 | PTTY tty; |
2990 | int i; | 2978 | int i; |
2991 | 2979 | ||
2992 | len = 0; | ||
2993 | |||
2994 | #define FMTLINE "%3d: 0x%08x 0x%08x 0%011o 0%011o\n" | 2980 | #define FMTLINE "%3d: 0x%08x 0x%08x 0%011o 0%011o\n" |
2995 | #define FMTLIN2 " 0x%04x 0x%04x tx flow 0x%x\n" | 2981 | #define FMTLIN2 " 0x%04x 0x%04x tx flow 0x%x\n" |
2996 | #define FMTLIN3 " 0x%04x 0x%04x rc flow\n" | 2982 | #define FMTLIN3 " 0x%04x 0x%04x rc flow\n" |
2997 | 2983 | ||
2998 | len += sprintf(buf+len,"\n"); | 2984 | seq_printf(m,"\n"); |
2999 | 2985 | ||
3000 | for( i = 0; i < IP2_MAX_BOARDS; ++i ) { | 2986 | for( i = 0; i < IP2_MAX_BOARDS; ++i ) { |
3001 | pB = i2BoardPtrTable[i]; | 2987 | pB = i2BoardPtrTable[i]; |
3002 | if ( pB ) { | 2988 | if ( pB ) { |
3003 | len += sprintf(buf+len,"board %d:\n",i); | 2989 | seq_printf(m,"board %d:\n",i); |
3004 | len += sprintf(buf+len,"\tFifo rem: %d mty: %x outM %x\n", | 2990 | seq_printf(m,"\tFifo rem: %d mty: %x outM %x\n", |
3005 | pB->i2eFifoRemains,pB->i2eWaitingForEmptyFifo,pB->i2eOutMailWaiting); | 2991 | pB->i2eFifoRemains,pB->i2eWaitingForEmptyFifo,pB->i2eOutMailWaiting); |
3006 | } | 2992 | } |
3007 | } | 2993 | } |
3008 | 2994 | ||
3009 | len += sprintf(buf+len,"#: tty flags, port flags, cflags, iflags\n"); | 2995 | seq_printf(m,"#: tty flags, port flags, cflags, iflags\n"); |
3010 | for (i=0; i < IP2_MAX_PORTS; i++) { | 2996 | for (i=0; i < IP2_MAX_PORTS; i++) { |
3011 | if (len > LIMIT) | ||
3012 | break; | ||
3013 | pCh = DevTable[i]; | 2997 | pCh = DevTable[i]; |
3014 | if (pCh) { | 2998 | if (pCh) { |
3015 | tty = pCh->pTTY; | 2999 | tty = pCh->pTTY; |
3016 | if (tty && tty->count) { | 3000 | if (tty && tty->count) { |
3017 | len += sprintf(buf+len,FMTLINE,i,(int)tty->flags,pCh->flags, | 3001 | seq_printf(m,FMTLINE,i,(int)tty->flags,pCh->flags, |
3018 | tty->termios->c_cflag,tty->termios->c_iflag); | 3002 | tty->termios->c_cflag,tty->termios->c_iflag); |
3019 | 3003 | ||
3020 | len += sprintf(buf+len,FMTLIN2, | 3004 | seq_printf(m,FMTLIN2, |
3021 | pCh->outfl.asof,pCh->outfl.room,pCh->channelNeeds); | 3005 | pCh->outfl.asof,pCh->outfl.room,pCh->channelNeeds); |
3022 | len += sprintf(buf+len,FMTLIN3,pCh->infl.asof,pCh->infl.room); | 3006 | seq_printf(m,FMTLIN3,pCh->infl.asof,pCh->infl.room); |
3023 | } | 3007 | } |
3024 | } | 3008 | } |
3025 | } | 3009 | } |
3026 | return len; | 3010 | return 0; |
3011 | } | ||
3012 | |||
3013 | static int proc_ip2mem_open(struct inode *inode, struct file *file) | ||
3014 | { | ||
3015 | return single_open(file, proc_ip2mem_show, NULL); | ||
3027 | } | 3016 | } |
3028 | 3017 | ||
3018 | static const struct file_operations ip2mem_proc_fops = { | ||
3019 | .owner = THIS_MODULE, | ||
3020 | .open = proc_ip2mem_open, | ||
3021 | .read = seq_read, | ||
3022 | .llseek = seq_lseek, | ||
3023 | .release = single_release, | ||
3024 | }; | ||
3025 | |||
3029 | /* | 3026 | /* |
3030 | * This is the handler for /proc/tty/driver/ip2 | 3027 | * This is the handler for /proc/tty/driver/ip2 |
3031 | * | 3028 | * |
diff --git a/drivers/char/ipmi/Makefile b/drivers/char/ipmi/Makefile index 553f0a408eda..eb8a1a8c188e 100644 --- a/drivers/char/ipmi/Makefile +++ b/drivers/char/ipmi/Makefile | |||
@@ -9,7 +9,3 @@ obj-$(CONFIG_IPMI_DEVICE_INTERFACE) += ipmi_devintf.o | |||
9 | obj-$(CONFIG_IPMI_SI) += ipmi_si.o | 9 | obj-$(CONFIG_IPMI_SI) += ipmi_si.o |
10 | obj-$(CONFIG_IPMI_WATCHDOG) += ipmi_watchdog.o | 10 | obj-$(CONFIG_IPMI_WATCHDOG) += ipmi_watchdog.o |
11 | obj-$(CONFIG_IPMI_POWEROFF) += ipmi_poweroff.o | 11 | obj-$(CONFIG_IPMI_POWEROFF) += ipmi_poweroff.o |
12 | |||
13 | ipmi_si.o: $(ipmi_si-objs) | ||
14 | $(LD) -r -o $@ $(ipmi_si-objs) | ||
15 | |||
diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c index e736119b6497..7b98c067190a 100644 --- a/drivers/char/ipmi/ipmi_bt_sm.c +++ b/drivers/char/ipmi/ipmi_bt_sm.c | |||
@@ -37,26 +37,32 @@ | |||
37 | #define BT_DEBUG_ENABLE 1 /* Generic messages */ | 37 | #define BT_DEBUG_ENABLE 1 /* Generic messages */ |
38 | #define BT_DEBUG_MSG 2 /* Prints all request/response buffers */ | 38 | #define BT_DEBUG_MSG 2 /* Prints all request/response buffers */ |
39 | #define BT_DEBUG_STATES 4 /* Verbose look at state changes */ | 39 | #define BT_DEBUG_STATES 4 /* Verbose look at state changes */ |
40 | /* BT_DEBUG_OFF must be zero to correspond to the default uninitialized | 40 | /* |
41 | value */ | 41 | * BT_DEBUG_OFF must be zero to correspond to the default uninitialized |
42 | * value | ||
43 | */ | ||
42 | 44 | ||
43 | static int bt_debug; /* 0 == BT_DEBUG_OFF */ | 45 | static int bt_debug; /* 0 == BT_DEBUG_OFF */ |
44 | 46 | ||
45 | module_param(bt_debug, int, 0644); | 47 | module_param(bt_debug, int, 0644); |
46 | MODULE_PARM_DESC(bt_debug, "debug bitmask, 1=enable, 2=messages, 4=states"); | 48 | MODULE_PARM_DESC(bt_debug, "debug bitmask, 1=enable, 2=messages, 4=states"); |
47 | 49 | ||
48 | /* Typical "Get BT Capabilities" values are 2-3 retries, 5-10 seconds, | 50 | /* |
49 | and 64 byte buffers. However, one HP implementation wants 255 bytes of | 51 | * Typical "Get BT Capabilities" values are 2-3 retries, 5-10 seconds, |
50 | buffer (with a documented message of 160 bytes) so go for the max. | 52 | * and 64 byte buffers. However, one HP implementation wants 255 bytes of |
51 | Since the Open IPMI architecture is single-message oriented at this | 53 | * buffer (with a documented message of 160 bytes) so go for the max. |
52 | stage, the queue depth of BT is of no concern. */ | 54 | * Since the Open IPMI architecture is single-message oriented at this |
55 | * stage, the queue depth of BT is of no concern. | ||
56 | */ | ||
53 | 57 | ||
54 | #define BT_NORMAL_TIMEOUT 5 /* seconds */ | 58 | #define BT_NORMAL_TIMEOUT 5 /* seconds */ |
55 | #define BT_NORMAL_RETRY_LIMIT 2 | 59 | #define BT_NORMAL_RETRY_LIMIT 2 |
56 | #define BT_RESET_DELAY 6 /* seconds after warm reset */ | 60 | #define BT_RESET_DELAY 6 /* seconds after warm reset */ |
57 | 61 | ||
58 | /* States are written in chronological order and usually cover | 62 | /* |
59 | multiple rows of the state table discussion in the IPMI spec. */ | 63 | * States are written in chronological order and usually cover |
64 | * multiple rows of the state table discussion in the IPMI spec. | ||
65 | */ | ||
60 | 66 | ||
61 | enum bt_states { | 67 | enum bt_states { |
62 | BT_STATE_IDLE = 0, /* Order is critical in this list */ | 68 | BT_STATE_IDLE = 0, /* Order is critical in this list */ |
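
Most of the remaining ipmi_bt_sm.c and ipmi_kcs_sm.c hunks are CodingStyle cleanups: long comments are reflowed into the kernel's preferred block form, comments are pulled out of the enum initializers, and stray spaces before parentheses go away. For reference, a sketch of the preferred block-comment form:

/*
 * Preferred kernel block comment: the opening slash-star sits on its
 * own line, every continuation line begins with an aligned " *", and
 * the closing star-slash gets a line of its own as well.
 */
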
@@ -76,10 +82,12 @@ enum bt_states { | |||
76 | BT_STATE_LONG_BUSY /* BT doesn't get hosed :-) */ | 82 | BT_STATE_LONG_BUSY /* BT doesn't get hosed :-) */ |
77 | }; | 83 | }; |
78 | 84 | ||
79 | /* Macros seen at the end of state "case" blocks. They help with legibility | 85 | /* |
80 | and debugging. */ | 86 | * Macros seen at the end of state "case" blocks. They help with legibility |
87 | * and debugging. | ||
88 | */ | ||
81 | 89 | ||
82 | #define BT_STATE_CHANGE(X,Y) { bt->state = X; return Y; } | 90 | #define BT_STATE_CHANGE(X, Y) { bt->state = X; return Y; } |
83 | 91 | ||
84 | #define BT_SI_SM_RETURN(Y) { last_printed = BT_STATE_PRINTME; return Y; } | 92 | #define BT_SI_SM_RETURN(Y) { last_printed = BT_STATE_PRINTME; return Y; } |
85 | 93 | ||
@@ -110,11 +118,13 @@ struct si_sm_data { | |||
110 | #define BT_H_BUSY 0x40 | 118 | #define BT_H_BUSY 0x40 |
111 | #define BT_B_BUSY 0x80 | 119 | #define BT_B_BUSY 0x80 |
112 | 120 | ||
113 | /* Some bits are toggled on each write: write once to set it, once | 121 | /* |
114 | more to clear it; writing a zero does nothing. To absolutely | 122 | * Some bits are toggled on each write: write once to set it, once |
115 | clear it, check its state and write if set. This avoids the "get | 123 | * more to clear it; writing a zero does nothing. To absolutely |
116 | current then use as mask" scheme to modify one bit. Note that the | 124 | * clear it, check its state and write if set. This avoids the "get |
117 | variable "bt" is hardcoded into these macros. */ | 125 | * current then use as mask" scheme to modify one bit. Note that the |
126 | * variable "bt" is hardcoded into these macros. | ||
127 | */ | ||
118 | 128 | ||
119 | #define BT_STATUS bt->io->inputb(bt->io, 0) | 129 | #define BT_STATUS bt->io->inputb(bt->io, 0) |
120 | #define BT_CONTROL(x) bt->io->outputb(bt->io, 0, x) | 130 | #define BT_CONTROL(x) bt->io->outputb(bt->io, 0, x) |
@@ -125,8 +135,10 @@ struct si_sm_data { | |||
125 | #define BT_INTMASK_R bt->io->inputb(bt->io, 2) | 135 | #define BT_INTMASK_R bt->io->inputb(bt->io, 2) |
126 | #define BT_INTMASK_W(x) bt->io->outputb(bt->io, 2, x) | 136 | #define BT_INTMASK_W(x) bt->io->outputb(bt->io, 2, x) |
127 | 137 | ||
128 | /* Convenience routines for debugging. These are not multi-open safe! | 138 | /* |
129 | Note the macros have hardcoded variables in them. */ | 139 | * Convenience routines for debugging. These are not multi-open safe! |
140 | * Note the macros have hardcoded variables in them. | ||
141 | */ | ||
130 | 142 | ||
131 | static char *state2txt(unsigned char state) | 143 | static char *state2txt(unsigned char state) |
132 | { | 144 | { |
@@ -182,7 +194,8 @@ static char *status2txt(unsigned char status) | |||
182 | static unsigned int bt_init_data(struct si_sm_data *bt, struct si_sm_io *io) | 194 | static unsigned int bt_init_data(struct si_sm_data *bt, struct si_sm_io *io) |
183 | { | 195 | { |
184 | memset(bt, 0, sizeof(struct si_sm_data)); | 196 | memset(bt, 0, sizeof(struct si_sm_data)); |
185 | if (bt->io != io) { /* external: one-time only things */ | 197 | if (bt->io != io) { |
198 | /* external: one-time only things */ | ||
186 | bt->io = io; | 199 | bt->io = io; |
187 | bt->seq = 0; | 200 | bt->seq = 0; |
188 | } | 201 | } |
@@ -229,7 +242,7 @@ static int bt_start_transaction(struct si_sm_data *bt, | |||
229 | printk(KERN_WARNING "BT: +++++++++++++++++ New command\n"); | 242 | printk(KERN_WARNING "BT: +++++++++++++++++ New command\n"); |
230 | printk(KERN_WARNING "BT: NetFn/LUN CMD [%d data]:", size - 2); | 243 | printk(KERN_WARNING "BT: NetFn/LUN CMD [%d data]:", size - 2); |
231 | for (i = 0; i < size; i ++) | 244 | for (i = 0; i < size; i ++) |
232 | printk (" %02x", data[i]); | 245 | printk(" %02x", data[i]); |
233 | printk("\n"); | 246 | printk("\n"); |
234 | } | 247 | } |
235 | bt->write_data[0] = size + 1; /* all data plus seq byte */ | 248 | bt->write_data[0] = size + 1; /* all data plus seq byte */ |
@@ -246,8 +259,10 @@ static int bt_start_transaction(struct si_sm_data *bt, | |||
246 | return 0; | 259 | return 0; |
247 | } | 260 | } |
248 | 261 | ||
249 | /* After the upper state machine has been told SI_SM_TRANSACTION_COMPLETE | 262 | /* |
250 | it calls this. Strip out the length and seq bytes. */ | 263 | * After the upper state machine has been told SI_SM_TRANSACTION_COMPLETE |
264 | * it calls this. Strip out the length and seq bytes. | ||
265 | */ | ||
251 | 266 | ||
252 | static int bt_get_result(struct si_sm_data *bt, | 267 | static int bt_get_result(struct si_sm_data *bt, |
253 | unsigned char *data, | 268 | unsigned char *data, |
@@ -269,10 +284,10 @@ static int bt_get_result(struct si_sm_data *bt, | |||
269 | memcpy(data + 2, bt->read_data + 4, msg_len - 2); | 284 | memcpy(data + 2, bt->read_data + 4, msg_len - 2); |
270 | 285 | ||
271 | if (bt_debug & BT_DEBUG_MSG) { | 286 | if (bt_debug & BT_DEBUG_MSG) { |
272 | printk (KERN_WARNING "BT: result %d bytes:", msg_len); | 287 | printk(KERN_WARNING "BT: result %d bytes:", msg_len); |
273 | for (i = 0; i < msg_len; i++) | 288 | for (i = 0; i < msg_len; i++) |
274 | printk(" %02x", data[i]); | 289 | printk(" %02x", data[i]); |
275 | printk ("\n"); | 290 | printk("\n"); |
276 | } | 291 | } |
277 | return msg_len; | 292 | return msg_len; |
278 | } | 293 | } |
@@ -292,8 +307,10 @@ static void reset_flags(struct si_sm_data *bt) | |||
292 | BT_INTMASK_W(BT_BMC_HWRST); | 307 | BT_INTMASK_W(BT_BMC_HWRST); |
293 | } | 308 | } |
294 | 309 | ||
295 | /* Get rid of an unwanted/stale response. This should only be needed for | 310 | /* |
296 | BMCs that support multiple outstanding requests. */ | 311 | * Get rid of an unwanted/stale response. This should only be needed for |
312 | * BMCs that support multiple outstanding requests. | ||
313 | */ | ||
297 | 314 | ||
298 | static void drain_BMC2HOST(struct si_sm_data *bt) | 315 | static void drain_BMC2HOST(struct si_sm_data *bt) |
299 | { | 316 | { |
@@ -326,8 +343,8 @@ static inline void write_all_bytes(struct si_sm_data *bt) | |||
326 | printk(KERN_WARNING "BT: write %d bytes seq=0x%02X", | 343 | printk(KERN_WARNING "BT: write %d bytes seq=0x%02X", |
327 | bt->write_count, bt->seq); | 344 | bt->write_count, bt->seq); |
328 | for (i = 0; i < bt->write_count; i++) | 345 | for (i = 0; i < bt->write_count; i++) |
329 | printk (" %02x", bt->write_data[i]); | 346 | printk(" %02x", bt->write_data[i]); |
330 | printk ("\n"); | 347 | printk("\n"); |
331 | } | 348 | } |
332 | for (i = 0; i < bt->write_count; i++) | 349 | for (i = 0; i < bt->write_count; i++) |
333 | HOST2BMC(bt->write_data[i]); | 350 | HOST2BMC(bt->write_data[i]); |
@@ -337,8 +354,10 @@ static inline int read_all_bytes(struct si_sm_data *bt) | |||
337 | { | 354 | { |
338 | unsigned char i; | 355 | unsigned char i; |
339 | 356 | ||
340 | /* length is "framing info", minimum = 4: NetFn, Seq, Cmd, cCode. | 357 | /* |
341 | Keep layout of first four bytes aligned with write_data[] */ | 358 | * length is "framing info", minimum = 4: NetFn, Seq, Cmd, cCode. |
359 | * Keep layout of first four bytes aligned with write_data[] | ||
360 | */ | ||
342 | 361 | ||
343 | bt->read_data[0] = BMC2HOST; | 362 | bt->read_data[0] = BMC2HOST; |
344 | bt->read_count = bt->read_data[0]; | 363 | bt->read_count = bt->read_data[0]; |
@@ -362,8 +381,8 @@ static inline int read_all_bytes(struct si_sm_data *bt) | |||
362 | if (max > 16) | 381 | if (max > 16) |
363 | max = 16; | 382 | max = 16; |
364 | for (i = 0; i < max; i++) | 383 | for (i = 0; i < max; i++) |
365 | printk (" %02x", bt->read_data[i]); | 384 | printk(KERN_CONT " %02x", bt->read_data[i]); |
366 | printk ("%s\n", bt->read_count == max ? "" : " ..."); | 385 | printk(KERN_CONT "%s\n", bt->read_count == max ? "" : " ..."); |
367 | } | 386 | } |
368 | 387 | ||
369 | /* per the spec, the (NetFn[1], Seq[2], Cmd[3]) tuples must match */ | 388 | /* per the spec, the (NetFn[1], Seq[2], Cmd[3]) tuples must match */ |
@@ -402,8 +421,10 @@ static enum si_sm_result error_recovery(struct si_sm_data *bt, | |||
402 | printk(KERN_WARNING "IPMI BT: %s in %s %s ", /* open-ended line */ | 421 | printk(KERN_WARNING "IPMI BT: %s in %s %s ", /* open-ended line */ |
403 | reason, STATE2TXT, STATUS2TXT); | 422 | reason, STATE2TXT, STATUS2TXT); |
404 | 423 | ||
405 | /* Per the IPMI spec, retries are based on the sequence number | 424 | /* |
406 | known only to this module, so manage a restart here. */ | 425 | * Per the IPMI spec, retries are based on the sequence number |
426 | * known only to this module, so manage a restart here. | ||
427 | */ | ||
407 | (bt->error_retries)++; | 428 | (bt->error_retries)++; |
408 | if (bt->error_retries < bt->BT_CAP_retries) { | 429 | if (bt->error_retries < bt->BT_CAP_retries) { |
409 | printk("%d retries left\n", | 430 | printk("%d retries left\n", |
@@ -412,8 +433,8 @@ static enum si_sm_result error_recovery(struct si_sm_data *bt, | |||
412 | return SI_SM_CALL_WITHOUT_DELAY; | 433 | return SI_SM_CALL_WITHOUT_DELAY; |
413 | } | 434 | } |
414 | 435 | ||
415 | printk("failed %d retries, sending error response\n", | 436 | printk(KERN_WARNING "failed %d retries, sending error response\n", |
416 | bt->BT_CAP_retries); | 437 | bt->BT_CAP_retries); |
417 | if (!bt->nonzero_status) | 438 | if (!bt->nonzero_status) |
418 | printk(KERN_ERR "IPMI BT: stuck, try power cycle\n"); | 439 | printk(KERN_ERR "IPMI BT: stuck, try power cycle\n"); |
419 | 440 | ||
@@ -424,8 +445,10 @@ static enum si_sm_result error_recovery(struct si_sm_data *bt, | |||
424 | return SI_SM_CALL_WITHOUT_DELAY; | 445 | return SI_SM_CALL_WITHOUT_DELAY; |
425 | } | 446 | } |
426 | 447 | ||
427 | /* Concoct a useful error message, set up the next state, and | 448 | /* |
428 | be done with this sequence. */ | 449 | * Concoct a useful error message, set up the next state, and |
450 | * be done with this sequence. | ||
451 | */ | ||
429 | 452 | ||
430 | bt->state = BT_STATE_IDLE; | 453 | bt->state = BT_STATE_IDLE; |
431 | switch (cCode) { | 454 | switch (cCode) { |
@@ -461,10 +484,12 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time) | |||
461 | last_printed = bt->state; | 484 | last_printed = bt->state; |
462 | } | 485 | } |
463 | 486 | ||
464 | /* Commands that time out may still (eventually) provide a response. | 487 | /* |
465 | This stale response will get in the way of a new response so remove | 488 | * Commands that time out may still (eventually) provide a response. |
466 | it if possible (hopefully during IDLE). Even if it comes up later | 489 | * This stale response will get in the way of a new response so remove |
467 | it will be rejected by its (now-forgotten) seq number. */ | 490 | * it if possible (hopefully during IDLE). Even if it comes up later |
491 | * it will be rejected by its (now-forgotten) seq number. | ||
492 | */ | ||
468 | 493 | ||
469 | if ((bt->state < BT_STATE_WRITE_BYTES) && (status & BT_B2H_ATN)) { | 494 | if ((bt->state < BT_STATE_WRITE_BYTES) && (status & BT_B2H_ATN)) { |
470 | drain_BMC2HOST(bt); | 495 | drain_BMC2HOST(bt); |
@@ -472,7 +497,8 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time) | |||
472 | } | 497 | } |
473 | 498 | ||
474 | if ((bt->state != BT_STATE_IDLE) && | 499 | if ((bt->state != BT_STATE_IDLE) && |
475 | (bt->state < BT_STATE_PRINTME)) { /* check timeout */ | 500 | (bt->state < BT_STATE_PRINTME)) { |
501 | /* check timeout */ | ||
476 | bt->timeout -= time; | 502 | bt->timeout -= time; |
477 | if ((bt->timeout < 0) && (bt->state < BT_STATE_RESET1)) | 503 | if ((bt->timeout < 0) && (bt->state < BT_STATE_RESET1)) |
478 | return error_recovery(bt, | 504 | return error_recovery(bt, |
@@ -482,8 +508,10 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time) | |||
482 | 508 | ||
483 | switch (bt->state) { | 509 | switch (bt->state) { |
484 | 510 | ||
485 | /* Idle state first checks for asynchronous messages from another | 511 | /* |
486 | channel, then does some opportunistic housekeeping. */ | 512 | * Idle state first checks for asynchronous messages from another |
513 | * channel, then does some opportunistic housekeeping. | ||
514 | */ | ||
487 | 515 | ||
488 | case BT_STATE_IDLE: | 516 | case BT_STATE_IDLE: |
489 | if (status & BT_SMS_ATN) { | 517 | if (status & BT_SMS_ATN) { |
@@ -531,16 +559,19 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time) | |||
531 | BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY); | 559 | BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY); |
532 | BT_CONTROL(BT_H_BUSY); /* set */ | 560 | BT_CONTROL(BT_H_BUSY); /* set */ |
533 | 561 | ||
534 | /* Uncached, ordered writes should just proceed serially but | 562 | /*
535 | some BMCs don't clear B2H_ATN with one hit. Fast-path a | 563 | * Uncached, ordered writes should just proceed serially but
536 | workaround without too much penalty to the general case. */ | 564 | * some BMCs don't clear B2H_ATN with one hit. Fast-path a |
565 | * workaround without too much penalty to the general case. | ||
566 | */ | ||
537 | 567 | ||
538 | BT_CONTROL(BT_B2H_ATN); /* clear it to ACK the BMC */ | 568 | BT_CONTROL(BT_B2H_ATN); /* clear it to ACK the BMC */ |
539 | BT_STATE_CHANGE(BT_STATE_CLEAR_B2H, | 569 | BT_STATE_CHANGE(BT_STATE_CLEAR_B2H, |
540 | SI_SM_CALL_WITHOUT_DELAY); | 570 | SI_SM_CALL_WITHOUT_DELAY); |
541 | 571 | ||
542 | case BT_STATE_CLEAR_B2H: | 572 | case BT_STATE_CLEAR_B2H: |
543 | if (status & BT_B2H_ATN) { /* keep hitting it */ | 573 | if (status & BT_B2H_ATN) { |
574 | /* keep hitting it */ | ||
544 | BT_CONTROL(BT_B2H_ATN); | 575 | BT_CONTROL(BT_B2H_ATN); |
545 | BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY); | 576 | BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY); |
546 | } | 577 | } |
@@ -548,7 +579,8 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time) | |||
548 | SI_SM_CALL_WITHOUT_DELAY); | 579 | SI_SM_CALL_WITHOUT_DELAY); |
549 | 580 | ||
550 | case BT_STATE_READ_BYTES: | 581 | case BT_STATE_READ_BYTES: |
551 | if (!(status & BT_H_BUSY)) /* check in case of retry */ | 582 | if (!(status & BT_H_BUSY)) |
583 | /* check in case of retry */ | ||
552 | BT_CONTROL(BT_H_BUSY); | 584 | BT_CONTROL(BT_H_BUSY); |
553 | BT_CONTROL(BT_CLR_RD_PTR); /* start of BMC2HOST buffer */ | 585 | BT_CONTROL(BT_CLR_RD_PTR); /* start of BMC2HOST buffer */ |
554 | i = read_all_bytes(bt); /* true == packet seq match */ | 586 | i = read_all_bytes(bt); /* true == packet seq match */ |
@@ -599,8 +631,10 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time) | |||
599 | BT_STATE_CHANGE(BT_STATE_XACTION_START, | 631 | BT_STATE_CHANGE(BT_STATE_XACTION_START, |
600 | SI_SM_CALL_WITH_DELAY); | 632 | SI_SM_CALL_WITH_DELAY); |
601 | 633 | ||
602 | /* Get BT Capabilities, using timing of upper level state machine. | 634 | /* |
603 | Set outreqs to prevent infinite loop on timeout. */ | 635 | * Get BT Capabilities, using timing of upper level state machine. |
636 | * Set outreqs to prevent infinite loop on timeout. | ||
637 | */ | ||
604 | case BT_STATE_CAPABILITIES_BEGIN: | 638 | case BT_STATE_CAPABILITIES_BEGIN: |
605 | bt->BT_CAP_outreqs = 1; | 639 | bt->BT_CAP_outreqs = 1; |
606 | { | 640 | { |
@@ -638,10 +672,12 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time) | |||
638 | 672 | ||
639 | static int bt_detect(struct si_sm_data *bt) | 673 | static int bt_detect(struct si_sm_data *bt) |
640 | { | 674 | { |
641 | /* It's impossible for the BT status and interrupt registers to be | 675 | /* |
642 | all 1's, (assuming a properly functioning, self-initialized BMC) | 676 | * It's impossible for the BT status and interrupt registers to be |
643 | but that's what you get from reading a bogus address, so we | 677 | * all 1's, (assuming a properly functioning, self-initialized BMC) |
644 | test that first. The calling routine uses negative logic. */ | 678 | * but that's what you get from reading a bogus address, so we |
679 | * test that first. The calling routine uses negative logic. | ||
680 | */ | ||
645 | 681 | ||
646 | if ((BT_STATUS == 0xFF) && (BT_INTMASK_R == 0xFF)) | 682 | if ((BT_STATUS == 0xFF) && (BT_INTMASK_R == 0xFF)) |
647 | return 1; | 683 | return 1; |
@@ -658,8 +694,7 @@ static int bt_size(void) | |||
658 | return sizeof(struct si_sm_data); | 694 | return sizeof(struct si_sm_data); |
659 | } | 695 | } |
660 | 696 | ||
661 | struct si_sm_handlers bt_smi_handlers = | 697 | struct si_sm_handlers bt_smi_handlers = { |
662 | { | ||
663 | .init_data = bt_init_data, | 698 | .init_data = bt_init_data, |
664 | .start_transaction = bt_start_transaction, | 699 | .start_transaction = bt_start_transaction, |
665 | .get_result = bt_get_result, | 700 | .get_result = bt_get_result, |
diff --git a/drivers/char/ipmi/ipmi_kcs_sm.c b/drivers/char/ipmi/ipmi_kcs_sm.c index c1b8228cb7b6..80704875794c 100644 --- a/drivers/char/ipmi/ipmi_kcs_sm.c +++ b/drivers/char/ipmi/ipmi_kcs_sm.c | |||
@@ -60,37 +60,58 @@ MODULE_PARM_DESC(kcs_debug, "debug bitmask, 1=enable, 2=messages, 4=states"); | |||
60 | 60 | ||
61 | /* The states the KCS driver may be in. */ | 61 | /* The states the KCS driver may be in. */ |
62 | enum kcs_states { | 62 | enum kcs_states { |
63 | KCS_IDLE, /* The KCS interface is currently | 63 | /* The KCS interface is currently doing nothing. */ |
64 | doing nothing. */ | 64 | KCS_IDLE, |
65 | KCS_START_OP, /* We are starting an operation. The | 65 | |
66 | data is in the output buffer, but | 66 | /* |
67 | nothing has been done to the | 67 | * We are starting an operation. The data is in the output |
68 | interface yet. This was added to | 68 | * buffer, but nothing has been done to the interface yet. This |
69 | the state machine in the spec to | 69 | * was added to the state machine in the spec to wait for the |
70 | wait for the initial IBF. */ | 70 | * initial IBF. |
71 | KCS_WAIT_WRITE_START, /* We have written a write cmd to the | 71 | */ |
72 | interface. */ | 72 | KCS_START_OP, |
73 | KCS_WAIT_WRITE, /* We are writing bytes to the | 73 | |
74 | interface. */ | 74 | /* We have written a write cmd to the interface. */ |
75 | KCS_WAIT_WRITE_END, /* We have written the write end cmd | 75 | KCS_WAIT_WRITE_START, |
76 | to the interface, and still need to | 76 | |
77 | write the last byte. */ | 77 | /* We are writing bytes to the interface. */ |
78 | KCS_WAIT_READ, /* We are waiting to read data from | 78 | KCS_WAIT_WRITE, |
79 | the interface. */ | 79 | |
80 | KCS_ERROR0, /* State to transition to the error | 80 | /* |
81 | handler, this was added to the | 81 | * We have written the write end cmd to the interface, and |
82 | state machine in the spec to be | 82 | * still need to write the last byte. |
83 | sure IBF was there. */ | 83 | */ |
84 | KCS_ERROR1, /* First stage error handler, wait for | 84 | KCS_WAIT_WRITE_END, |
85 | the interface to respond. */ | 85 | |
86 | KCS_ERROR2, /* The abort cmd has been written, | 86 | /* We are waiting to read data from the interface. */ |
87 | wait for the interface to | 87 | KCS_WAIT_READ, |
88 | respond. */ | 88 | |
89 | KCS_ERROR3, /* We wrote some data to the | 89 | /* |
90 | interface, wait for it to switch to | 90 | * State to transition to the error handler, this was added to |
91 | read mode. */ | 91 | * the state machine in the spec to be sure IBF was there. |
92 | KCS_HOSED /* The hardware failed to follow the | 92 | */ |
93 | state machine. */ | 93 | KCS_ERROR0, |
94 | |||
95 | /* | ||
96 | * First stage error handler, wait for the interface to | ||
97 | * respond. | ||
98 | */ | ||
99 | KCS_ERROR1, | ||
100 | |||
101 | /* | ||
102 | * The abort cmd has been written, wait for the interface to | ||
103 | * respond. | ||
104 | */ | ||
105 | KCS_ERROR2, | ||
106 | |||
107 | /* | ||
108 | * We wrote some data to the interface, wait for it to switch | ||
109 | * to read mode. | ||
110 | */ | ||
111 | KCS_ERROR3, | ||
112 | |||
113 | /* The hardware failed to follow the state machine. */ | ||
114 | KCS_HOSED | ||
94 | }; | 115 | }; |
95 | 116 | ||
96 | #define MAX_KCS_READ_SIZE IPMI_MAX_MSG_LENGTH | 117 | #define MAX_KCS_READ_SIZE IPMI_MAX_MSG_LENGTH |
@@ -102,8 +123,7 @@ enum kcs_states { | |||
102 | #define MAX_ERROR_RETRIES 10 | 123 | #define MAX_ERROR_RETRIES 10 |
103 | #define ERROR0_OBF_WAIT_JIFFIES (2*HZ) | 124 | #define ERROR0_OBF_WAIT_JIFFIES (2*HZ) |
104 | 125 | ||
105 | struct si_sm_data | 126 | struct si_sm_data { |
106 | { | ||
107 | enum kcs_states state; | 127 | enum kcs_states state; |
108 | struct si_sm_io *io; | 128 | struct si_sm_io *io; |
109 | unsigned char write_data[MAX_KCS_WRITE_SIZE]; | 129 | unsigned char write_data[MAX_KCS_WRITE_SIZE]; |
@@ -187,7 +207,8 @@ static inline void start_error_recovery(struct si_sm_data *kcs, char *reason) | |||
187 | (kcs->error_retries)++; | 207 | (kcs->error_retries)++; |
188 | if (kcs->error_retries > MAX_ERROR_RETRIES) { | 208 | if (kcs->error_retries > MAX_ERROR_RETRIES) { |
189 | if (kcs_debug & KCS_DEBUG_ENABLE) | 209 | if (kcs_debug & KCS_DEBUG_ENABLE) |
190 | printk(KERN_DEBUG "ipmi_kcs_sm: kcs hosed: %s\n", reason); | 210 | printk(KERN_DEBUG "ipmi_kcs_sm: kcs hosed: %s\n", |
211 | reason); | ||
191 | kcs->state = KCS_HOSED; | 212 | kcs->state = KCS_HOSED; |
192 | } else { | 213 | } else { |
193 | kcs->error0_timeout = jiffies + ERROR0_OBF_WAIT_JIFFIES; | 214 | kcs->error0_timeout = jiffies + ERROR0_OBF_WAIT_JIFFIES; |
@@ -271,10 +292,9 @@ static int start_kcs_transaction(struct si_sm_data *kcs, unsigned char *data, | |||
271 | 292 | ||
272 | if (kcs_debug & KCS_DEBUG_MSG) { | 293 | if (kcs_debug & KCS_DEBUG_MSG) { |
273 | printk(KERN_DEBUG "start_kcs_transaction -"); | 294 | printk(KERN_DEBUG "start_kcs_transaction -"); |
274 | for (i = 0; i < size; i ++) { | 295 | for (i = 0; i < size; i++) |
275 | printk(" %02x", (unsigned char) (data [i])); | 296 | printk(" %02x", (unsigned char) (data [i])); |
276 | } | 297 | printk("\n"); |
277 | printk ("\n"); | ||
278 | } | 298 | } |
279 | kcs->error_retries = 0; | 299 | kcs->error_retries = 0; |
280 | memcpy(kcs->write_data, data, size); | 300 | memcpy(kcs->write_data, data, size); |
@@ -305,9 +325,11 @@ static int get_kcs_result(struct si_sm_data *kcs, unsigned char *data, | |||
305 | kcs->read_pos = 3; | 325 | kcs->read_pos = 3; |
306 | } | 326 | } |
307 | if (kcs->truncated) { | 327 | if (kcs->truncated) { |
308 | /* Report a truncated error. We might overwrite | 328 | /* |
309 | another error, but that's too bad, the user needs | 329 | * Report a truncated error. We might overwrite |
310 | to know it was truncated. */ | 330 | * another error, but that's too bad, the user needs |
331 | * to know it was truncated. | ||
332 | */ | ||
311 | data[2] = IPMI_ERR_MSG_TRUNCATED; | 333 | data[2] = IPMI_ERR_MSG_TRUNCATED; |
312 | kcs->truncated = 0; | 334 | kcs->truncated = 0; |
313 | } | 335 | } |
@@ -315,9 +337,11 @@ static int get_kcs_result(struct si_sm_data *kcs, unsigned char *data, | |||
315 | return kcs->read_pos; | 337 | return kcs->read_pos; |
316 | } | 338 | } |
317 | 339 | ||
318 | /* This implements the state machine defined in the IPMI manual, see | 340 | /* |
319 | that for details on how this works. Divide that flowchart into | 341 | * This implements the state machine defined in the IPMI manual, see |
320 | sections delimited by "Wait for IBF" and this will become clear. */ | 342 | * that for details on how this works. Divide that flowchart into |
343 | * sections delimited by "Wait for IBF" and this will become clear. | ||
344 | */ | ||
321 | static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time) | 345 | static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time) |
322 | { | 346 | { |
323 | unsigned char status; | 347 | unsigned char status; |
@@ -388,11 +412,12 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time) | |||
388 | write_next_byte(kcs); | 412 | write_next_byte(kcs); |
389 | } | 413 | } |
390 | break; | 414 | break; |
391 | 415 | ||
392 | case KCS_WAIT_WRITE_END: | 416 | case KCS_WAIT_WRITE_END: |
393 | if (state != KCS_WRITE_STATE) { | 417 | if (state != KCS_WRITE_STATE) { |
394 | start_error_recovery(kcs, | 418 | start_error_recovery(kcs, |
395 | "Not in write state for write end"); | 419 | "Not in write state" |
420 | " for write end"); | ||
396 | break; | 421 | break; |
397 | } | 422 | } |
398 | clear_obf(kcs, status); | 423 | clear_obf(kcs, status); |
@@ -413,13 +438,15 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time) | |||
413 | return SI_SM_CALL_WITH_DELAY; | 438 | return SI_SM_CALL_WITH_DELAY; |
414 | read_next_byte(kcs); | 439 | read_next_byte(kcs); |
415 | } else { | 440 | } else { |
416 | /* We don't implement this exactly like the state | 441 | /* |
417 | machine in the spec. Some broken hardware | 442 | * We don't implement this exactly like the state |
418 | does not write the final dummy byte to the | 443 | * machine in the spec. Some broken hardware |
419 | read register. Thus obf will never go high | 444 | * does not write the final dummy byte to the |
420 | here. We just go straight to idle, and we | 445 | * read register. Thus obf will never go high |
421 | handle clearing out obf in idle state if it | 446 | * here. We just go straight to idle, and we |
422 | happens to come in. */ | 447 | * handle clearing out obf in idle state if it |
448 | * happens to come in. | ||
449 | */ | ||
423 | clear_obf(kcs, status); | 450 | clear_obf(kcs, status); |
424 | kcs->orig_write_count = 0; | 451 | kcs->orig_write_count = 0; |
425 | kcs->state = KCS_IDLE; | 452 | kcs->state = KCS_IDLE; |
@@ -430,7 +457,8 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time) | |||
430 | case KCS_ERROR0: | 457 | case KCS_ERROR0: |
431 | clear_obf(kcs, status); | 458 | clear_obf(kcs, status); |
432 | status = read_status(kcs); | 459 | status = read_status(kcs); |
433 | if (GET_STATUS_OBF(status)) /* controller isn't responding */ | 460 | if (GET_STATUS_OBF(status)) |
461 | /* controller isn't responding */ | ||
434 | if (time_before(jiffies, kcs->error0_timeout)) | 462 | if (time_before(jiffies, kcs->error0_timeout)) |
435 | return SI_SM_CALL_WITH_TICK_DELAY; | 463 | return SI_SM_CALL_WITH_TICK_DELAY; |
436 | write_cmd(kcs, KCS_GET_STATUS_ABORT); | 464 | write_cmd(kcs, KCS_GET_STATUS_ABORT); |
@@ -442,7 +470,7 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time) | |||
442 | write_data(kcs, 0); | 470 | write_data(kcs, 0); |
443 | kcs->state = KCS_ERROR2; | 471 | kcs->state = KCS_ERROR2; |
444 | break; | 472 | break; |
445 | 473 | ||
446 | case KCS_ERROR2: | 474 | case KCS_ERROR2: |
447 | if (state != KCS_READ_STATE) { | 475 | if (state != KCS_READ_STATE) { |
448 | start_error_recovery(kcs, | 476 | start_error_recovery(kcs, |
@@ -456,7 +484,7 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time) | |||
456 | write_data(kcs, KCS_READ_BYTE); | 484 | write_data(kcs, KCS_READ_BYTE); |
457 | kcs->state = KCS_ERROR3; | 485 | kcs->state = KCS_ERROR3; |
458 | break; | 486 | break; |
459 | 487 | ||
460 | case KCS_ERROR3: | 488 | case KCS_ERROR3: |
461 | if (state != KCS_IDLE_STATE) { | 489 | if (state != KCS_IDLE_STATE) { |
462 | start_error_recovery(kcs, | 490 | start_error_recovery(kcs, |
@@ -475,7 +503,7 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time) | |||
475 | return SI_SM_TRANSACTION_COMPLETE; | 503 | return SI_SM_TRANSACTION_COMPLETE; |
476 | } | 504 | } |
477 | break; | 505 | break; |
478 | 506 | ||
479 | case KCS_HOSED: | 507 | case KCS_HOSED: |
480 | break; | 508 | break; |
481 | } | 509 | } |
@@ -495,10 +523,12 @@ static int kcs_size(void) | |||
495 | 523 | ||
496 | static int kcs_detect(struct si_sm_data *kcs) | 524 | static int kcs_detect(struct si_sm_data *kcs) |
497 | { | 525 | { |
498 | /* It's impossible for the KCS status register to be all 1's, | 526 | /* |
499 | (assuming a properly functioning, self-initialized BMC) | 527 | * It's impossible for the KCS status register to be all 1's, |
500 | but that's what you get from reading a bogus address, so we | 528 | * (assuming a properly functioning, self-initialized BMC) |
501 | test that first. */ | 529 | * but that's what you get from reading a bogus address, so we |
530 | * test that first. | ||
531 | */ | ||
502 | if (read_status(kcs) == 0xff) | 532 | if (read_status(kcs) == 0xff) |
503 | return 1; | 533 | return 1; |
504 | 534 | ||
@@ -509,8 +539,7 @@ static void kcs_cleanup(struct si_sm_data *kcs) | |||
509 | { | 539 | { |
510 | } | 540 | } |
511 | 541 | ||
512 | struct si_sm_handlers kcs_smi_handlers = | 542 | struct si_sm_handlers kcs_smi_handlers = { |
513 | { | ||
514 | .init_data = init_kcs_data, | 543 | .init_data = init_kcs_data, |
515 | .start_transaction = start_kcs_transaction, | 544 | .start_transaction = start_kcs_transaction, |
516 | .get_result = get_kcs_result, | 545 | .get_result = get_kcs_result, |
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index 32b2b22996dc..8a59aaa21be5 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c | |||
@@ -47,7 +47,7 @@ | |||
47 | 47 | ||
48 | #define PFX "IPMI message handler: " | 48 | #define PFX "IPMI message handler: " |
49 | 49 | ||
50 | #define IPMI_DRIVER_VERSION "39.1" | 50 | #define IPMI_DRIVER_VERSION "39.2" |
51 | 51 | ||
52 | static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void); | 52 | static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void); |
53 | static int ipmi_init_msghandler(void); | 53 | static int ipmi_init_msghandler(void); |
@@ -63,16 +63,16 @@ static struct proc_dir_entry *proc_ipmi_root; | |||
63 | 63 | ||
64 | #define MAX_EVENTS_IN_QUEUE 25 | 64 | #define MAX_EVENTS_IN_QUEUE 25 |
65 | 65 | ||
66 | /* Don't let a message sit in a queue forever, always time it with at lest | 66 | /* |
67 | the max message timer. This is in milliseconds. */ | 67 | * Don't let a message sit in a queue forever, always time it with at least |
68 | * the max message timer. This is in milliseconds. | ||
69 | */ | ||
68 | #define MAX_MSG_TIMEOUT 60000 | 70 | #define MAX_MSG_TIMEOUT 60000 |
69 | 71 | ||
70 | |||
71 | /* | 72 | /* |
72 | * The main "user" data structure. | 73 | * The main "user" data structure. |
73 | */ | 74 | */ |
74 | struct ipmi_user | 75 | struct ipmi_user { |
75 | { | ||
76 | struct list_head link; | 76 | struct list_head link; |
77 | 77 | ||
78 | /* Set to "0" when the user is destroyed. */ | 78 | /* Set to "0" when the user is destroyed. */ |
@@ -91,8 +91,7 @@ struct ipmi_user | |||
91 | int gets_events; | 91 | int gets_events; |
92 | }; | 92 | }; |
93 | 93 | ||
94 | struct cmd_rcvr | 94 | struct cmd_rcvr { |
95 | { | ||
96 | struct list_head link; | 95 | struct list_head link; |
97 | 96 | ||
98 | ipmi_user_t user; | 97 | ipmi_user_t user; |
@@ -106,12 +105,12 @@ struct cmd_rcvr | |||
106 | * or change any data until the RCU period completes. So we | 105 | * or change any data until the RCU period completes. So we |
107 | * use this next variable during mass deletion so we can have | 106 | * use this next variable during mass deletion so we can have |
108 | * a list and don't have to wait and restart the search on | 107 | * a list and don't have to wait and restart the search on |
109 | * every individual deletion of a command. */ | 108 | * every individual deletion of a command. |
109 | */ | ||
110 | struct cmd_rcvr *next; | 110 | struct cmd_rcvr *next; |
111 | }; | 111 | }; |
112 | 112 | ||
113 | struct seq_table | 113 | struct seq_table { |
114 | { | ||
115 | unsigned int inuse : 1; | 114 | unsigned int inuse : 1; |
116 | unsigned int broadcast : 1; | 115 | unsigned int broadcast : 1; |
117 | 116 | ||
@@ -119,53 +118,60 @@ struct seq_table | |||
119 | unsigned long orig_timeout; | 118 | unsigned long orig_timeout; |
120 | unsigned int retries_left; | 119 | unsigned int retries_left; |
121 | 120 | ||
122 | /* To verify on an incoming send message response that this is | 121 | /* |
123 | the message that the response is for, we keep a sequence id | 122 | * To verify on an incoming send message response that this is |
124 | and increment it every time we send a message. */ | 123 | * the message that the response is for, we keep a sequence id |
124 | * and increment it every time we send a message. | ||
125 | */ | ||
125 | long seqid; | 126 | long seqid; |
126 | 127 | ||
127 | /* This is held so we can properly respond to the message on a | 128 | /* |
128 | timeout, and it is used to hold the temporary data for | 129 | * This is held so we can properly respond to the message on a |
129 | retransmission, too. */ | 130 | * timeout, and it is used to hold the temporary data for |
131 | * retransmission, too. | ||
132 | */ | ||
130 | struct ipmi_recv_msg *recv_msg; | 133 | struct ipmi_recv_msg *recv_msg; |
131 | }; | 134 | }; |
132 | 135 | ||
133 | /* Store the information in a msgid (long) to allow us to find a | 136 | /* |
134 | sequence table entry from the msgid. */ | 137 | * Store the information in a msgid (long) to allow us to find a |
138 | * sequence table entry from the msgid. | ||
139 | */ | ||
135 | #define STORE_SEQ_IN_MSGID(seq, seqid) (((seq&0xff)<<26) | (seqid&0x3ffffff)) | 140 | #define STORE_SEQ_IN_MSGID(seq, seqid) (((seq&0xff)<<26) | (seqid&0x3ffffff)) |
136 | 141 | ||
137 | #define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \ | 142 | #define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \ |
138 | do { \ | 143 | do { \ |
139 | seq = ((msgid >> 26) & 0x3f); \ | 144 | seq = ((msgid >> 26) & 0x3f); \ |
140 | seqid = (msgid & 0x3fffff); \ | 145 | seqid = (msgid & 0x3fffff); \ |
141 | } while (0) | 146 | } while (0) |
142 | 147 | ||
143 | #define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3fffff) | 148 | #define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3fffff) |
144 | 149 | ||
145 | struct ipmi_channel | 150 | struct ipmi_channel { |
146 | { | ||
147 | unsigned char medium; | 151 | unsigned char medium; |
148 | unsigned char protocol; | 152 | unsigned char protocol; |
149 | 153 | ||
150 | /* My slave address. This is initialized to IPMI_BMC_SLAVE_ADDR, | 154 | /* |
151 | but may be changed by the user. */ | 155 | * My slave address. This is initialized to IPMI_BMC_SLAVE_ADDR, |
156 | * but may be changed by the user. | ||
157 | */ | ||
152 | unsigned char address; | 158 | unsigned char address; |
153 | 159 | ||
154 | /* My LUN. This should generally stay the SMS LUN, but just in | 160 | /* |
155 | case... */ | 161 | * My LUN. This should generally stay the SMS LUN, but just in |
162 | * case... | ||
163 | */ | ||
156 | unsigned char lun; | 164 | unsigned char lun; |
157 | }; | 165 | }; |
158 | 166 | ||
159 | #ifdef CONFIG_PROC_FS | 167 | #ifdef CONFIG_PROC_FS |
160 | struct ipmi_proc_entry | 168 | struct ipmi_proc_entry { |
161 | { | ||
162 | char *name; | 169 | char *name; |
163 | struct ipmi_proc_entry *next; | 170 | struct ipmi_proc_entry *next; |
164 | }; | 171 | }; |
165 | #endif | 172 | #endif |
166 | 173 | ||
167 | struct bmc_device | 174 | struct bmc_device { |
168 | { | ||
169 | struct platform_device *dev; | 175 | struct platform_device *dev; |
170 | struct ipmi_device_id id; | 176 | struct ipmi_device_id id; |
171 | unsigned char guid[16]; | 177 | unsigned char guid[16]; |
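The STORE_SEQ_IN_MSGID()/GET_SEQ_FROM_MSGID() macros shown in this hunk pack a sequence-table index and a rolling sequence id into a single msgid. A small stand-alone round-trip illustration (user-space only, not driver code) is below; since NEXT_SEQID() and IPMI_IPMB_NUM_SEQ (64) appear to keep both values within the narrower extraction masks, the wider masks used on the store side look harmless:

        #include <stdio.h>

        #define STORE_SEQ_IN_MSGID(seq, seqid) (((seq&0xff)<<26) | (seqid&0x3ffffff))
        #define GET_SEQ_FROM_MSGID(msgid, seq, seqid)   \
                do {                                    \
                        seq = ((msgid >> 26) & 0x3f);   \
                        seqid = (msgid & 0x3fffff);     \
                } while (0)

        int main(void)
        {
                long msgid = STORE_SEQ_IN_MSGID(5, 1234);
                unsigned int seq;
                long seqid;

                GET_SEQ_FROM_MSGID(msgid, seq, seqid);
                printf("msgid=0x%lx seq=%u seqid=%ld\n",
                       (unsigned long)msgid, seq, seqid);
                /* prints: msgid=0x140004d2 seq=5 seqid=1234 */
                return 0;
        }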
@@ -186,10 +192,108 @@ struct bmc_device | |||
186 | struct device_attribute aux_firmware_rev_attr; | 192 | struct device_attribute aux_firmware_rev_attr; |
187 | }; | 193 | }; |
188 | 194 | ||
195 | /* | ||
196 | * Various statistics for IPMI, these index stats[] in the ipmi_smi | ||
197 | * structure. | ||
198 | */ | ||
199 | enum ipmi_stat_indexes { | ||
200 | /* Commands we got from the user that were invalid. */ | ||
201 | IPMI_STAT_sent_invalid_commands = 0, | ||
202 | |||
203 | /* Commands we sent to the MC. */ | ||
204 | IPMI_STAT_sent_local_commands, | ||
205 | |||
206 | /* Responses from the MC that were delivered to a user. */ | ||
207 | IPMI_STAT_handled_local_responses, | ||
208 | |||
209 | /* Responses from the MC that were not delivered to a user. */ | ||
210 | IPMI_STAT_unhandled_local_responses, | ||
211 | |||
212 | /* Commands we sent out to the IPMB bus. */ | ||
213 | IPMI_STAT_sent_ipmb_commands, | ||
214 | |||
215 | /* Commands sent on the IPMB that had errors on the SEND CMD */ | ||
216 | IPMI_STAT_sent_ipmb_command_errs, | ||
217 | |||
218 | /* Each retransmit increments this count. */ | ||
219 | IPMI_STAT_retransmitted_ipmb_commands, | ||
220 | |||
221 | /* | ||
222 | * When a message times out (runs out of retransmits) this is | ||
223 | * incremented. | ||
224 | */ | ||
225 | IPMI_STAT_timed_out_ipmb_commands, | ||
226 | |||
227 | /* | ||
228 | * This is like above, but for broadcasts. Broadcasts are | ||
229 | * *not* included in the above count (they are expected to | ||
230 | * time out). | ||
231 | */ | ||
232 | IPMI_STAT_timed_out_ipmb_broadcasts, | ||
233 | |||
234 | /* Responses I have sent to the IPMB bus. */ | ||
235 | IPMI_STAT_sent_ipmb_responses, | ||
236 | |||
237 | /* The response was delivered to the user. */ | ||
238 | IPMI_STAT_handled_ipmb_responses, | ||
239 | |||
240 | /* The response had invalid data in it. */ | ||
241 | IPMI_STAT_invalid_ipmb_responses, | ||
242 | |||
243 | /* The response didn't have anyone waiting for it. */ | ||
244 | IPMI_STAT_unhandled_ipmb_responses, | ||
245 | |||
246 | /* Commands we sent out to the IPMB bus. */ | ||
247 | IPMI_STAT_sent_lan_commands, | ||
248 | |||
249 | /* Commands sent on the IPMB that had errors on the SEND CMD */ | ||
250 | IPMI_STAT_sent_lan_command_errs, | ||
251 | |||
252 | /* Each retransmit increments this count. */ | ||
253 | IPMI_STAT_retransmitted_lan_commands, | ||
254 | |||
255 | /* | ||
256 | * When a message times out (runs out of retransmits) this is | ||
257 | * incremented. | ||
258 | */ | ||
259 | IPMI_STAT_timed_out_lan_commands, | ||
260 | |||
261 | /* Responses I have sent to the IPMB bus. */ | ||
262 | IPMI_STAT_sent_lan_responses, | ||
263 | |||
264 | /* The response was delivered to the user. */ | ||
265 | IPMI_STAT_handled_lan_responses, | ||
266 | |||
267 | /* The response had invalid data in it. */ | ||
268 | IPMI_STAT_invalid_lan_responses, | ||
269 | |||
270 | /* The response didn't have anyone waiting for it. */ | ||
271 | IPMI_STAT_unhandled_lan_responses, | ||
272 | |||
273 | /* The command was delivered to the user. */ | ||
274 | IPMI_STAT_handled_commands, | ||
275 | |||
276 | /* The command had invalid data in it. */ | ||
277 | IPMI_STAT_invalid_commands, | ||
278 | |||
279 | /* The command didn't have anyone waiting for it. */ | ||
280 | IPMI_STAT_unhandled_commands, | ||
281 | |||
282 | /* Invalid data in an event. */ | ||
283 | IPMI_STAT_invalid_events, | ||
284 | |||
285 | /* Events that were received with the proper format. */ | ||
286 | IPMI_STAT_events, | ||
287 | |||
288 | |||
289 | /* This *must* remain last, add new values above this. */ | ||
290 | IPMI_NUM_STATS | ||
291 | }; | ||
292 | |||
293 | |||
189 | #define IPMI_IPMB_NUM_SEQ 64 | 294 | #define IPMI_IPMB_NUM_SEQ 64 |
190 | #define IPMI_MAX_CHANNELS 16 | 295 | #define IPMI_MAX_CHANNELS 16 |
191 | struct ipmi_smi | 296 | struct ipmi_smi { |
192 | { | ||
193 | /* What interface number are we? */ | 297 | /* What interface number are we? */ |
194 | int intf_num; | 298 | int intf_num; |
195 | 299 | ||
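The new enum ends with IPMI_NUM_STATS, the usual counting-sentinel idiom: because it is declared last, it equals the number of real statistics and can size the per-interface stats[] array introduced further down in this patch. A stand-alone illustration of the idiom (all names here are invented for the example):

        #include <stdio.h>

        enum demo_stat {
                DEMO_STAT_sent,
                DEMO_STAT_received,
                DEMO_STAT_dropped,
                /* This *must* remain last, add new values above this. */
                DEMO_NUM_STATS
        };

        int main(void)
        {
                unsigned int stats[DEMO_NUM_STATS] = { 0 }; /* sized by the sentinel */

                stats[DEMO_STAT_received]++;
                printf("%d counters, received=%u\n",
                       DEMO_NUM_STATS, stats[DEMO_STAT_received]);
                /* prints: 3 counters, received=1 */
                return 0;
        }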
@@ -198,8 +302,10 @@ struct ipmi_smi | |||
198 | /* Used for a list of interfaces. */ | 302 | /* Used for a list of interfaces. */ |
199 | struct list_head link; | 303 | struct list_head link; |
200 | 304 | ||
201 | /* The list of upper layers that are using me. seq_lock | 305 | /* |
202 | * protects this. */ | 306 | * The list of upper layers that are using me. seq_lock |
307 | * protects this. | ||
308 | */ | ||
203 | struct list_head users; | 309 | struct list_head users; |
204 | 310 | ||
205 | /* Information to supply to users. */ | 311 | /* Information to supply to users. */ |
@@ -213,10 +319,12 @@ struct ipmi_smi | |||
213 | char *my_dev_name; | 319 | char *my_dev_name; |
214 | char *sysfs_name; | 320 | char *sysfs_name; |
215 | 321 | ||
216 | /* This is the lower-layer's sender routine. Note that you | 322 | /* |
323 | * This is the lower-layer's sender routine. Note that you | ||
217 | * must either be holding the ipmi_interfaces_mutex or be in | 324 | * must either be holding the ipmi_interfaces_mutex or be in |
218 | * an umpreemptible region to use this. You must fetch the | 325 | * an umpreemptible region to use this. You must fetch the |
219 | * value into a local variable and make sure it is not NULL. */ | 326 | * value into a local variable and make sure it is not NULL. |
327 | */ | ||
220 | struct ipmi_smi_handlers *handlers; | 328 | struct ipmi_smi_handlers *handlers; |
221 | void *send_info; | 329 | void *send_info; |
222 | 330 | ||
@@ -229,34 +337,45 @@ struct ipmi_smi | |||
229 | /* Driver-model device for the system interface. */ | 337 | /* Driver-model device for the system interface. */ |
230 | struct device *si_dev; | 338 | struct device *si_dev; |
231 | 339 | ||
232 | /* A table of sequence numbers for this interface. We use the | 340 | /* |
233 | sequence numbers for IPMB messages that go out of the | 341 | * A table of sequence numbers for this interface. We use the |
234 | interface to match them up with their responses. A routine | 342 | * sequence numbers for IPMB messages that go out of the |
235 | is called periodically to time the items in this list. */ | 343 | * interface to match them up with their responses. A routine |
344 | * is called periodically to time the items in this list. | ||
345 | */ | ||
236 | spinlock_t seq_lock; | 346 | spinlock_t seq_lock; |
237 | struct seq_table seq_table[IPMI_IPMB_NUM_SEQ]; | 347 | struct seq_table seq_table[IPMI_IPMB_NUM_SEQ]; |
238 | int curr_seq; | 348 | int curr_seq; |
239 | 349 | ||
240 | /* Messages that were delayed for some reason (out of memory, | 350 | /* |
241 | for instance), will go in here to be processed later in a | 351 | * Messages that were delayed for some reason (out of memory, |
242 | periodic timer interrupt. */ | 352 | * for instance), will go in here to be processed later in a |
353 | * periodic timer interrupt. | ||
354 | */ | ||
243 | spinlock_t waiting_msgs_lock; | 355 | spinlock_t waiting_msgs_lock; |
244 | struct list_head waiting_msgs; | 356 | struct list_head waiting_msgs; |
245 | 357 | ||
246 | /* The list of command receivers that are registered for commands | 358 | /* |
247 | on this interface. */ | 359 | * The list of command receivers that are registered for commands |
360 | * on this interface. | ||
361 | */ | ||
248 | struct mutex cmd_rcvrs_mutex; | 362 | struct mutex cmd_rcvrs_mutex; |
249 | struct list_head cmd_rcvrs; | 363 | struct list_head cmd_rcvrs; |
250 | 364 | ||
251 | /* Events that were queues because no one was there to receive | 365 | /* |
252 | them. */ | 366 | * Events that were queued because no one was there to receive |
367 | * them. | ||
368 | */ | ||
253 | spinlock_t events_lock; /* For dealing with event stuff. */ | 369 | spinlock_t events_lock; /* For dealing with event stuff. */ |
254 | struct list_head waiting_events; | 370 | struct list_head waiting_events; |
255 | unsigned int waiting_events_count; /* How many events in queue? */ | 371 | unsigned int waiting_events_count; /* How many events in queue? */ |
256 | int delivering_events; | 372 | char delivering_events; |
373 | char event_msg_printed; | ||
257 | 374 | ||
258 | /* The event receiver for my BMC, only really used at panic | 375 | /* |
259 | shutdown as a place to store this. */ | 376 | * The event receiver for my BMC, only really used at panic |
377 | * shutdown as a place to store this. | ||
378 | */ | ||
260 | unsigned char event_receiver; | 379 | unsigned char event_receiver; |
261 | unsigned char event_receiver_lun; | 380 | unsigned char event_receiver_lun; |
262 | unsigned char local_sel_device; | 381 | unsigned char local_sel_device; |
@@ -268,14 +387,18 @@ struct ipmi_smi | |||
268 | int auto_maintenance_timeout; | 387 | int auto_maintenance_timeout; |
269 | spinlock_t maintenance_mode_lock; /* Used in a timer... */ | 388 | spinlock_t maintenance_mode_lock; /* Used in a timer... */ |
270 | 389 | ||
271 | /* A cheap hack, if this is non-null and a message to an | 390 | /* |
272 | interface comes in with a NULL user, call this routine with | 391 | * A cheap hack, if this is non-null and a message to an |
273 | it. Note that the message will still be freed by the | 392 | * interface comes in with a NULL user, call this routine with |
274 | caller. This only works on the system interface. */ | 393 | * it. Note that the message will still be freed by the |
394 | * caller. This only works on the system interface. | ||
395 | */ | ||
275 | void (*null_user_handler)(ipmi_smi_t intf, struct ipmi_recv_msg *msg); | 396 | void (*null_user_handler)(ipmi_smi_t intf, struct ipmi_recv_msg *msg); |
276 | 397 | ||
277 | /* When we are scanning the channels for an SMI, this will | 398 | /* |
278 | tell which channel we are scanning. */ | 399 | * When we are scanning the channels for an SMI, this will |
400 | * tell which channel we are scanning. | ||
401 | */ | ||
279 | int curr_channel; | 402 | int curr_channel; |
280 | 403 | ||
281 | /* Channel information */ | 404 | /* Channel information */ |
@@ -285,74 +408,14 @@ struct ipmi_smi | |||
285 | struct proc_dir_entry *proc_dir; | 408 | struct proc_dir_entry *proc_dir; |
286 | char proc_dir_name[10]; | 409 | char proc_dir_name[10]; |
287 | 410 | ||
288 | spinlock_t counter_lock; /* For making counters atomic. */ | 411 | atomic_t stats[IPMI_NUM_STATS]; |
289 | |||
290 | /* Commands we got that were invalid. */ | ||
291 | unsigned int sent_invalid_commands; | ||
292 | |||
293 | /* Commands we sent to the MC. */ | ||
294 | unsigned int sent_local_commands; | ||
295 | /* Responses from the MC that were delivered to a user. */ | ||
296 | unsigned int handled_local_responses; | ||
297 | /* Responses from the MC that were not delivered to a user. */ | ||
298 | unsigned int unhandled_local_responses; | ||
299 | |||
300 | /* Commands we sent out to the IPMB bus. */ | ||
301 | unsigned int sent_ipmb_commands; | ||
302 | /* Commands sent on the IPMB that had errors on the SEND CMD */ | ||
303 | unsigned int sent_ipmb_command_errs; | ||
304 | /* Each retransmit increments this count. */ | ||
305 | unsigned int retransmitted_ipmb_commands; | ||
306 | /* When a message times out (runs out of retransmits) this is | ||
307 | incremented. */ | ||
308 | unsigned int timed_out_ipmb_commands; | ||
309 | |||
310 | /* This is like above, but for broadcasts. Broadcasts are | ||
311 | *not* included in the above count (they are expected to | ||
312 | time out). */ | ||
313 | unsigned int timed_out_ipmb_broadcasts; | ||
314 | 412 | ||
315 | /* Responses I have sent to the IPMB bus. */ | 413 | /* |
316 | unsigned int sent_ipmb_responses; | 414 | * run_to_completion duplicate of smb_info, smi_info |
317 | 415 | * and ipmi_serial_info structures. Used to decrease numbers of | |
318 | /* The response was delivered to the user. */ | 416 | * parameters passed by "low" level IPMI code. |
319 | unsigned int handled_ipmb_responses; | 417 | */ |
320 | /* The response had invalid data in it. */ | 418 | int run_to_completion; |
321 | unsigned int invalid_ipmb_responses; | ||
322 | /* The response didn't have anyone waiting for it. */ | ||
323 | unsigned int unhandled_ipmb_responses; | ||
324 | |||
325 | /* Commands we sent out to the IPMB bus. */ | ||
326 | unsigned int sent_lan_commands; | ||
327 | /* Commands sent on the IPMB that had errors on the SEND CMD */ | ||
328 | unsigned int sent_lan_command_errs; | ||
329 | /* Each retransmit increments this count. */ | ||
330 | unsigned int retransmitted_lan_commands; | ||
331 | /* When a message times out (runs out of retransmits) this is | ||
332 | incremented. */ | ||
333 | unsigned int timed_out_lan_commands; | ||
334 | |||
335 | /* Responses I have sent to the IPMB bus. */ | ||
336 | unsigned int sent_lan_responses; | ||
337 | |||
338 | /* The response was delivered to the user. */ | ||
339 | unsigned int handled_lan_responses; | ||
340 | /* The response had invalid data in it. */ | ||
341 | unsigned int invalid_lan_responses; | ||
342 | /* The response didn't have anyone waiting for it. */ | ||
343 | unsigned int unhandled_lan_responses; | ||
344 | |||
345 | /* The command was delivered to the user. */ | ||
346 | unsigned int handled_commands; | ||
347 | /* The command had invalid data in it. */ | ||
348 | unsigned int invalid_commands; | ||
349 | /* The command didn't have anyone waiting for it. */ | ||
350 | unsigned int unhandled_commands; | ||
351 | |||
352 | /* Invalid data in an event. */ | ||
353 | unsigned int invalid_events; | ||
354 | /* Events that were received with the proper format. */ | ||
355 | unsigned int events; | ||
356 | }; | 419 | }; |
357 | #define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev) | 420 | #define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev) |
358 | 421 | ||
@@ -368,12 +431,19 @@ static DEFINE_MUTEX(ipmidriver_mutex); | |||
368 | static LIST_HEAD(ipmi_interfaces); | 431 | static LIST_HEAD(ipmi_interfaces); |
369 | static DEFINE_MUTEX(ipmi_interfaces_mutex); | 432 | static DEFINE_MUTEX(ipmi_interfaces_mutex); |
370 | 433 | ||
371 | /* List of watchers that want to know when smi's are added and | 434 | /* |
372 | deleted. */ | 435 | * List of watchers that want to know when smi's are added and deleted. |
436 | */ | ||
373 | static LIST_HEAD(smi_watchers); | 437 | static LIST_HEAD(smi_watchers); |
374 | static DEFINE_MUTEX(smi_watchers_mutex); | 438 | static DEFINE_MUTEX(smi_watchers_mutex); |
375 | 439 | ||
376 | 440 | ||
441 | #define ipmi_inc_stat(intf, stat) \ | ||
442 | atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat]) | ||
443 | #define ipmi_get_stat(intf, stat) \ | ||
444 | ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat])) | ||
445 | |||
446 | |||
377 | static void free_recv_msg_list(struct list_head *q) | 447 | static void free_recv_msg_list(struct list_head *q) |
378 | { | 448 | { |
379 | struct ipmi_recv_msg *msg, *msg2; | 449 | struct ipmi_recv_msg *msg, *msg2; |
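The ipmi_inc_stat()/ipmi_get_stat() macros added in this hunk carry the statistics rework: the token-pasted name selects an IPMI_STAT_* index into the new atomic_t stats[] array, so bumping a counter no longer takes the old counter_lock spinlock. Roughly, the before/after shape is as follows (kernel context assumed, shown for orientation rather than as stand-alone compilable code):

        /* Old pattern, removed throughout this patch: */
        spin_lock_irqsave(&intf->counter_lock, flags);
        intf->sent_ipmb_commands++;
        spin_unlock_irqrestore(&intf->counter_lock, flags);

        /* New pattern: */
        ipmi_inc_stat(intf, sent_ipmb_commands);
        /* ...which expands to... */
        atomic_inc(&intf->stats[IPMI_STAT_sent_ipmb_commands]);

        /* Reading a counter back, e.g. for the proc files: */
        unsigned int n = ipmi_get_stat(intf, sent_ipmb_commands);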
@@ -417,10 +487,8 @@ static void clean_up_interface_data(ipmi_smi_t intf) | |||
417 | 487 | ||
418 | for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { | 488 | for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { |
419 | if ((intf->seq_table[i].inuse) | 489 | if ((intf->seq_table[i].inuse) |
420 | && (intf->seq_table[i].recv_msg)) | 490 | && (intf->seq_table[i].recv_msg)) |
421 | { | ||
422 | ipmi_free_recv_msg(intf->seq_table[i].recv_msg); | 491 | ipmi_free_recv_msg(intf->seq_table[i].recv_msg); |
423 | } | ||
424 | } | 492 | } |
425 | } | 493 | } |
426 | 494 | ||
@@ -487,6 +555,7 @@ int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher) | |||
487 | } | 555 | } |
488 | return -ENOMEM; | 556 | return -ENOMEM; |
489 | } | 557 | } |
558 | EXPORT_SYMBOL(ipmi_smi_watcher_register); | ||
490 | 559 | ||
491 | int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher) | 560 | int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher) |
492 | { | 561 | { |
@@ -495,6 +564,7 @@ int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher) | |||
495 | mutex_unlock(&smi_watchers_mutex); | 564 | mutex_unlock(&smi_watchers_mutex); |
496 | return 0; | 565 | return 0; |
497 | } | 566 | } |
567 | EXPORT_SYMBOL(ipmi_smi_watcher_unregister); | ||
498 | 568 | ||
499 | /* | 569 | /* |
500 | * Must be called with smi_watchers_mutex held. | 570 | * Must be called with smi_watchers_mutex held. |
@@ -530,8 +600,7 @@ ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2) | |||
530 | } | 600 | } |
531 | 601 | ||
532 | if ((addr1->addr_type == IPMI_IPMB_ADDR_TYPE) | 602 | if ((addr1->addr_type == IPMI_IPMB_ADDR_TYPE) |
533 | || (addr1->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)) | 603 | || (addr1->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)) { |
534 | { | ||
535 | struct ipmi_ipmb_addr *ipmb_addr1 | 604 | struct ipmi_ipmb_addr *ipmb_addr1 |
536 | = (struct ipmi_ipmb_addr *) addr1; | 605 | = (struct ipmi_ipmb_addr *) addr1; |
537 | struct ipmi_ipmb_addr *ipmb_addr2 | 606 | struct ipmi_ipmb_addr *ipmb_addr2 |
@@ -559,9 +628,8 @@ ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2) | |||
559 | 628 | ||
560 | int ipmi_validate_addr(struct ipmi_addr *addr, int len) | 629 | int ipmi_validate_addr(struct ipmi_addr *addr, int len) |
561 | { | 630 | { |
562 | if (len < sizeof(struct ipmi_system_interface_addr)) { | 631 | if (len < sizeof(struct ipmi_system_interface_addr)) |
563 | return -EINVAL; | 632 | return -EINVAL; |
564 | } | ||
565 | 633 | ||
566 | if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) { | 634 | if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) { |
567 | if (addr->channel != IPMI_BMC_CHANNEL) | 635 | if (addr->channel != IPMI_BMC_CHANNEL) |
@@ -575,23 +643,21 @@ int ipmi_validate_addr(struct ipmi_addr *addr, int len) | |||
575 | return -EINVAL; | 643 | return -EINVAL; |
576 | 644 | ||
577 | if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE) | 645 | if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE) |
578 | || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)) | 646 | || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)) { |
579 | { | 647 | if (len < sizeof(struct ipmi_ipmb_addr)) |
580 | if (len < sizeof(struct ipmi_ipmb_addr)) { | ||
581 | return -EINVAL; | 648 | return -EINVAL; |
582 | } | ||
583 | return 0; | 649 | return 0; |
584 | } | 650 | } |
585 | 651 | ||
586 | if (addr->addr_type == IPMI_LAN_ADDR_TYPE) { | 652 | if (addr->addr_type == IPMI_LAN_ADDR_TYPE) { |
587 | if (len < sizeof(struct ipmi_lan_addr)) { | 653 | if (len < sizeof(struct ipmi_lan_addr)) |
588 | return -EINVAL; | 654 | return -EINVAL; |
589 | } | ||
590 | return 0; | 655 | return 0; |
591 | } | 656 | } |
592 | 657 | ||
593 | return -EINVAL; | 658 | return -EINVAL; |
594 | } | 659 | } |
660 | EXPORT_SYMBOL(ipmi_validate_addr); | ||
595 | 661 | ||
596 | unsigned int ipmi_addr_length(int addr_type) | 662 | unsigned int ipmi_addr_length(int addr_type) |
597 | { | 663 | { |
@@ -599,34 +665,28 @@ unsigned int ipmi_addr_length(int addr_type) | |||
599 | return sizeof(struct ipmi_system_interface_addr); | 665 | return sizeof(struct ipmi_system_interface_addr); |
600 | 666 | ||
601 | if ((addr_type == IPMI_IPMB_ADDR_TYPE) | 667 | if ((addr_type == IPMI_IPMB_ADDR_TYPE) |
602 | || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)) | 668 | || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)) |
603 | { | ||
604 | return sizeof(struct ipmi_ipmb_addr); | 669 | return sizeof(struct ipmi_ipmb_addr); |
605 | } | ||
606 | 670 | ||
607 | if (addr_type == IPMI_LAN_ADDR_TYPE) | 671 | if (addr_type == IPMI_LAN_ADDR_TYPE) |
608 | return sizeof(struct ipmi_lan_addr); | 672 | return sizeof(struct ipmi_lan_addr); |
609 | 673 | ||
610 | return 0; | 674 | return 0; |
611 | } | 675 | } |
676 | EXPORT_SYMBOL(ipmi_addr_length); | ||
612 | 677 | ||
613 | static void deliver_response(struct ipmi_recv_msg *msg) | 678 | static void deliver_response(struct ipmi_recv_msg *msg) |
614 | { | 679 | { |
615 | if (!msg->user) { | 680 | if (!msg->user) { |
616 | ipmi_smi_t intf = msg->user_msg_data; | 681 | ipmi_smi_t intf = msg->user_msg_data; |
617 | unsigned long flags; | ||
618 | 682 | ||
619 | /* Special handling for NULL users. */ | 683 | /* Special handling for NULL users. */ |
620 | if (intf->null_user_handler) { | 684 | if (intf->null_user_handler) { |
621 | intf->null_user_handler(intf, msg); | 685 | intf->null_user_handler(intf, msg); |
622 | spin_lock_irqsave(&intf->counter_lock, flags); | 686 | ipmi_inc_stat(intf, handled_local_responses); |
623 | intf->handled_local_responses++; | ||
624 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
625 | } else { | 687 | } else { |
626 | /* No handler, so give up. */ | 688 | /* No handler, so give up. */ |
627 | spin_lock_irqsave(&intf->counter_lock, flags); | 689 | ipmi_inc_stat(intf, unhandled_local_responses); |
628 | intf->unhandled_local_responses++; | ||
629 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
630 | } | 690 | } |
631 | ipmi_free_recv_msg(msg); | 691 | ipmi_free_recv_msg(msg); |
632 | } else { | 692 | } else { |
@@ -646,9 +706,11 @@ deliver_err_response(struct ipmi_recv_msg *msg, int err) | |||
646 | deliver_response(msg); | 706 | deliver_response(msg); |
647 | } | 707 | } |
648 | 708 | ||
649 | /* Find the next sequence number not being used and add the given | 709 | /* |
650 | message with the given timeout to the sequence table. This must be | 710 | * Find the next sequence number not being used and add the given |
651 | called with the interface's seq_lock held. */ | 711 | * message with the given timeout to the sequence table. This must be |
712 | * called with the interface's seq_lock held. | ||
713 | */ | ||
652 | static int intf_next_seq(ipmi_smi_t intf, | 714 | static int intf_next_seq(ipmi_smi_t intf, |
653 | struct ipmi_recv_msg *recv_msg, | 715 | struct ipmi_recv_msg *recv_msg, |
654 | unsigned long timeout, | 716 | unsigned long timeout, |
@@ -660,10 +722,8 @@ static int intf_next_seq(ipmi_smi_t intf, | |||
660 | int rv = 0; | 722 | int rv = 0; |
661 | unsigned int i; | 723 | unsigned int i; |
662 | 724 | ||
663 | for (i = intf->curr_seq; | 725 | for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq; |
664 | (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq; | 726 | i = (i+1)%IPMI_IPMB_NUM_SEQ) { |
665 | i = (i+1)%IPMI_IPMB_NUM_SEQ) | ||
666 | { | ||
667 | if (!intf->seq_table[i].inuse) | 727 | if (!intf->seq_table[i].inuse) |
668 | break; | 728 | break; |
669 | } | 729 | } |
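The reformatted loop in intf_next_seq() scans the 64-entry sequence table circularly from curr_seq but stops one slot short, which is why the slot it ends on is re-checked by the `if (!intf->seq_table[i].inuse)` just below. A stand-alone model of that search, with the table reduced to an inuse[] flag array (hypothetical names, not driver code):

        #include <stdio.h>

        #define NUM_SEQ 64

        static int find_free_seq(const int inuse[NUM_SEQ], int curr)
        {
                int i;

                for (i = curr; (i + 1) % NUM_SEQ != curr; i = (i + 1) % NUM_SEQ) {
                        if (!inuse[i])
                                break;
                }
                /* The loop never tests the slot just before 'curr' and may also
                 * stop on a busy slot, so check whatever it ended on. */
                if (!inuse[i])
                        return i;
                return -1;              /* every sequence number is in use */
        }

        int main(void)
        {
                int inuse[NUM_SEQ] = { 0 };

                inuse[10] = inuse[11] = 1;
                printf("next free slot from 10: %d\n", find_free_seq(inuse, 10));
                /* prints: next free slot from 10: 12 */
                return 0;
        }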
@@ -671,8 +731,10 @@ static int intf_next_seq(ipmi_smi_t intf, | |||
671 | if (!intf->seq_table[i].inuse) { | 731 | if (!intf->seq_table[i].inuse) { |
672 | intf->seq_table[i].recv_msg = recv_msg; | 732 | intf->seq_table[i].recv_msg = recv_msg; |
673 | 733 | ||
674 | /* Start with the maximum timeout, when the send response | 734 | /* |
675 | comes in we will start the real timer. */ | 735 | * Start with the maximum timeout, when the send response |
736 | * comes in we will start the real timer. | ||
737 | */ | ||
676 | intf->seq_table[i].timeout = MAX_MSG_TIMEOUT; | 738 | intf->seq_table[i].timeout = MAX_MSG_TIMEOUT; |
677 | intf->seq_table[i].orig_timeout = timeout; | 739 | intf->seq_table[i].orig_timeout = timeout; |
678 | intf->seq_table[i].retries_left = retries; | 740 | intf->seq_table[i].retries_left = retries; |
@@ -685,15 +747,17 @@ static int intf_next_seq(ipmi_smi_t intf, | |||
685 | } else { | 747 | } else { |
686 | rv = -EAGAIN; | 748 | rv = -EAGAIN; |
687 | } | 749 | } |
688 | 750 | ||
689 | return rv; | 751 | return rv; |
690 | } | 752 | } |
691 | 753 | ||
692 | /* Return the receive message for the given sequence number and | 754 | /* |
693 | release the sequence number so it can be reused. Some other data | 755 | * Return the receive message for the given sequence number and |
694 | is passed in to be sure the message matches up correctly (to help | 756 | * release the sequence number so it can be reused. Some other data |
695 | guard against message coming in after their timeout and the | 757 | * is passed in to be sure the message matches up correctly (to help |
696 | sequence number being reused). */ | 758 | * guard against messages coming in after their timeout and the |
759 | * sequence number being reused). | ||
760 | */ | ||
697 | static int intf_find_seq(ipmi_smi_t intf, | 761 | static int intf_find_seq(ipmi_smi_t intf, |
698 | unsigned char seq, | 762 | unsigned char seq, |
699 | short channel, | 763 | short channel, |
@@ -712,11 +776,9 @@ static int intf_find_seq(ipmi_smi_t intf, | |||
712 | if (intf->seq_table[seq].inuse) { | 776 | if (intf->seq_table[seq].inuse) { |
713 | struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg; | 777 | struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg; |
714 | 778 | ||
715 | if ((msg->addr.channel == channel) | 779 | if ((msg->addr.channel == channel) && (msg->msg.cmd == cmd) |
716 | && (msg->msg.cmd == cmd) | 780 | && (msg->msg.netfn == netfn) |
717 | && (msg->msg.netfn == netfn) | 781 | && (ipmi_addr_equal(addr, &(msg->addr)))) { |
718 | && (ipmi_addr_equal(addr, &(msg->addr)))) | ||
719 | { | ||
720 | *recv_msg = msg; | 782 | *recv_msg = msg; |
721 | intf->seq_table[seq].inuse = 0; | 783 | intf->seq_table[seq].inuse = 0; |
722 | rv = 0; | 784 | rv = 0; |
@@ -741,11 +803,12 @@ static int intf_start_seq_timer(ipmi_smi_t intf, | |||
741 | GET_SEQ_FROM_MSGID(msgid, seq, seqid); | 803 | GET_SEQ_FROM_MSGID(msgid, seq, seqid); |
742 | 804 | ||
743 | spin_lock_irqsave(&(intf->seq_lock), flags); | 805 | spin_lock_irqsave(&(intf->seq_lock), flags); |
744 | /* We do this verification because the user can be deleted | 806 | /* |
745 | while a message is outstanding. */ | 807 | * We do this verification because the user can be deleted |
808 | * while a message is outstanding. | ||
809 | */ | ||
746 | if ((intf->seq_table[seq].inuse) | 810 | if ((intf->seq_table[seq].inuse) |
747 | && (intf->seq_table[seq].seqid == seqid)) | 811 | && (intf->seq_table[seq].seqid == seqid)) { |
748 | { | ||
749 | struct seq_table *ent = &(intf->seq_table[seq]); | 812 | struct seq_table *ent = &(intf->seq_table[seq]); |
750 | ent->timeout = ent->orig_timeout; | 813 | ent->timeout = ent->orig_timeout; |
751 | rv = 0; | 814 | rv = 0; |
@@ -770,11 +833,12 @@ static int intf_err_seq(ipmi_smi_t intf, | |||
770 | GET_SEQ_FROM_MSGID(msgid, seq, seqid); | 833 | GET_SEQ_FROM_MSGID(msgid, seq, seqid); |
771 | 834 | ||
772 | spin_lock_irqsave(&(intf->seq_lock), flags); | 835 | spin_lock_irqsave(&(intf->seq_lock), flags); |
773 | /* We do this verification because the user can be deleted | 836 | /* |
774 | while a message is outstanding. */ | 837 | * We do this verification because the user can be deleted |
838 | * while a message is outstanding. | ||
839 | */ | ||
775 | if ((intf->seq_table[seq].inuse) | 840 | if ((intf->seq_table[seq].inuse) |
776 | && (intf->seq_table[seq].seqid == seqid)) | 841 | && (intf->seq_table[seq].seqid == seqid)) { |
777 | { | ||
778 | struct seq_table *ent = &(intf->seq_table[seq]); | 842 | struct seq_table *ent = &(intf->seq_table[seq]); |
779 | 843 | ||
780 | ent->inuse = 0; | 844 | ent->inuse = 0; |
@@ -800,24 +864,30 @@ int ipmi_create_user(unsigned int if_num, | |||
800 | int rv = 0; | 864 | int rv = 0; |
801 | ipmi_smi_t intf; | 865 | ipmi_smi_t intf; |
802 | 866 | ||
803 | /* There is no module usecount here, because it's not | 867 | /* |
804 | required. Since this can only be used by and called from | 868 | * There is no module usecount here, because it's not |
805 | other modules, they will implicitly use this module, and | 869 | * required. Since this can only be used by and called from |
806 | thus this can't be removed unless the other modules are | 870 | * other modules, they will implicitly use this module, and |
807 | removed. */ | 871 | * thus this can't be removed unless the other modules are |
872 | * removed. | ||
873 | */ | ||
808 | 874 | ||
809 | if (handler == NULL) | 875 | if (handler == NULL) |
810 | return -EINVAL; | 876 | return -EINVAL; |
811 | 877 | ||
812 | /* Make sure the driver is actually initialized, this handles | 878 | /* |
813 | problems with initialization order. */ | 879 | * Make sure the driver is actually initialized, this handles |
880 | * problems with initialization order. | ||
881 | */ | ||
814 | if (!initialized) { | 882 | if (!initialized) { |
815 | rv = ipmi_init_msghandler(); | 883 | rv = ipmi_init_msghandler(); |
816 | if (rv) | 884 | if (rv) |
817 | return rv; | 885 | return rv; |
818 | 886 | ||
819 | /* The init code doesn't return an error if it was turned | 887 | /* |
820 | off, but it won't initialize. Check that. */ | 888 | * The init code doesn't return an error if it was turned |
889 | * off, but it won't initialize. Check that. | ||
890 | */ | ||
821 | if (!initialized) | 891 | if (!initialized) |
822 | return -ENODEV; | 892 | return -ENODEV; |
823 | } | 893 | } |
@@ -858,8 +928,10 @@ int ipmi_create_user(unsigned int if_num, | |||
858 | } | 928 | } |
859 | } | 929 | } |
860 | 930 | ||
861 | /* Hold the lock so intf->handlers is guaranteed to be good | 931 | /* |
862 | * until now */ | 932 | * Hold the lock so intf->handlers is guaranteed to be good |
933 | * until now | ||
934 | */ | ||
863 | mutex_unlock(&ipmi_interfaces_mutex); | 935 | mutex_unlock(&ipmi_interfaces_mutex); |
864 | 936 | ||
865 | new_user->valid = 1; | 937 | new_user->valid = 1; |
@@ -876,6 +948,7 @@ out_kfree: | |||
876 | kfree(new_user); | 948 | kfree(new_user); |
877 | return rv; | 949 | return rv; |
878 | } | 950 | } |
951 | EXPORT_SYMBOL(ipmi_create_user); | ||
879 | 952 | ||
880 | static void free_user(struct kref *ref) | 953 | static void free_user(struct kref *ref) |
881 | { | 954 | { |
@@ -899,8 +972,7 @@ int ipmi_destroy_user(ipmi_user_t user) | |||
899 | 972 | ||
900 | for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { | 973 | for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { |
901 | if (intf->seq_table[i].inuse | 974 | if (intf->seq_table[i].inuse |
902 | && (intf->seq_table[i].recv_msg->user == user)) | 975 | && (intf->seq_table[i].recv_msg->user == user)) { |
903 | { | ||
904 | intf->seq_table[i].inuse = 0; | 976 | intf->seq_table[i].inuse = 0; |
905 | ipmi_free_recv_msg(intf->seq_table[i].recv_msg); | 977 | ipmi_free_recv_msg(intf->seq_table[i].recv_msg); |
906 | } | 978 | } |
@@ -943,6 +1015,7 @@ int ipmi_destroy_user(ipmi_user_t user) | |||
943 | 1015 | ||
944 | return 0; | 1016 | return 0; |
945 | } | 1017 | } |
1018 | EXPORT_SYMBOL(ipmi_destroy_user); | ||
946 | 1019 | ||
947 | void ipmi_get_version(ipmi_user_t user, | 1020 | void ipmi_get_version(ipmi_user_t user, |
948 | unsigned char *major, | 1021 | unsigned char *major, |
@@ -951,6 +1024,7 @@ void ipmi_get_version(ipmi_user_t user, | |||
951 | *major = user->intf->ipmi_version_major; | 1024 | *major = user->intf->ipmi_version_major; |
952 | *minor = user->intf->ipmi_version_minor; | 1025 | *minor = user->intf->ipmi_version_minor; |
953 | } | 1026 | } |
1027 | EXPORT_SYMBOL(ipmi_get_version); | ||
954 | 1028 | ||
955 | int ipmi_set_my_address(ipmi_user_t user, | 1029 | int ipmi_set_my_address(ipmi_user_t user, |
956 | unsigned int channel, | 1030 | unsigned int channel, |
@@ -961,6 +1035,7 @@ int ipmi_set_my_address(ipmi_user_t user, | |||
961 | user->intf->channels[channel].address = address; | 1035 | user->intf->channels[channel].address = address; |
962 | return 0; | 1036 | return 0; |
963 | } | 1037 | } |
1038 | EXPORT_SYMBOL(ipmi_set_my_address); | ||
964 | 1039 | ||
965 | int ipmi_get_my_address(ipmi_user_t user, | 1040 | int ipmi_get_my_address(ipmi_user_t user, |
966 | unsigned int channel, | 1041 | unsigned int channel, |
@@ -971,6 +1046,7 @@ int ipmi_get_my_address(ipmi_user_t user, | |||
971 | *address = user->intf->channels[channel].address; | 1046 | *address = user->intf->channels[channel].address; |
972 | return 0; | 1047 | return 0; |
973 | } | 1048 | } |
1049 | EXPORT_SYMBOL(ipmi_get_my_address); | ||
974 | 1050 | ||
975 | int ipmi_set_my_LUN(ipmi_user_t user, | 1051 | int ipmi_set_my_LUN(ipmi_user_t user, |
976 | unsigned int channel, | 1052 | unsigned int channel, |
@@ -981,6 +1057,7 @@ int ipmi_set_my_LUN(ipmi_user_t user, | |||
981 | user->intf->channels[channel].lun = LUN & 0x3; | 1057 | user->intf->channels[channel].lun = LUN & 0x3; |
982 | return 0; | 1058 | return 0; |
983 | } | 1059 | } |
1060 | EXPORT_SYMBOL(ipmi_set_my_LUN); | ||
984 | 1061 | ||
985 | int ipmi_get_my_LUN(ipmi_user_t user, | 1062 | int ipmi_get_my_LUN(ipmi_user_t user, |
986 | unsigned int channel, | 1063 | unsigned int channel, |
@@ -991,6 +1068,7 @@ int ipmi_get_my_LUN(ipmi_user_t user, | |||
991 | *address = user->intf->channels[channel].lun; | 1068 | *address = user->intf->channels[channel].lun; |
992 | return 0; | 1069 | return 0; |
993 | } | 1070 | } |
1071 | EXPORT_SYMBOL(ipmi_get_my_LUN); | ||
994 | 1072 | ||
995 | int ipmi_get_maintenance_mode(ipmi_user_t user) | 1073 | int ipmi_get_maintenance_mode(ipmi_user_t user) |
996 | { | 1074 | { |
@@ -1075,6 +1153,11 @@ int ipmi_set_gets_events(ipmi_user_t user, int val) | |||
1075 | list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link) | 1153 | list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link) |
1076 | list_move_tail(&msg->link, &msgs); | 1154 | list_move_tail(&msg->link, &msgs); |
1077 | intf->waiting_events_count = 0; | 1155 | intf->waiting_events_count = 0; |
1156 | if (intf->event_msg_printed) { | ||
1157 | printk(KERN_WARNING PFX "Event queue no longer" | ||
1158 | " full\n"); | ||
1159 | intf->event_msg_printed = 0; | ||
1160 | } | ||
1078 | 1161 | ||
1079 | intf->delivering_events = 1; | 1162 | intf->delivering_events = 1; |
1080 | spin_unlock_irqrestore(&intf->events_lock, flags); | 1163 | spin_unlock_irqrestore(&intf->events_lock, flags); |
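The new event_msg_printed flag turns the queue-full warning into a one-shot: it is cleared here, when the event queue is drained, so the warning can fire again the next time the queue fills. The arming side is not visible in this hunk; a hypothetical sketch of what it presumably looks like (message text and placement are assumptions, kernel context):

        /* Somewhere on the enqueue path (sketch, not the patch's exact code): */
        if (intf->waiting_events_count >= MAX_EVENTS_IN_QUEUE) {
                if (!intf->event_msg_printed) {
                        printk(KERN_WARNING PFX "Event queue full, discarding events\n");
                        intf->event_msg_printed = 1;
                }
                /* drop the event instead of queueing it */
        }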
@@ -1094,6 +1177,7 @@ int ipmi_set_gets_events(ipmi_user_t user, int val) | |||
1094 | 1177 | ||
1095 | return 0; | 1178 | return 0; |
1096 | } | 1179 | } |
1180 | EXPORT_SYMBOL(ipmi_set_gets_events); | ||
1097 | 1181 | ||
1098 | static struct cmd_rcvr *find_cmd_rcvr(ipmi_smi_t intf, | 1182 | static struct cmd_rcvr *find_cmd_rcvr(ipmi_smi_t intf, |
1099 | unsigned char netfn, | 1183 | unsigned char netfn, |
@@ -1159,6 +1243,7 @@ int ipmi_register_for_cmd(ipmi_user_t user, | |||
1159 | 1243 | ||
1160 | return rv; | 1244 | return rv; |
1161 | } | 1245 | } |
1246 | EXPORT_SYMBOL(ipmi_register_for_cmd); | ||
1162 | 1247 | ||
1163 | int ipmi_unregister_for_cmd(ipmi_user_t user, | 1248 | int ipmi_unregister_for_cmd(ipmi_user_t user, |
1164 | unsigned char netfn, | 1249 | unsigned char netfn, |
@@ -1196,19 +1281,13 @@ int ipmi_unregister_for_cmd(ipmi_user_t user, | |||
1196 | } | 1281 | } |
1197 | return rv; | 1282 | return rv; |
1198 | } | 1283 | } |
1199 | 1284 | EXPORT_SYMBOL(ipmi_unregister_for_cmd); | |
1200 | void ipmi_user_set_run_to_completion(ipmi_user_t user, int val) | ||
1201 | { | ||
1202 | ipmi_smi_t intf = user->intf; | ||
1203 | if (intf->handlers) | ||
1204 | intf->handlers->set_run_to_completion(intf->send_info, val); | ||
1205 | } | ||
1206 | 1285 | ||
1207 | static unsigned char | 1286 | static unsigned char |
1208 | ipmb_checksum(unsigned char *data, int size) | 1287 | ipmb_checksum(unsigned char *data, int size) |
1209 | { | 1288 | { |
1210 | unsigned char csum = 0; | 1289 | unsigned char csum = 0; |
1211 | 1290 | ||
1212 | for (; size > 0; size--, data++) | 1291 | for (; size > 0; size--, data++) |
1213 | csum += *data; | 1292 | csum += *data; |
1214 | 1293 | ||
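ipmb_checksum() accumulates the covered bytes; the IPMI convention is a two's-complement checksum, i.e. the returned byte makes the modulo-256 sum of everything it covers come out to zero. The function's return statement falls outside this hunk, so the final negation below is an inference, not a quote. A stand-alone check of the property:

        #include <stdio.h>

        /* Two's-complement checksum in the style of ipmb_checksum(). */
        static unsigned char csum8(const unsigned char *data, int size)
        {
                unsigned char csum = 0;

                for (; size > 0; size--, data++)
                        csum += *data;
                return -csum;           /* data bytes + csum == 0 (mod 256) */
        }

        int main(void)
        {
                unsigned char hdr[2] = { 0x20, 0x18 };  /* example header bytes */
                unsigned char c = csum8(hdr, 2);
                unsigned int check = (unsigned char)(hdr[0] + hdr[1] + c);

                printf("csum=0x%02x check=%u\n", (unsigned int)c, check);
                /* prints: csum=0xc8 check=0 */
                return 0;
        }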
@@ -1250,8 +1329,10 @@ static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg, | |||
1250 | = ipmb_checksum(&(smi_msg->data[i+6]), | 1329 | = ipmb_checksum(&(smi_msg->data[i+6]), |
1251 | smi_msg->data_size-6); | 1330 | smi_msg->data_size-6); |
1252 | 1331 | ||
1253 | /* Add on the checksum size and the offset from the | 1332 | /* |
1254 | broadcast. */ | 1333 | * Add on the checksum size and the offset from the |
1334 | * broadcast. | ||
1335 | */ | ||
1255 | smi_msg->data_size += 1 + i; | 1336 | smi_msg->data_size += 1 + i; |
1256 | 1337 | ||
1257 | smi_msg->msgid = msgid; | 1338 | smi_msg->msgid = msgid; |
@@ -1287,17 +1368,21 @@ static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg, | |||
1287 | = ipmb_checksum(&(smi_msg->data[7]), | 1368 | = ipmb_checksum(&(smi_msg->data[7]), |
1288 | smi_msg->data_size-7); | 1369 | smi_msg->data_size-7); |
1289 | 1370 | ||
1290 | /* Add on the checksum size and the offset from the | 1371 | /* |
1291 | broadcast. */ | 1372 | * Add on the checksum size and the offset from the |
1373 | * broadcast. | ||
1374 | */ | ||
1292 | smi_msg->data_size += 1; | 1375 | smi_msg->data_size += 1; |
1293 | 1376 | ||
1294 | smi_msg->msgid = msgid; | 1377 | smi_msg->msgid = msgid; |
1295 | } | 1378 | } |
1296 | 1379 | ||
1297 | /* Separate from ipmi_request so that the user does not have to be | 1380 | /* |
1298 | supplied in certain circumstances (mainly at panic time). If | 1381 | * Separate from ipmi_request so that the user does not have to be |
1299 | messages are supplied, they will be freed, even if an error | 1382 | * supplied in certain circumstances (mainly at panic time). If |
1300 | occurs. */ | 1383 | * messages are supplied, they will be freed, even if an error |
1384 | * occurs. | ||
1385 | */ | ||
1301 | static int i_ipmi_request(ipmi_user_t user, | 1386 | static int i_ipmi_request(ipmi_user_t user, |
1302 | ipmi_smi_t intf, | 1387 | ipmi_smi_t intf, |
1303 | struct ipmi_addr *addr, | 1388 | struct ipmi_addr *addr, |
@@ -1319,19 +1404,18 @@ static int i_ipmi_request(ipmi_user_t user, | |||
1319 | struct ipmi_smi_handlers *handlers; | 1404 | struct ipmi_smi_handlers *handlers; |
1320 | 1405 | ||
1321 | 1406 | ||
1322 | if (supplied_recv) { | 1407 | if (supplied_recv) |
1323 | recv_msg = supplied_recv; | 1408 | recv_msg = supplied_recv; |
1324 | } else { | 1409 | else { |
1325 | recv_msg = ipmi_alloc_recv_msg(); | 1410 | recv_msg = ipmi_alloc_recv_msg(); |
1326 | if (recv_msg == NULL) { | 1411 | if (recv_msg == NULL) |
1327 | return -ENOMEM; | 1412 | return -ENOMEM; |
1328 | } | ||
1329 | } | 1413 | } |
1330 | recv_msg->user_msg_data = user_msg_data; | 1414 | recv_msg->user_msg_data = user_msg_data; |
1331 | 1415 | ||
1332 | if (supplied_smi) { | 1416 | if (supplied_smi) |
1333 | smi_msg = (struct ipmi_smi_msg *) supplied_smi; | 1417 | smi_msg = (struct ipmi_smi_msg *) supplied_smi; |
1334 | } else { | 1418 | else { |
1335 | smi_msg = ipmi_alloc_smi_msg(); | 1419 | smi_msg = ipmi_alloc_smi_msg(); |
1336 | if (smi_msg == NULL) { | 1420 | if (smi_msg == NULL) { |
1337 | ipmi_free_recv_msg(recv_msg); | 1421 | ipmi_free_recv_msg(recv_msg); |
@@ -1350,8 +1434,10 @@ static int i_ipmi_request(ipmi_user_t user, | |||
1350 | if (user) | 1434 | if (user) |
1351 | kref_get(&user->refcount); | 1435 | kref_get(&user->refcount); |
1352 | recv_msg->msgid = msgid; | 1436 | recv_msg->msgid = msgid; |
1353 | /* Store the message to send in the receive message so timeout | 1437 | /* |
1354 | responses can get the proper response data. */ | 1438 | * Store the message to send in the receive message so timeout |
1439 | * responses can get the proper response data. | ||
1440 | */ | ||
1355 | recv_msg->msg = *msg; | 1441 | recv_msg->msg = *msg; |
1356 | 1442 | ||
1357 | if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) { | 1443 | if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) { |
@@ -1365,9 +1451,7 @@ static int i_ipmi_request(ipmi_user_t user, | |||
1365 | 1451 | ||
1366 | smi_addr = (struct ipmi_system_interface_addr *) addr; | 1452 | smi_addr = (struct ipmi_system_interface_addr *) addr; |
1367 | if (smi_addr->lun > 3) { | 1453 | if (smi_addr->lun > 3) { |
1368 | spin_lock_irqsave(&intf->counter_lock, flags); | 1454 | ipmi_inc_stat(intf, sent_invalid_commands); |
1369 | intf->sent_invalid_commands++; | ||
1370 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
1371 | rv = -EINVAL; | 1455 | rv = -EINVAL; |
1372 | goto out_err; | 1456 | goto out_err; |
1373 | } | 1457 | } |
@@ -1377,13 +1461,12 @@ static int i_ipmi_request(ipmi_user_t user, | |||
1377 | if ((msg->netfn == IPMI_NETFN_APP_REQUEST) | 1461 | if ((msg->netfn == IPMI_NETFN_APP_REQUEST) |
1378 | && ((msg->cmd == IPMI_SEND_MSG_CMD) | 1462 | && ((msg->cmd == IPMI_SEND_MSG_CMD) |
1379 | || (msg->cmd == IPMI_GET_MSG_CMD) | 1463 | || (msg->cmd == IPMI_GET_MSG_CMD) |
1380 | || (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) | 1464 | || (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) { |
1381 | { | 1465 | /* |
1382 | /* We don't let the user do these, since we manage | 1466 | * We don't let the user do these, since we manage |
1383 | the sequence numbers. */ | 1467 | * the sequence numbers. |
1384 | spin_lock_irqsave(&intf->counter_lock, flags); | 1468 | */ |
1385 | intf->sent_invalid_commands++; | 1469 | ipmi_inc_stat(intf, sent_invalid_commands); |
1386 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
1387 | rv = -EINVAL; | 1470 | rv = -EINVAL; |
1388 | goto out_err; | 1471 | goto out_err; |
1389 | } | 1472 | } |
@@ -1391,14 +1474,12 @@ static int i_ipmi_request(ipmi_user_t user, | |||
1391 | if (((msg->netfn == IPMI_NETFN_APP_REQUEST) | 1474 | if (((msg->netfn == IPMI_NETFN_APP_REQUEST) |
1392 | && ((msg->cmd == IPMI_COLD_RESET_CMD) | 1475 | && ((msg->cmd == IPMI_COLD_RESET_CMD) |
1393 | || (msg->cmd == IPMI_WARM_RESET_CMD))) | 1476 | || (msg->cmd == IPMI_WARM_RESET_CMD))) |
1394 | || (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST)) | 1477 | || (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST)) { |
1395 | { | ||
1396 | spin_lock_irqsave(&intf->maintenance_mode_lock, flags); | 1478 | spin_lock_irqsave(&intf->maintenance_mode_lock, flags); |
1397 | intf->auto_maintenance_timeout | 1479 | intf->auto_maintenance_timeout |
1398 | = IPMI_MAINTENANCE_MODE_TIMEOUT; | 1480 | = IPMI_MAINTENANCE_MODE_TIMEOUT; |
1399 | if (!intf->maintenance_mode | 1481 | if (!intf->maintenance_mode |
1400 | && !intf->maintenance_mode_enable) | 1482 | && !intf->maintenance_mode_enable) { |
1401 | { | ||
1402 | intf->maintenance_mode_enable = 1; | 1483 | intf->maintenance_mode_enable = 1; |
1403 | maintenance_mode_update(intf); | 1484 | maintenance_mode_update(intf); |
1404 | } | 1485 | } |
@@ -1407,9 +1488,7 @@ static int i_ipmi_request(ipmi_user_t user, | |||
1407 | } | 1488 | } |
1408 | 1489 | ||
1409 | if ((msg->data_len + 2) > IPMI_MAX_MSG_LENGTH) { | 1490 | if ((msg->data_len + 2) > IPMI_MAX_MSG_LENGTH) { |
1410 | spin_lock_irqsave(&intf->counter_lock, flags); | 1491 | ipmi_inc_stat(intf, sent_invalid_commands); |
1411 | intf->sent_invalid_commands++; | ||
1412 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
1413 | rv = -EMSGSIZE; | 1492 | rv = -EMSGSIZE; |
1414 | goto out_err; | 1493 | goto out_err; |
1415 | } | 1494 | } |
@@ -1421,31 +1500,23 @@ static int i_ipmi_request(ipmi_user_t user, | |||
1421 | if (msg->data_len > 0) | 1500 | if (msg->data_len > 0) |
1422 | memcpy(&(smi_msg->data[2]), msg->data, msg->data_len); | 1501 | memcpy(&(smi_msg->data[2]), msg->data, msg->data_len); |
1423 | smi_msg->data_size = msg->data_len + 2; | 1502 | smi_msg->data_size = msg->data_len + 2; |
1424 | spin_lock_irqsave(&intf->counter_lock, flags); | 1503 | ipmi_inc_stat(intf, sent_local_commands); |
1425 | intf->sent_local_commands++; | ||
1426 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
1427 | } else if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE) | 1504 | } else if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE) |
1428 | || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)) | 1505 | || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)) { |
1429 | { | ||
1430 | struct ipmi_ipmb_addr *ipmb_addr; | 1506 | struct ipmi_ipmb_addr *ipmb_addr; |
1431 | unsigned char ipmb_seq; | 1507 | unsigned char ipmb_seq; |
1432 | long seqid; | 1508 | long seqid; |
1433 | int broadcast = 0; | 1509 | int broadcast = 0; |
1434 | 1510 | ||
1435 | if (addr->channel >= IPMI_MAX_CHANNELS) { | 1511 | if (addr->channel >= IPMI_MAX_CHANNELS) { |
1436 | spin_lock_irqsave(&intf->counter_lock, flags); | 1512 | ipmi_inc_stat(intf, sent_invalid_commands); |
1437 | intf->sent_invalid_commands++; | ||
1438 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
1439 | rv = -EINVAL; | 1513 | rv = -EINVAL; |
1440 | goto out_err; | 1514 | goto out_err; |
1441 | } | 1515 | } |
1442 | 1516 | ||
1443 | if (intf->channels[addr->channel].medium | 1517 | if (intf->channels[addr->channel].medium |
1444 | != IPMI_CHANNEL_MEDIUM_IPMB) | 1518 | != IPMI_CHANNEL_MEDIUM_IPMB) { |
1445 | { | 1519 | ipmi_inc_stat(intf, sent_invalid_commands); |
1446 | spin_lock_irqsave(&intf->counter_lock, flags); | ||
1447 | intf->sent_invalid_commands++; | ||
1448 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
1449 | rv = -EINVAL; | 1520 | rv = -EINVAL; |
1450 | goto out_err; | 1521 | goto out_err; |
1451 | } | 1522 | } |
@@ -1457,9 +1528,11 @@ static int i_ipmi_request(ipmi_user_t user, | |||
1457 | retries = 4; | 1528 | retries = 4; |
1458 | } | 1529 | } |
1459 | if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) { | 1530 | if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) { |
1460 | /* Broadcasts add a zero at the beginning of the | 1531 | /* |
1461 | message, but otherwise is the same as an IPMB | 1532 | * Broadcasts add a zero at the beginning of the |
1462 | address. */ | 1533 | * message, but otherwise is the same as an IPMB |
1534 | * address. | ||
1535 | */ | ||
1463 | addr->addr_type = IPMI_IPMB_ADDR_TYPE; | 1536 | addr->addr_type = IPMI_IPMB_ADDR_TYPE; |
1464 | broadcast = 1; | 1537 | broadcast = 1; |
1465 | } | 1538 | } |
@@ -1469,21 +1542,19 @@ static int i_ipmi_request(ipmi_user_t user, | |||
1469 | if (retry_time_ms == 0) | 1542 | if (retry_time_ms == 0) |
1470 | retry_time_ms = 1000; | 1543 | retry_time_ms = 1000; |
1471 | 1544 | ||
1472 | /* 9 for the header and 1 for the checksum, plus | 1545 | /* |
1473 | possibly one for the broadcast. */ | 1546 | * 9 for the header and 1 for the checksum, plus |
1547 | * possibly one for the broadcast. | ||
1548 | */ | ||
1474 | if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) { | 1549 | if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) { |
1475 | spin_lock_irqsave(&intf->counter_lock, flags); | 1550 | ipmi_inc_stat(intf, sent_invalid_commands); |
1476 | intf->sent_invalid_commands++; | ||
1477 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
1478 | rv = -EMSGSIZE; | 1551 | rv = -EMSGSIZE; |
1479 | goto out_err; | 1552 | goto out_err; |
1480 | } | 1553 | } |
1481 | 1554 | ||
1482 | ipmb_addr = (struct ipmi_ipmb_addr *) addr; | 1555 | ipmb_addr = (struct ipmi_ipmb_addr *) addr; |
1483 | if (ipmb_addr->lun > 3) { | 1556 | if (ipmb_addr->lun > 3) { |
1484 | spin_lock_irqsave(&intf->counter_lock, flags); | 1557 | ipmi_inc_stat(intf, sent_invalid_commands); |
1485 | intf->sent_invalid_commands++; | ||
1486 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
1487 | rv = -EINVAL; | 1558 | rv = -EINVAL; |
1488 | goto out_err; | 1559 | goto out_err; |
1489 | } | 1560 | } |
@@ -1491,29 +1562,31 @@ static int i_ipmi_request(ipmi_user_t user, | |||
1491 | memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr)); | 1562 | memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr)); |
1492 | 1563 | ||
1493 | if (recv_msg->msg.netfn & 0x1) { | 1564 | if (recv_msg->msg.netfn & 0x1) { |
1494 | /* It's a response, so use the user's sequence | 1565 | /* |
1495 | from msgid. */ | 1566 | * It's a response, so use the user's sequence |
1496 | spin_lock_irqsave(&intf->counter_lock, flags); | 1567 | * from msgid. |
1497 | intf->sent_ipmb_responses++; | 1568 | */ |
1498 | spin_unlock_irqrestore(&intf->counter_lock, flags); | 1569 | ipmi_inc_stat(intf, sent_ipmb_responses); |
1499 | format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid, | 1570 | format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid, |
1500 | msgid, broadcast, | 1571 | msgid, broadcast, |
1501 | source_address, source_lun); | 1572 | source_address, source_lun); |
1502 | 1573 | ||
1503 | /* Save the receive message so we can use it | 1574 | /* |
1504 | to deliver the response. */ | 1575 | * Save the receive message so we can use it |
1576 | * to deliver the response. | ||
1577 | */ | ||
1505 | smi_msg->user_data = recv_msg; | 1578 | smi_msg->user_data = recv_msg; |
1506 | } else { | 1579 | } else { |
1507 | /* It's a command, so get a sequence for it. */ | 1580 | /* It's a command, so get a sequence for it. */ |
1508 | 1581 | ||
1509 | spin_lock_irqsave(&(intf->seq_lock), flags); | 1582 | spin_lock_irqsave(&(intf->seq_lock), flags); |
1510 | 1583 | ||
1511 | spin_lock(&intf->counter_lock); | 1584 | ipmi_inc_stat(intf, sent_ipmb_commands); |
1512 | intf->sent_ipmb_commands++; | ||
1513 | spin_unlock(&intf->counter_lock); | ||
1514 | 1585 | ||
1515 | /* Create a sequence number with a 1 second | 1586 | /* |
1516 | timeout and 4 retries. */ | 1587 | * Create a sequence number with a 1 second |
1588 | * timeout and 4 retries. | ||
1589 | */ | ||
1517 | rv = intf_next_seq(intf, | 1590 | rv = intf_next_seq(intf, |
1518 | recv_msg, | 1591 | recv_msg, |
1519 | retry_time_ms, | 1592 | retry_time_ms, |
@@ -1522,34 +1595,42 @@ static int i_ipmi_request(ipmi_user_t user, | |||
1522 | &ipmb_seq, | 1595 | &ipmb_seq, |
1523 | &seqid); | 1596 | &seqid); |
1524 | if (rv) { | 1597 | if (rv) { |
1525 | /* We have used up all the sequence numbers, | 1598 | /* |
1526 | probably, so abort. */ | 1599 | * We have used up all the sequence numbers, |
1600 | * probably, so abort. | ||
1601 | */ | ||
1527 | spin_unlock_irqrestore(&(intf->seq_lock), | 1602 | spin_unlock_irqrestore(&(intf->seq_lock), |
1528 | flags); | 1603 | flags); |
1529 | goto out_err; | 1604 | goto out_err; |
1530 | } | 1605 | } |
1531 | 1606 | ||
1532 | /* Store the sequence number in the message, | 1607 | /* |
1533 | so that when the send message response | 1608 | * Store the sequence number in the message, |
1534 | comes back we can start the timer. */ | 1609 | * so that when the send message response |
1610 | * comes back we can start the timer. | ||
1611 | */ | ||
1535 | format_ipmb_msg(smi_msg, msg, ipmb_addr, | 1612 | format_ipmb_msg(smi_msg, msg, ipmb_addr, |
1536 | STORE_SEQ_IN_MSGID(ipmb_seq, seqid), | 1613 | STORE_SEQ_IN_MSGID(ipmb_seq, seqid), |
1537 | ipmb_seq, broadcast, | 1614 | ipmb_seq, broadcast, |
1538 | source_address, source_lun); | 1615 | source_address, source_lun); |
1539 | 1616 | ||
1540 | /* Copy the message into the recv message data, so we | 1617 | /* |
1541 | can retransmit it later if necessary. */ | 1618 | * Copy the message into the recv message data, so we |
1619 | * can retransmit it later if necessary. | ||
1620 | */ | ||
1542 | memcpy(recv_msg->msg_data, smi_msg->data, | 1621 | memcpy(recv_msg->msg_data, smi_msg->data, |
1543 | smi_msg->data_size); | 1622 | smi_msg->data_size); |
1544 | recv_msg->msg.data = recv_msg->msg_data; | 1623 | recv_msg->msg.data = recv_msg->msg_data; |
1545 | recv_msg->msg.data_len = smi_msg->data_size; | 1624 | recv_msg->msg.data_len = smi_msg->data_size; |
1546 | 1625 | ||
1547 | /* We don't unlock until here, because we need | 1626 | /* |
1548 | to copy the completed message into the | 1627 | * We don't unlock until here, because we need |
1549 | recv_msg before we release the lock. | 1628 | * to copy the completed message into the |
1550 | Otherwise, race conditions may bite us. I | 1629 | * recv_msg before we release the lock. |
1551 | know that's pretty paranoid, but I prefer | 1630 | * Otherwise, race conditions may bite us. I |
1552 | to be correct. */ | 1631 | * know that's pretty paranoid, but I prefer |
1632 | * to be correct. | ||
1633 | */ | ||
1553 | spin_unlock_irqrestore(&(intf->seq_lock), flags); | 1634 | spin_unlock_irqrestore(&(intf->seq_lock), flags); |
1554 | } | 1635 | } |
1555 | } else if (addr->addr_type == IPMI_LAN_ADDR_TYPE) { | 1636 | } else if (addr->addr_type == IPMI_LAN_ADDR_TYPE) { |
@@ -1558,21 +1639,16 @@ static int i_ipmi_request(ipmi_user_t user, | |||
1558 | long seqid; | 1639 | long seqid; |
1559 | 1640 | ||
1560 | if (addr->channel >= IPMI_MAX_CHANNELS) { | 1641 | if (addr->channel >= IPMI_MAX_CHANNELS) { |
1561 | spin_lock_irqsave(&intf->counter_lock, flags); | 1642 | ipmi_inc_stat(intf, sent_invalid_commands); |
1562 | intf->sent_invalid_commands++; | ||
1563 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
1564 | rv = -EINVAL; | 1643 | rv = -EINVAL; |
1565 | goto out_err; | 1644 | goto out_err; |
1566 | } | 1645 | } |
1567 | 1646 | ||
1568 | if ((intf->channels[addr->channel].medium | 1647 | if ((intf->channels[addr->channel].medium |
1569 | != IPMI_CHANNEL_MEDIUM_8023LAN) | 1648 | != IPMI_CHANNEL_MEDIUM_8023LAN) |
1570 | && (intf->channels[addr->channel].medium | 1649 | && (intf->channels[addr->channel].medium |
1571 | != IPMI_CHANNEL_MEDIUM_ASYNC)) | 1650 | != IPMI_CHANNEL_MEDIUM_ASYNC)) { |
1572 | { | 1651 | ipmi_inc_stat(intf, sent_invalid_commands); |
1573 | spin_lock_irqsave(&intf->counter_lock, flags); | ||
1574 | intf->sent_invalid_commands++; | ||
1575 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
1576 | rv = -EINVAL; | 1652 | rv = -EINVAL; |
1577 | goto out_err; | 1653 | goto out_err; |
1578 | } | 1654 | } |
@@ -1585,18 +1661,14 @@ static int i_ipmi_request(ipmi_user_t user, | |||
1585 | 1661 | ||
1586 | /* 11 for the header and 1 for the checksum. */ | 1662 | /* 11 for the header and 1 for the checksum. */ |
1587 | if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) { | 1663 | if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) { |
1588 | spin_lock_irqsave(&intf->counter_lock, flags); | 1664 | ipmi_inc_stat(intf, sent_invalid_commands); |
1589 | intf->sent_invalid_commands++; | ||
1590 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
1591 | rv = -EMSGSIZE; | 1665 | rv = -EMSGSIZE; |
1592 | goto out_err; | 1666 | goto out_err; |
1593 | } | 1667 | } |
1594 | 1668 | ||
1595 | lan_addr = (struct ipmi_lan_addr *) addr; | 1669 | lan_addr = (struct ipmi_lan_addr *) addr; |
1596 | if (lan_addr->lun > 3) { | 1670 | if (lan_addr->lun > 3) { |
1597 | spin_lock_irqsave(&intf->counter_lock, flags); | 1671 | ipmi_inc_stat(intf, sent_invalid_commands); |
1598 | intf->sent_invalid_commands++; | ||
1599 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
1600 | rv = -EINVAL; | 1672 | rv = -EINVAL; |
1601 | goto out_err; | 1673 | goto out_err; |
1602 | } | 1674 | } |
@@ -1604,28 +1676,30 @@ static int i_ipmi_request(ipmi_user_t user, | |||
1604 | memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr)); | 1676 | memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr)); |
1605 | 1677 | ||
1606 | if (recv_msg->msg.netfn & 0x1) { | 1678 | if (recv_msg->msg.netfn & 0x1) { |
1607 | /* It's a response, so use the user's sequence | 1679 | /* |
1608 | from msgid. */ | 1680 | * It's a response, so use the user's sequence |
1609 | spin_lock_irqsave(&intf->counter_lock, flags); | 1681 | * from msgid. |
1610 | intf->sent_lan_responses++; | 1682 | */ |
1611 | spin_unlock_irqrestore(&intf->counter_lock, flags); | 1683 | ipmi_inc_stat(intf, sent_lan_responses); |
1612 | format_lan_msg(smi_msg, msg, lan_addr, msgid, | 1684 | format_lan_msg(smi_msg, msg, lan_addr, msgid, |
1613 | msgid, source_lun); | 1685 | msgid, source_lun); |
1614 | 1686 | ||
1615 | /* Save the receive message so we can use it | 1687 | /* |
1616 | to deliver the response. */ | 1688 | * Save the receive message so we can use it |
1689 | * to deliver the response. | ||
1690 | */ | ||
1617 | smi_msg->user_data = recv_msg; | 1691 | smi_msg->user_data = recv_msg; |
1618 | } else { | 1692 | } else { |
1619 | /* It's a command, so get a sequence for it. */ | 1693 | /* It's a command, so get a sequence for it. */ |
1620 | 1694 | ||
1621 | spin_lock_irqsave(&(intf->seq_lock), flags); | 1695 | spin_lock_irqsave(&(intf->seq_lock), flags); |
1622 | 1696 | ||
1623 | spin_lock(&intf->counter_lock); | 1697 | ipmi_inc_stat(intf, sent_lan_commands); |
1624 | intf->sent_lan_commands++; | ||
1625 | spin_unlock(&intf->counter_lock); | ||
1626 | 1698 | ||
1627 | /* Create a sequence number with a 1 second | 1699 | /* |
1628 | timeout and 4 retries. */ | 1700 | * Create a sequence number with a 1 second |
1701 | * timeout and 4 retries. | ||
1702 | */ | ||
1629 | rv = intf_next_seq(intf, | 1703 | rv = intf_next_seq(intf, |
1630 | recv_msg, | 1704 | recv_msg, |
1631 | retry_time_ms, | 1705 | retry_time_ms, |
@@ -1634,40 +1708,46 @@ static int i_ipmi_request(ipmi_user_t user, | |||
1634 | &ipmb_seq, | 1708 | &ipmb_seq, |
1635 | &seqid); | 1709 | &seqid); |
1636 | if (rv) { | 1710 | if (rv) { |
1637 | /* We have used up all the sequence numbers, | 1711 | /* |
1638 | probably, so abort. */ | 1712 | * We have used up all the sequence numbers, |
1713 | * probably, so abort. | ||
1714 | */ | ||
1639 | spin_unlock_irqrestore(&(intf->seq_lock), | 1715 | spin_unlock_irqrestore(&(intf->seq_lock), |
1640 | flags); | 1716 | flags); |
1641 | goto out_err; | 1717 | goto out_err; |
1642 | } | 1718 | } |
1643 | 1719 | ||
1644 | /* Store the sequence number in the message, | 1720 | /* |
1645 | so that when the send message response | 1721 | * Store the sequence number in the message, |
1646 | comes back we can start the timer. */ | 1722 | * so that when the send message response |
1723 | * comes back we can start the timer. | ||
1724 | */ | ||
1647 | format_lan_msg(smi_msg, msg, lan_addr, | 1725 | format_lan_msg(smi_msg, msg, lan_addr, |
1648 | STORE_SEQ_IN_MSGID(ipmb_seq, seqid), | 1726 | STORE_SEQ_IN_MSGID(ipmb_seq, seqid), |
1649 | ipmb_seq, source_lun); | 1727 | ipmb_seq, source_lun); |
1650 | 1728 | ||
1651 | /* Copy the message into the recv message data, so we | 1729 | /* |
1652 | can retransmit it later if necessary. */ | 1730 | * Copy the message into the recv message data, so we |
1731 | * can retransmit it later if necessary. | ||
1732 | */ | ||
1653 | memcpy(recv_msg->msg_data, smi_msg->data, | 1733 | memcpy(recv_msg->msg_data, smi_msg->data, |
1654 | smi_msg->data_size); | 1734 | smi_msg->data_size); |
1655 | recv_msg->msg.data = recv_msg->msg_data; | 1735 | recv_msg->msg.data = recv_msg->msg_data; |
1656 | recv_msg->msg.data_len = smi_msg->data_size; | 1736 | recv_msg->msg.data_len = smi_msg->data_size; |
1657 | 1737 | ||
1658 | /* We don't unlock until here, because we need | 1738 | /* |
1659 | to copy the completed message into the | 1739 | * We don't unlock until here, because we need |
1660 | recv_msg before we release the lock. | 1740 | * to copy the completed message into the |
1661 | Otherwise, race conditions may bite us. I | 1741 | * recv_msg before we release the lock. |
1662 | know that's pretty paranoid, but I prefer | 1742 | * Otherwise, race conditions may bite us. I |
1663 | to be correct. */ | 1743 | * know that's pretty paranoid, but I prefer |
1744 | * to be correct. | ||
1745 | */ | ||
1664 | spin_unlock_irqrestore(&(intf->seq_lock), flags); | 1746 | spin_unlock_irqrestore(&(intf->seq_lock), flags); |
1665 | } | 1747 | } |
1666 | } else { | 1748 | } else { |
1667 | /* Unknown address type. */ | 1749 | /* Unknown address type. */ |
1668 | spin_lock_irqsave(&intf->counter_lock, flags); | 1750 | ipmi_inc_stat(intf, sent_invalid_commands); |
1669 | intf->sent_invalid_commands++; | ||
1670 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
1671 | rv = -EINVAL; | 1751 | rv = -EINVAL; |
1672 | goto out_err; | 1752 | goto out_err; |
1673 | } | 1753 | } |
@@ -1735,6 +1815,7 @@ int ipmi_request_settime(ipmi_user_t user, | |||
1735 | retries, | 1815 | retries, |
1736 | retry_time_ms); | 1816 | retry_time_ms); |
1737 | } | 1817 | } |
1818 | EXPORT_SYMBOL(ipmi_request_settime); | ||
1738 | 1819 | ||
1739 | int ipmi_request_supply_msgs(ipmi_user_t user, | 1820 | int ipmi_request_supply_msgs(ipmi_user_t user, |
1740 | struct ipmi_addr *addr, | 1821 | struct ipmi_addr *addr, |
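The EXPORT_SYMBOL() lines added here and in later hunks follow the convention of placing each export directly under the closing brace of the function it exports rather than grouping them elsewhere in the file. A standalone illustration of the pattern (example_exported_call is a made-up name, not from the patch):

	#include <linux/module.h>

	/* pattern used for every export added by this patch */
	int example_exported_call(void)
	{
		return 0;
	}
	EXPORT_SYMBOL(example_exported_call);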
@@ -1766,6 +1847,7 @@ int ipmi_request_supply_msgs(ipmi_user_t user, | |||
1766 | lun, | 1847 | lun, |
1767 | -1, 0); | 1848 | -1, 0); |
1768 | } | 1849 | } |
1850 | EXPORT_SYMBOL(ipmi_request_supply_msgs); | ||
1769 | 1851 | ||
1770 | #ifdef CONFIG_PROC_FS | 1852 | #ifdef CONFIG_PROC_FS |
1771 | static int ipmb_file_read_proc(char *page, char **start, off_t off, | 1853 | static int ipmb_file_read_proc(char *page, char **start, off_t off, |
@@ -1790,7 +1872,7 @@ static int version_file_read_proc(char *page, char **start, off_t off, | |||
1790 | char *out = (char *) page; | 1872 | char *out = (char *) page; |
1791 | ipmi_smi_t intf = data; | 1873 | ipmi_smi_t intf = data; |
1792 | 1874 | ||
1793 | return sprintf(out, "%d.%d\n", | 1875 | return sprintf(out, "%u.%u\n", |
1794 | ipmi_version_major(&intf->bmc->id), | 1876 | ipmi_version_major(&intf->bmc->id), |
1795 | ipmi_version_minor(&intf->bmc->id)); | 1877 | ipmi_version_minor(&intf->bmc->id)); |
1796 | } | 1878 | } |
@@ -1801,65 +1883,65 @@ static int stat_file_read_proc(char *page, char **start, off_t off, | |||
1801 | char *out = (char *) page; | 1883 | char *out = (char *) page; |
1802 | ipmi_smi_t intf = data; | 1884 | ipmi_smi_t intf = data; |
1803 | 1885 | ||
1804 | out += sprintf(out, "sent_invalid_commands: %d\n", | 1886 | out += sprintf(out, "sent_invalid_commands: %u\n", |
1805 | intf->sent_invalid_commands); | 1887 | ipmi_get_stat(intf, sent_invalid_commands)); |
1806 | out += sprintf(out, "sent_local_commands: %d\n", | 1888 | out += sprintf(out, "sent_local_commands: %u\n", |
1807 | intf->sent_local_commands); | 1889 | ipmi_get_stat(intf, sent_local_commands)); |
1808 | out += sprintf(out, "handled_local_responses: %d\n", | 1890 | out += sprintf(out, "handled_local_responses: %u\n", |
1809 | intf->handled_local_responses); | 1891 | ipmi_get_stat(intf, handled_local_responses)); |
1810 | out += sprintf(out, "unhandled_local_responses: %d\n", | 1892 | out += sprintf(out, "unhandled_local_responses: %u\n", |
1811 | intf->unhandled_local_responses); | 1893 | ipmi_get_stat(intf, unhandled_local_responses)); |
1812 | out += sprintf(out, "sent_ipmb_commands: %d\n", | 1894 | out += sprintf(out, "sent_ipmb_commands: %u\n", |
1813 | intf->sent_ipmb_commands); | 1895 | ipmi_get_stat(intf, sent_ipmb_commands)); |
1814 | out += sprintf(out, "sent_ipmb_command_errs: %d\n", | 1896 | out += sprintf(out, "sent_ipmb_command_errs: %u\n", |
1815 | intf->sent_ipmb_command_errs); | 1897 | ipmi_get_stat(intf, sent_ipmb_command_errs)); |
1816 | out += sprintf(out, "retransmitted_ipmb_commands: %d\n", | 1898 | out += sprintf(out, "retransmitted_ipmb_commands: %u\n", |
1817 | intf->retransmitted_ipmb_commands); | 1899 | ipmi_get_stat(intf, retransmitted_ipmb_commands)); |
1818 | out += sprintf(out, "timed_out_ipmb_commands: %d\n", | 1900 | out += sprintf(out, "timed_out_ipmb_commands: %u\n", |
1819 | intf->timed_out_ipmb_commands); | 1901 | ipmi_get_stat(intf, timed_out_ipmb_commands)); |
1820 | out += sprintf(out, "timed_out_ipmb_broadcasts: %d\n", | 1902 | out += sprintf(out, "timed_out_ipmb_broadcasts: %u\n", |
1821 | intf->timed_out_ipmb_broadcasts); | 1903 | ipmi_get_stat(intf, timed_out_ipmb_broadcasts)); |
1822 | out += sprintf(out, "sent_ipmb_responses: %d\n", | 1904 | out += sprintf(out, "sent_ipmb_responses: %u\n", |
1823 | intf->sent_ipmb_responses); | 1905 | ipmi_get_stat(intf, sent_ipmb_responses)); |
1824 | out += sprintf(out, "handled_ipmb_responses: %d\n", | 1906 | out += sprintf(out, "handled_ipmb_responses: %u\n", |
1825 | intf->handled_ipmb_responses); | 1907 | ipmi_get_stat(intf, handled_ipmb_responses)); |
1826 | out += sprintf(out, "invalid_ipmb_responses: %d\n", | 1908 | out += sprintf(out, "invalid_ipmb_responses: %u\n", |
1827 | intf->invalid_ipmb_responses); | 1909 | ipmi_get_stat(intf, invalid_ipmb_responses)); |
1828 | out += sprintf(out, "unhandled_ipmb_responses: %d\n", | 1910 | out += sprintf(out, "unhandled_ipmb_responses: %u\n", |
1829 | intf->unhandled_ipmb_responses); | 1911 | ipmi_get_stat(intf, unhandled_ipmb_responses)); |
1830 | out += sprintf(out, "sent_lan_commands: %d\n", | 1912 | out += sprintf(out, "sent_lan_commands: %u\n", |
1831 | intf->sent_lan_commands); | 1913 | ipmi_get_stat(intf, sent_lan_commands)); |
1832 | out += sprintf(out, "sent_lan_command_errs: %d\n", | 1914 | out += sprintf(out, "sent_lan_command_errs: %u\n", |
1833 | intf->sent_lan_command_errs); | 1915 | ipmi_get_stat(intf, sent_lan_command_errs)); |
1834 | out += sprintf(out, "retransmitted_lan_commands: %d\n", | 1916 | out += sprintf(out, "retransmitted_lan_commands: %u\n", |
1835 | intf->retransmitted_lan_commands); | 1917 | ipmi_get_stat(intf, retransmitted_lan_commands)); |
1836 | out += sprintf(out, "timed_out_lan_commands: %d\n", | 1918 | out += sprintf(out, "timed_out_lan_commands: %u\n", |
1837 | intf->timed_out_lan_commands); | 1919 | ipmi_get_stat(intf, timed_out_lan_commands)); |
1838 | out += sprintf(out, "sent_lan_responses: %d\n", | 1920 | out += sprintf(out, "sent_lan_responses: %u\n", |
1839 | intf->sent_lan_responses); | 1921 | ipmi_get_stat(intf, sent_lan_responses)); |
1840 | out += sprintf(out, "handled_lan_responses: %d\n", | 1922 | out += sprintf(out, "handled_lan_responses: %u\n", |
1841 | intf->handled_lan_responses); | 1923 | ipmi_get_stat(intf, handled_lan_responses)); |
1842 | out += sprintf(out, "invalid_lan_responses: %d\n", | 1924 | out += sprintf(out, "invalid_lan_responses: %u\n", |
1843 | intf->invalid_lan_responses); | 1925 | ipmi_get_stat(intf, invalid_lan_responses)); |
1844 | out += sprintf(out, "unhandled_lan_responses: %d\n", | 1926 | out += sprintf(out, "unhandled_lan_responses: %u\n", |
1845 | intf->unhandled_lan_responses); | 1927 | ipmi_get_stat(intf, unhandled_lan_responses)); |
1846 | out += sprintf(out, "handled_commands: %d\n", | 1928 | out += sprintf(out, "handled_commands: %u\n", |
1847 | intf->handled_commands); | 1929 | ipmi_get_stat(intf, handled_commands)); |
1848 | out += sprintf(out, "invalid_commands: %d\n", | 1930 | out += sprintf(out, "invalid_commands: %u\n", |
1849 | intf->invalid_commands); | 1931 | ipmi_get_stat(intf, invalid_commands)); |
1850 | out += sprintf(out, "unhandled_commands: %d\n", | 1932 | out += sprintf(out, "unhandled_commands: %u\n", |
1851 | intf->unhandled_commands); | 1933 | ipmi_get_stat(intf, unhandled_commands)); |
1852 | out += sprintf(out, "invalid_events: %d\n", | 1934 | out += sprintf(out, "invalid_events: %u\n", |
1853 | intf->invalid_events); | 1935 | ipmi_get_stat(intf, invalid_events)); |
1854 | out += sprintf(out, "events: %d\n", | 1936 | out += sprintf(out, "events: %u\n", |
1855 | intf->events); | 1937 | ipmi_get_stat(intf, events)); |
1856 | 1938 | ||
1857 | return (out - ((char *) page)); | 1939 | return (out - ((char *) page)); |
1858 | } | 1940 | } |
1859 | #endif /* CONFIG_PROC_FS */ | 1941 | #endif /* CONFIG_PROC_FS */ |
1860 | 1942 | ||
1861 | int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name, | 1943 | int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name, |
1862 | read_proc_t *read_proc, write_proc_t *write_proc, | 1944 | read_proc_t *read_proc, |
1863 | void *data, struct module *owner) | 1945 | void *data, struct module *owner) |
1864 | { | 1946 | { |
1865 | int rv = 0; | 1947 | int rv = 0; |
@@ -1886,7 +1968,6 @@ int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name, | |||
1886 | } else { | 1968 | } else { |
1887 | file->data = data; | 1969 | file->data = data; |
1888 | file->read_proc = read_proc; | 1970 | file->read_proc = read_proc; |
1889 | file->write_proc = write_proc; | ||
1890 | file->owner = owner; | 1971 | file->owner = owner; |
1891 | 1972 | ||
1892 | mutex_lock(&smi->proc_entry_lock); | 1973 | mutex_lock(&smi->proc_entry_lock); |
@@ -1899,6 +1980,7 @@ int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name, | |||
1899 | 1980 | ||
1900 | return rv; | 1981 | return rv; |
1901 | } | 1982 | } |
1983 | EXPORT_SYMBOL(ipmi_smi_add_proc_entry); | ||
1902 | 1984 | ||
1903 | static int add_proc_entries(ipmi_smi_t smi, int num) | 1985 | static int add_proc_entries(ipmi_smi_t smi, int num) |
1904 | { | 1986 | { |
@@ -1909,23 +1991,22 @@ static int add_proc_entries(ipmi_smi_t smi, int num) | |||
1909 | smi->proc_dir = proc_mkdir(smi->proc_dir_name, proc_ipmi_root); | 1991 | smi->proc_dir = proc_mkdir(smi->proc_dir_name, proc_ipmi_root); |
1910 | if (!smi->proc_dir) | 1992 | if (!smi->proc_dir) |
1911 | rv = -ENOMEM; | 1993 | rv = -ENOMEM; |
1912 | else { | 1994 | else |
1913 | smi->proc_dir->owner = THIS_MODULE; | 1995 | smi->proc_dir->owner = THIS_MODULE; |
1914 | } | ||
1915 | 1996 | ||
1916 | if (rv == 0) | 1997 | if (rv == 0) |
1917 | rv = ipmi_smi_add_proc_entry(smi, "stats", | 1998 | rv = ipmi_smi_add_proc_entry(smi, "stats", |
1918 | stat_file_read_proc, NULL, | 1999 | stat_file_read_proc, |
1919 | smi, THIS_MODULE); | 2000 | smi, THIS_MODULE); |
1920 | 2001 | ||
1921 | if (rv == 0) | 2002 | if (rv == 0) |
1922 | rv = ipmi_smi_add_proc_entry(smi, "ipmb", | 2003 | rv = ipmi_smi_add_proc_entry(smi, "ipmb", |
1923 | ipmb_file_read_proc, NULL, | 2004 | ipmb_file_read_proc, |
1924 | smi, THIS_MODULE); | 2005 | smi, THIS_MODULE); |
1925 | 2006 | ||
1926 | if (rv == 0) | 2007 | if (rv == 0) |
1927 | rv = ipmi_smi_add_proc_entry(smi, "version", | 2008 | rv = ipmi_smi_add_proc_entry(smi, "version", |
1928 | version_file_read_proc, NULL, | 2009 | version_file_read_proc, |
1929 | smi, THIS_MODULE); | 2010 | smi, THIS_MODULE); |
1930 | #endif /* CONFIG_PROC_FS */ | 2011 | #endif /* CONFIG_PROC_FS */ |
1931 | 2012 | ||
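With the unused write_proc argument dropped, ipmi_smi_add_proc_entry() takes only the read handler plus its data and owner, and add_proc_entries() calls it accordingly; roughly:

	int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
				    read_proc_t *read_proc,
				    void *data, struct module *owner);

	/* caller side, as in add_proc_entries() above */
	rv = ipmi_smi_add_proc_entry(smi, "stats",
				     stat_file_read_proc,
				     smi, THIS_MODULE);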
@@ -2210,37 +2291,47 @@ static int create_files(struct bmc_device *bmc) | |||
2210 | 2291 | ||
2211 | err = device_create_file(&bmc->dev->dev, | 2292 | err = device_create_file(&bmc->dev->dev, |
2212 | &bmc->device_id_attr); | 2293 | &bmc->device_id_attr); |
2213 | if (err) goto out; | 2294 | if (err) |
2295 | goto out; | ||
2214 | err = device_create_file(&bmc->dev->dev, | 2296 | err = device_create_file(&bmc->dev->dev, |
2215 | &bmc->provides_dev_sdrs_attr); | 2297 | &bmc->provides_dev_sdrs_attr); |
2216 | if (err) goto out_devid; | 2298 | if (err) |
2299 | goto out_devid; | ||
2217 | err = device_create_file(&bmc->dev->dev, | 2300 | err = device_create_file(&bmc->dev->dev, |
2218 | &bmc->revision_attr); | 2301 | &bmc->revision_attr); |
2219 | if (err) goto out_sdrs; | 2302 | if (err) |
2303 | goto out_sdrs; | ||
2220 | err = device_create_file(&bmc->dev->dev, | 2304 | err = device_create_file(&bmc->dev->dev, |
2221 | &bmc->firmware_rev_attr); | 2305 | &bmc->firmware_rev_attr); |
2222 | if (err) goto out_rev; | 2306 | if (err) |
2307 | goto out_rev; | ||
2223 | err = device_create_file(&bmc->dev->dev, | 2308 | err = device_create_file(&bmc->dev->dev, |
2224 | &bmc->version_attr); | 2309 | &bmc->version_attr); |
2225 | if (err) goto out_firm; | 2310 | if (err) |
2311 | goto out_firm; | ||
2226 | err = device_create_file(&bmc->dev->dev, | 2312 | err = device_create_file(&bmc->dev->dev, |
2227 | &bmc->add_dev_support_attr); | 2313 | &bmc->add_dev_support_attr); |
2228 | if (err) goto out_version; | 2314 | if (err) |
2315 | goto out_version; | ||
2229 | err = device_create_file(&bmc->dev->dev, | 2316 | err = device_create_file(&bmc->dev->dev, |
2230 | &bmc->manufacturer_id_attr); | 2317 | &bmc->manufacturer_id_attr); |
2231 | if (err) goto out_add_dev; | 2318 | if (err) |
2319 | goto out_add_dev; | ||
2232 | err = device_create_file(&bmc->dev->dev, | 2320 | err = device_create_file(&bmc->dev->dev, |
2233 | &bmc->product_id_attr); | 2321 | &bmc->product_id_attr); |
2234 | if (err) goto out_manu; | 2322 | if (err) |
2323 | goto out_manu; | ||
2235 | if (bmc->id.aux_firmware_revision_set) { | 2324 | if (bmc->id.aux_firmware_revision_set) { |
2236 | err = device_create_file(&bmc->dev->dev, | 2325 | err = device_create_file(&bmc->dev->dev, |
2237 | &bmc->aux_firmware_rev_attr); | 2326 | &bmc->aux_firmware_rev_attr); |
2238 | if (err) goto out_prod_id; | 2327 | if (err) |
2328 | goto out_prod_id; | ||
2239 | } | 2329 | } |
2240 | if (bmc->guid_set) { | 2330 | if (bmc->guid_set) { |
2241 | err = device_create_file(&bmc->dev->dev, | 2331 | err = device_create_file(&bmc->dev->dev, |
2242 | &bmc->guid_attr); | 2332 | &bmc->guid_attr); |
2243 | if (err) goto out_aux_firm; | 2333 | if (err) |
2334 | goto out_aux_firm; | ||
2244 | } | 2335 | } |
2245 | 2336 | ||
2246 | return 0; | 2337 | return 0; |
@@ -2368,8 +2459,10 @@ static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum, | |||
2368 | "ipmi_msghandler:" | 2459 | "ipmi_msghandler:" |
2369 | " Unable to register bmc device: %d\n", | 2460 | " Unable to register bmc device: %d\n", |
2370 | rv); | 2461 | rv); |
2371 | /* Don't go to out_err, you can only do that if | 2462 | /* |
2372 | the device is registered already. */ | 2463 | * Don't go to out_err, you can only do that if |
2464 | * the device is registered already. | ||
2465 | */ | ||
2373 | return rv; | 2466 | return rv; |
2374 | } | 2467 | } |
2375 | 2468 | ||
@@ -2560,17 +2653,18 @@ channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg) | |||
2560 | 2653 | ||
2561 | if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) | 2654 | if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) |
2562 | && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE) | 2655 | && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE) |
2563 | && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) | 2656 | && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) { |
2564 | { | ||
2565 | /* It's the one we want */ | 2657 | /* It's the one we want */ |
2566 | if (msg->msg.data[0] != 0) { | 2658 | if (msg->msg.data[0] != 0) { |
2567 | /* Got an error from the channel, just go on. */ | 2659 | /* Got an error from the channel, just go on. */ |
2568 | 2660 | ||
2569 | if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) { | 2661 | if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) { |
2570 | /* If the MC does not support this | 2662 | /* |
2571 | command, that is legal. We just | 2663 | * If the MC does not support this |
2572 | assume it has one IPMB at channel | 2664 | * command, that is legal. We just |
2573 | zero. */ | 2665 | * assume it has one IPMB at channel |
2666 | * zero. | ||
2667 | */ | ||
2574 | intf->channels[0].medium | 2668 | intf->channels[0].medium |
2575 | = IPMI_CHANNEL_MEDIUM_IPMB; | 2669 | = IPMI_CHANNEL_MEDIUM_IPMB; |
2576 | intf->channels[0].protocol | 2670 | intf->channels[0].protocol |
@@ -2591,7 +2685,7 @@ channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg) | |||
2591 | intf->channels[chan].medium = msg->msg.data[2] & 0x7f; | 2685 | intf->channels[chan].medium = msg->msg.data[2] & 0x7f; |
2592 | intf->channels[chan].protocol = msg->msg.data[3] & 0x1f; | 2686 | intf->channels[chan].protocol = msg->msg.data[3] & 0x1f; |
2593 | 2687 | ||
2594 | next_channel: | 2688 | next_channel: |
2595 | intf->curr_channel++; | 2689 | intf->curr_channel++; |
2596 | if (intf->curr_channel >= IPMI_MAX_CHANNELS) | 2690 | if (intf->curr_channel >= IPMI_MAX_CHANNELS) |
2597 | wake_up(&intf->waitq); | 2691 | wake_up(&intf->waitq); |
@@ -2619,6 +2713,7 @@ void ipmi_poll_interface(ipmi_user_t user) | |||
2619 | if (intf->handlers->poll) | 2713 | if (intf->handlers->poll) |
2620 | intf->handlers->poll(intf->send_info); | 2714 | intf->handlers->poll(intf->send_info); |
2621 | } | 2715 | } |
2716 | EXPORT_SYMBOL(ipmi_poll_interface); | ||
2622 | 2717 | ||
2623 | int ipmi_register_smi(struct ipmi_smi_handlers *handlers, | 2718 | int ipmi_register_smi(struct ipmi_smi_handlers *handlers, |
2624 | void *send_info, | 2719 | void *send_info, |
@@ -2633,14 +2728,18 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers, | |||
2633 | ipmi_smi_t tintf; | 2728 | ipmi_smi_t tintf; |
2634 | struct list_head *link; | 2729 | struct list_head *link; |
2635 | 2730 | ||
2636 | /* Make sure the driver is actually initialized, this handles | 2731 | /* |
2637 | problems with initialization order. */ | 2732 | * Make sure the driver is actually initialized, this handles |
2733 | * problems with initialization order. | ||
2734 | */ | ||
2638 | if (!initialized) { | 2735 | if (!initialized) { |
2639 | rv = ipmi_init_msghandler(); | 2736 | rv = ipmi_init_msghandler(); |
2640 | if (rv) | 2737 | if (rv) |
2641 | return rv; | 2738 | return rv; |
2642 | /* The init code doesn't return an error if it was turned | 2739 | /* |
2643 | off, but it won't initialize. Check that. */ | 2740 | * The init code doesn't return an error if it was turned |
2741 | * off, but it won't initialize. Check that. | ||
2742 | */ | ||
2644 | if (!initialized) | 2743 | if (!initialized) |
2645 | return -ENODEV; | 2744 | return -ENODEV; |
2646 | } | 2745 | } |
@@ -2688,8 +2787,9 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers, | |||
2688 | spin_lock_init(&intf->maintenance_mode_lock); | 2787 | spin_lock_init(&intf->maintenance_mode_lock); |
2689 | INIT_LIST_HEAD(&intf->cmd_rcvrs); | 2788 | INIT_LIST_HEAD(&intf->cmd_rcvrs); |
2690 | init_waitqueue_head(&intf->waitq); | 2789 | init_waitqueue_head(&intf->waitq); |
2790 | for (i = 0; i < IPMI_NUM_STATS; i++) | ||
2791 | atomic_set(&intf->stats[i], 0); | ||
2691 | 2792 | ||
2692 | spin_lock_init(&intf->counter_lock); | ||
2693 | intf->proc_dir = NULL; | 2793 | intf->proc_dir = NULL; |
2694 | 2794 | ||
2695 | mutex_lock(&smi_watchers_mutex); | 2795 | mutex_lock(&smi_watchers_mutex); |
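The registration path now zeroes every statistics slot explicitly and drops counter_lock entirely; an explicit atomic_set() loop is the portable way to initialise an atomic_t rather than relying on zeroed allocator memory. For illustration, the full life cycle of one counter under the new scheme (using the helper names from the hunks above):

	for (i = 0; i < IPMI_NUM_STATS; i++)
		atomic_set(&intf->stats[i], 0);		/* ipmi_register_smi() */

	ipmi_inc_stat(intf, sent_local_commands);	/* hot paths, no locking */

	out += sprintf(out, "sent_local_commands: %u\n",
		       ipmi_get_stat(intf, sent_local_commands));	/* proc read */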
@@ -2717,11 +2817,12 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers, | |||
2717 | get_guid(intf); | 2817 | get_guid(intf); |
2718 | 2818 | ||
2719 | if ((intf->ipmi_version_major > 1) | 2819 | if ((intf->ipmi_version_major > 1) |
2720 | || ((intf->ipmi_version_major == 1) | 2820 | || ((intf->ipmi_version_major == 1) |
2721 | && (intf->ipmi_version_minor >= 5))) | 2821 | && (intf->ipmi_version_minor >= 5))) { |
2722 | { | 2822 | /* |
2723 | /* Start scanning the channels to see what is | 2823 | * Start scanning the channels to see what is |
2724 | available. */ | 2824 | * available. |
2825 | */ | ||
2725 | intf->null_user_handler = channel_handler; | 2826 | intf->null_user_handler = channel_handler; |
2726 | intf->curr_channel = 0; | 2827 | intf->curr_channel = 0; |
2727 | rv = send_channel_info_cmd(intf, 0); | 2828 | rv = send_channel_info_cmd(intf, 0); |
@@ -2769,6 +2870,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers, | |||
2769 | 2870 | ||
2770 | return rv; | 2871 | return rv; |
2771 | } | 2872 | } |
2873 | EXPORT_SYMBOL(ipmi_register_smi); | ||
2772 | 2874 | ||
2773 | static void cleanup_smi_msgs(ipmi_smi_t intf) | 2875 | static void cleanup_smi_msgs(ipmi_smi_t intf) |
2774 | { | 2876 | { |
@@ -2803,8 +2905,10 @@ int ipmi_unregister_smi(ipmi_smi_t intf) | |||
2803 | 2905 | ||
2804 | remove_proc_entries(intf); | 2906 | remove_proc_entries(intf); |
2805 | 2907 | ||
2806 | /* Call all the watcher interfaces to tell them that | 2908 | /* |
2807 | an interface is gone. */ | 2909 | * Call all the watcher interfaces to tell them that |
2910 | * an interface is gone. | ||
2911 | */ | ||
2808 | list_for_each_entry(w, &smi_watchers, link) | 2912 | list_for_each_entry(w, &smi_watchers, link) |
2809 | w->smi_gone(intf_num); | 2913 | w->smi_gone(intf_num); |
2810 | mutex_unlock(&smi_watchers_mutex); | 2914 | mutex_unlock(&smi_watchers_mutex); |
@@ -2812,22 +2916,21 @@ int ipmi_unregister_smi(ipmi_smi_t intf) | |||
2812 | kref_put(&intf->refcount, intf_free); | 2916 | kref_put(&intf->refcount, intf_free); |
2813 | return 0; | 2917 | return 0; |
2814 | } | 2918 | } |
2919 | EXPORT_SYMBOL(ipmi_unregister_smi); | ||
2815 | 2920 | ||
2816 | static int handle_ipmb_get_msg_rsp(ipmi_smi_t intf, | 2921 | static int handle_ipmb_get_msg_rsp(ipmi_smi_t intf, |
2817 | struct ipmi_smi_msg *msg) | 2922 | struct ipmi_smi_msg *msg) |
2818 | { | 2923 | { |
2819 | struct ipmi_ipmb_addr ipmb_addr; | 2924 | struct ipmi_ipmb_addr ipmb_addr; |
2820 | struct ipmi_recv_msg *recv_msg; | 2925 | struct ipmi_recv_msg *recv_msg; |
2821 | unsigned long flags; | ||
2822 | 2926 | ||
2823 | 2927 | /* | |
2824 | /* This is 11, not 10, because the response must contain a | 2928 | * This is 11, not 10, because the response must contain a |
2825 | * completion code. */ | 2929 | * completion code. |
2930 | */ | ||
2826 | if (msg->rsp_size < 11) { | 2931 | if (msg->rsp_size < 11) { |
2827 | /* Message not big enough, just ignore it. */ | 2932 | /* Message not big enough, just ignore it. */ |
2828 | spin_lock_irqsave(&intf->counter_lock, flags); | 2933 | ipmi_inc_stat(intf, invalid_ipmb_responses); |
2829 | intf->invalid_ipmb_responses++; | ||
2830 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
2831 | return 0; | 2934 | return 0; |
2832 | } | 2935 | } |
2833 | 2936 | ||
@@ -2841,37 +2944,38 @@ static int handle_ipmb_get_msg_rsp(ipmi_smi_t intf, | |||
2841 | ipmb_addr.channel = msg->rsp[3] & 0x0f; | 2944 | ipmb_addr.channel = msg->rsp[3] & 0x0f; |
2842 | ipmb_addr.lun = msg->rsp[7] & 3; | 2945 | ipmb_addr.lun = msg->rsp[7] & 3; |
2843 | 2946 | ||
2844 | /* It's a response from a remote entity. Look up the sequence | 2947 | /* |
2845 | number and handle the response. */ | 2948 | * It's a response from a remote entity. Look up the sequence |
2949 | * number and handle the response. | ||
2950 | */ | ||
2846 | if (intf_find_seq(intf, | 2951 | if (intf_find_seq(intf, |
2847 | msg->rsp[7] >> 2, | 2952 | msg->rsp[7] >> 2, |
2848 | msg->rsp[3] & 0x0f, | 2953 | msg->rsp[3] & 0x0f, |
2849 | msg->rsp[8], | 2954 | msg->rsp[8], |
2850 | (msg->rsp[4] >> 2) & (~1), | 2955 | (msg->rsp[4] >> 2) & (~1), |
2851 | (struct ipmi_addr *) &(ipmb_addr), | 2956 | (struct ipmi_addr *) &(ipmb_addr), |
2852 | &recv_msg)) | 2957 | &recv_msg)) { |
2853 | { | 2958 | /* |
2854 | /* We were unable to find the sequence number, | 2959 | * We were unable to find the sequence number, |
2855 | so just nuke the message. */ | 2960 | * so just nuke the message. |
2856 | spin_lock_irqsave(&intf->counter_lock, flags); | 2961 | */ |
2857 | intf->unhandled_ipmb_responses++; | 2962 | ipmi_inc_stat(intf, unhandled_ipmb_responses); |
2858 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
2859 | return 0; | 2963 | return 0; |
2860 | } | 2964 | } |
2861 | 2965 | ||
2862 | memcpy(recv_msg->msg_data, | 2966 | memcpy(recv_msg->msg_data, |
2863 | &(msg->rsp[9]), | 2967 | &(msg->rsp[9]), |
2864 | msg->rsp_size - 9); | 2968 | msg->rsp_size - 9); |
2865 | /* THe other fields matched, so no need to set them, except | 2969 | /* |
2866 | for netfn, which needs to be the response that was | 2970 | * The other fields matched, so no need to set them, except |
2867 | returned, not the request value. */ | 2971 | * for netfn, which needs to be the response that was |
2972 | * returned, not the request value. | ||
2973 | */ | ||
2868 | recv_msg->msg.netfn = msg->rsp[4] >> 2; | 2974 | recv_msg->msg.netfn = msg->rsp[4] >> 2; |
2869 | recv_msg->msg.data = recv_msg->msg_data; | 2975 | recv_msg->msg.data = recv_msg->msg_data; |
2870 | recv_msg->msg.data_len = msg->rsp_size - 10; | 2976 | recv_msg->msg.data_len = msg->rsp_size - 10; |
2871 | recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; | 2977 | recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; |
2872 | spin_lock_irqsave(&intf->counter_lock, flags); | 2978 | ipmi_inc_stat(intf, handled_ipmb_responses); |
2873 | intf->handled_ipmb_responses++; | ||
2874 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
2875 | deliver_response(recv_msg); | 2979 | deliver_response(recv_msg); |
2876 | 2980 | ||
2877 | return 0; | 2981 | return 0; |
@@ -2888,14 +2992,11 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf, | |||
2888 | ipmi_user_t user = NULL; | 2992 | ipmi_user_t user = NULL; |
2889 | struct ipmi_ipmb_addr *ipmb_addr; | 2993 | struct ipmi_ipmb_addr *ipmb_addr; |
2890 | struct ipmi_recv_msg *recv_msg; | 2994 | struct ipmi_recv_msg *recv_msg; |
2891 | unsigned long flags; | ||
2892 | struct ipmi_smi_handlers *handlers; | 2995 | struct ipmi_smi_handlers *handlers; |
2893 | 2996 | ||
2894 | if (msg->rsp_size < 10) { | 2997 | if (msg->rsp_size < 10) { |
2895 | /* Message not big enough, just ignore it. */ | 2998 | /* Message not big enough, just ignore it. */ |
2896 | spin_lock_irqsave(&intf->counter_lock, flags); | 2999 | ipmi_inc_stat(intf, invalid_commands); |
2897 | intf->invalid_commands++; | ||
2898 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
2899 | return 0; | 3000 | return 0; |
2900 | } | 3001 | } |
2901 | 3002 | ||
@@ -2919,19 +3020,17 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf, | |||
2919 | 3020 | ||
2920 | if (user == NULL) { | 3021 | if (user == NULL) { |
2921 | /* We didn't find a user, deliver an error response. */ | 3022 | /* We didn't find a user, deliver an error response. */ |
2922 | spin_lock_irqsave(&intf->counter_lock, flags); | 3023 | ipmi_inc_stat(intf, unhandled_commands); |
2923 | intf->unhandled_commands++; | ||
2924 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
2925 | 3024 | ||
2926 | msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); | 3025 | msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); |
2927 | msg->data[1] = IPMI_SEND_MSG_CMD; | 3026 | msg->data[1] = IPMI_SEND_MSG_CMD; |
2928 | msg->data[2] = msg->rsp[3]; | 3027 | msg->data[2] = msg->rsp[3]; |
2929 | msg->data[3] = msg->rsp[6]; | 3028 | msg->data[3] = msg->rsp[6]; |
2930 | msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3); | 3029 | msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3); |
2931 | msg->data[5] = ipmb_checksum(&(msg->data[3]), 2); | 3030 | msg->data[5] = ipmb_checksum(&(msg->data[3]), 2); |
2932 | msg->data[6] = intf->channels[msg->rsp[3] & 0xf].address; | 3031 | msg->data[6] = intf->channels[msg->rsp[3] & 0xf].address; |
2933 | /* rqseq/lun */ | 3032 | /* rqseq/lun */ |
2934 | msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3); | 3033 | msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3); |
2935 | msg->data[8] = msg->rsp[8]; /* cmd */ | 3034 | msg->data[8] = msg->rsp[8]; /* cmd */ |
2936 | msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE; | 3035 | msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE; |
2937 | msg->data[10] = ipmb_checksum(&(msg->data[6]), 4); | 3036 | msg->data[10] = ipmb_checksum(&(msg->data[6]), 4); |
@@ -2950,23 +3049,25 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf, | |||
2950 | handlers = intf->handlers; | 3049 | handlers = intf->handlers; |
2951 | if (handlers) { | 3050 | if (handlers) { |
2952 | handlers->sender(intf->send_info, msg, 0); | 3051 | handlers->sender(intf->send_info, msg, 0); |
2953 | /* We used the message, so return the value | 3052 | /* |
2954 | that causes it to not be freed or | 3053 | * We used the message, so return the value |
2955 | queued. */ | 3054 | * that causes it to not be freed or |
3055 | * queued. | ||
3056 | */ | ||
2956 | rv = -1; | 3057 | rv = -1; |
2957 | } | 3058 | } |
2958 | rcu_read_unlock(); | 3059 | rcu_read_unlock(); |
2959 | } else { | 3060 | } else { |
2960 | /* Deliver the message to the user. */ | 3061 | /* Deliver the message to the user. */ |
2961 | spin_lock_irqsave(&intf->counter_lock, flags); | 3062 | ipmi_inc_stat(intf, handled_commands); |
2962 | intf->handled_commands++; | ||
2963 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
2964 | 3063 | ||
2965 | recv_msg = ipmi_alloc_recv_msg(); | 3064 | recv_msg = ipmi_alloc_recv_msg(); |
2966 | if (!recv_msg) { | 3065 | if (!recv_msg) { |
2967 | /* We couldn't allocate memory for the | 3066 | /* |
2968 | message, so requeue it for handling | 3067 | * We couldn't allocate memory for the |
2969 | later. */ | 3068 | * message, so requeue it for handling |
3069 | * later. | ||
3070 | */ | ||
2970 | rv = 1; | 3071 | rv = 1; |
2971 | kref_put(&user->refcount, free_user); | 3072 | kref_put(&user->refcount, free_user); |
2972 | } else { | 3073 | } else { |
@@ -2977,8 +3078,10 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf, | |||
2977 | ipmb_addr->lun = msg->rsp[7] & 3; | 3078 | ipmb_addr->lun = msg->rsp[7] & 3; |
2978 | ipmb_addr->channel = msg->rsp[3] & 0xf; | 3079 | ipmb_addr->channel = msg->rsp[3] & 0xf; |
2979 | 3080 | ||
2980 | /* Extract the rest of the message information | 3081 | /* |
2981 | from the IPMB header.*/ | 3082 | * Extract the rest of the message information |
3083 | * from the IPMB header. | ||
3084 | */ | ||
2982 | recv_msg->user = user; | 3085 | recv_msg->user = user; |
2983 | recv_msg->recv_type = IPMI_CMD_RECV_TYPE; | 3086 | recv_msg->recv_type = IPMI_CMD_RECV_TYPE; |
2984 | recv_msg->msgid = msg->rsp[7] >> 2; | 3087 | recv_msg->msgid = msg->rsp[7] >> 2; |
@@ -2986,8 +3089,10 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf, | |||
2986 | recv_msg->msg.cmd = msg->rsp[8]; | 3089 | recv_msg->msg.cmd = msg->rsp[8]; |
2987 | recv_msg->msg.data = recv_msg->msg_data; | 3090 | recv_msg->msg.data = recv_msg->msg_data; |
2988 | 3091 | ||
2989 | /* We chop off 10, not 9 bytes because the checksum | 3092 | /* |
2990 | at the end also needs to be removed. */ | 3093 | * We chop off 10, not 9 bytes because the checksum |
3094 | * at the end also needs to be removed. | ||
3095 | */ | ||
2991 | recv_msg->msg.data_len = msg->rsp_size - 10; | 3096 | recv_msg->msg.data_len = msg->rsp_size - 10; |
2992 | memcpy(recv_msg->msg_data, | 3097 | memcpy(recv_msg->msg_data, |
2993 | &(msg->rsp[9]), | 3098 | &(msg->rsp[9]), |
@@ -3004,16 +3109,15 @@ static int handle_lan_get_msg_rsp(ipmi_smi_t intf, | |||
3004 | { | 3109 | { |
3005 | struct ipmi_lan_addr lan_addr; | 3110 | struct ipmi_lan_addr lan_addr; |
3006 | struct ipmi_recv_msg *recv_msg; | 3111 | struct ipmi_recv_msg *recv_msg; |
3007 | unsigned long flags; | ||
3008 | 3112 | ||
3009 | 3113 | ||
3010 | /* This is 13, not 12, because the response must contain a | 3114 | /* |
3011 | * completion code. */ | 3115 | * This is 13, not 12, because the response must contain a |
3116 | * completion code. | ||
3117 | */ | ||
3012 | if (msg->rsp_size < 13) { | 3118 | if (msg->rsp_size < 13) { |
3013 | /* Message not big enough, just ignore it. */ | 3119 | /* Message not big enough, just ignore it. */ |
3014 | spin_lock_irqsave(&intf->counter_lock, flags); | 3120 | ipmi_inc_stat(intf, invalid_lan_responses); |
3015 | intf->invalid_lan_responses++; | ||
3016 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
3017 | return 0; | 3121 | return 0; |
3018 | } | 3122 | } |
3019 | 3123 | ||
@@ -3030,37 +3134,38 @@ static int handle_lan_get_msg_rsp(ipmi_smi_t intf, | |||
3030 | lan_addr.privilege = msg->rsp[3] >> 4; | 3134 | lan_addr.privilege = msg->rsp[3] >> 4; |
3031 | lan_addr.lun = msg->rsp[9] & 3; | 3135 | lan_addr.lun = msg->rsp[9] & 3; |
3032 | 3136 | ||
3033 | /* It's a response from a remote entity. Look up the sequence | 3137 | /* |
3034 | number and handle the response. */ | 3138 | * It's a response from a remote entity. Look up the sequence |
3139 | * number and handle the response. | ||
3140 | */ | ||
3035 | if (intf_find_seq(intf, | 3141 | if (intf_find_seq(intf, |
3036 | msg->rsp[9] >> 2, | 3142 | msg->rsp[9] >> 2, |
3037 | msg->rsp[3] & 0x0f, | 3143 | msg->rsp[3] & 0x0f, |
3038 | msg->rsp[10], | 3144 | msg->rsp[10], |
3039 | (msg->rsp[6] >> 2) & (~1), | 3145 | (msg->rsp[6] >> 2) & (~1), |
3040 | (struct ipmi_addr *) &(lan_addr), | 3146 | (struct ipmi_addr *) &(lan_addr), |
3041 | &recv_msg)) | 3147 | &recv_msg)) { |
3042 | { | 3148 | /* |
3043 | /* We were unable to find the sequence number, | 3149 | * We were unable to find the sequence number, |
3044 | so just nuke the message. */ | 3150 | * so just nuke the message. |
3045 | spin_lock_irqsave(&intf->counter_lock, flags); | 3151 | */ |
3046 | intf->unhandled_lan_responses++; | 3152 | ipmi_inc_stat(intf, unhandled_lan_responses); |
3047 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
3048 | return 0; | 3153 | return 0; |
3049 | } | 3154 | } |
3050 | 3155 | ||
3051 | memcpy(recv_msg->msg_data, | 3156 | memcpy(recv_msg->msg_data, |
3052 | &(msg->rsp[11]), | 3157 | &(msg->rsp[11]), |
3053 | msg->rsp_size - 11); | 3158 | msg->rsp_size - 11); |
3054 | /* The other fields matched, so no need to set them, except | 3159 | /* |
3055 | for netfn, which needs to be the response that was | 3160 | * The other fields matched, so no need to set them, except |
3056 | returned, not the request value. */ | 3161 | * for netfn, which needs to be the response that was |
3162 | * returned, not the request value. | ||
3163 | */ | ||
3057 | recv_msg->msg.netfn = msg->rsp[6] >> 2; | 3164 | recv_msg->msg.netfn = msg->rsp[6] >> 2; |
3058 | recv_msg->msg.data = recv_msg->msg_data; | 3165 | recv_msg->msg.data = recv_msg->msg_data; |
3059 | recv_msg->msg.data_len = msg->rsp_size - 12; | 3166 | recv_msg->msg.data_len = msg->rsp_size - 12; |
3060 | recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; | 3167 | recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; |
3061 | spin_lock_irqsave(&intf->counter_lock, flags); | 3168 | ipmi_inc_stat(intf, handled_lan_responses); |
3062 | intf->handled_lan_responses++; | ||
3063 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
3064 | deliver_response(recv_msg); | 3169 | deliver_response(recv_msg); |
3065 | 3170 | ||
3066 | return 0; | 3171 | return 0; |
@@ -3077,13 +3182,10 @@ static int handle_lan_get_msg_cmd(ipmi_smi_t intf, | |||
3077 | ipmi_user_t user = NULL; | 3182 | ipmi_user_t user = NULL; |
3078 | struct ipmi_lan_addr *lan_addr; | 3183 | struct ipmi_lan_addr *lan_addr; |
3079 | struct ipmi_recv_msg *recv_msg; | 3184 | struct ipmi_recv_msg *recv_msg; |
3080 | unsigned long flags; | ||
3081 | 3185 | ||
3082 | if (msg->rsp_size < 12) { | 3186 | if (msg->rsp_size < 12) { |
3083 | /* Message not big enough, just ignore it. */ | 3187 | /* Message not big enough, just ignore it. */ |
3084 | spin_lock_irqsave(&intf->counter_lock, flags); | 3188 | ipmi_inc_stat(intf, invalid_commands); |
3085 | intf->invalid_commands++; | ||
3086 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
3087 | return 0; | 3189 | return 0; |
3088 | } | 3190 | } |
3089 | 3191 | ||
@@ -3107,23 +3209,23 @@ static int handle_lan_get_msg_cmd(ipmi_smi_t intf, | |||
3107 | 3209 | ||
3108 | if (user == NULL) { | 3210 | if (user == NULL) { |
3109 | /* We didn't find a user, just give up. */ | 3211 | /* We didn't find a user, just give up. */ |
3110 | spin_lock_irqsave(&intf->counter_lock, flags); | 3212 | ipmi_inc_stat(intf, unhandled_commands); |
3111 | intf->unhandled_commands++; | ||
3112 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
3113 | 3213 | ||
3114 | rv = 0; /* Don't do anything with these messages, just | 3214 | /* |
3115 | allow them to be freed. */ | 3215 | * Don't do anything with these messages, just allow |
3216 | * them to be freed. | ||
3217 | */ | ||
3218 | rv = 0; | ||
3116 | } else { | 3219 | } else { |
3117 | /* Deliver the message to the user. */ | 3220 | /* Deliver the message to the user. */ |
3118 | spin_lock_irqsave(&intf->counter_lock, flags); | 3221 | ipmi_inc_stat(intf, handled_commands); |
3119 | intf->handled_commands++; | ||
3120 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
3121 | 3222 | ||
3122 | recv_msg = ipmi_alloc_recv_msg(); | 3223 | recv_msg = ipmi_alloc_recv_msg(); |
3123 | if (!recv_msg) { | 3224 | if (!recv_msg) { |
3124 | /* We couldn't allocate memory for the | 3225 | /* |
3125 | message, so requeue it for handling | 3226 | * We couldn't allocate memory for the |
3126 | later. */ | 3227 | * message, so requeue it for handling later. |
3228 | */ | ||
3127 | rv = 1; | 3229 | rv = 1; |
3128 | kref_put(&user->refcount, free_user); | 3230 | kref_put(&user->refcount, free_user); |
3129 | } else { | 3231 | } else { |
@@ -3137,8 +3239,10 @@ static int handle_lan_get_msg_cmd(ipmi_smi_t intf, | |||
3137 | lan_addr->channel = msg->rsp[3] & 0xf; | 3239 | lan_addr->channel = msg->rsp[3] & 0xf; |
3138 | lan_addr->privilege = msg->rsp[3] >> 4; | 3240 | lan_addr->privilege = msg->rsp[3] >> 4; |
3139 | 3241 | ||
3140 | /* Extract the rest of the message information | 3242 | /* |
3141 | from the IPMB header.*/ | 3243 | * Extract the rest of the message information |
3244 | * from the IPMB header. | ||
3245 | */ | ||
3142 | recv_msg->user = user; | 3246 | recv_msg->user = user; |
3143 | recv_msg->recv_type = IPMI_CMD_RECV_TYPE; | 3247 | recv_msg->recv_type = IPMI_CMD_RECV_TYPE; |
3144 | recv_msg->msgid = msg->rsp[9] >> 2; | 3248 | recv_msg->msgid = msg->rsp[9] >> 2; |
@@ -3146,8 +3250,10 @@ static int handle_lan_get_msg_cmd(ipmi_smi_t intf, | |||
3146 | recv_msg->msg.cmd = msg->rsp[10]; | 3250 | recv_msg->msg.cmd = msg->rsp[10]; |
3147 | recv_msg->msg.data = recv_msg->msg_data; | 3251 | recv_msg->msg.data = recv_msg->msg_data; |
3148 | 3252 | ||
3149 | /* We chop off 12, not 11 bytes because the checksum | 3253 | /* |
3150 | at the end also needs to be removed. */ | 3254 | * We chop off 12, not 11 bytes because the checksum |
3255 | * at the end also needs to be removed. | ||
3256 | */ | ||
3151 | recv_msg->msg.data_len = msg->rsp_size - 12; | 3257 | recv_msg->msg.data_len = msg->rsp_size - 12; |
3152 | memcpy(recv_msg->msg_data, | 3258 | memcpy(recv_msg->msg_data, |
3153 | &(msg->rsp[11]), | 3259 | &(msg->rsp[11]), |
@@ -3163,7 +3269,7 @@ static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg, | |||
3163 | struct ipmi_smi_msg *msg) | 3269 | struct ipmi_smi_msg *msg) |
3164 | { | 3270 | { |
3165 | struct ipmi_system_interface_addr *smi_addr; | 3271 | struct ipmi_system_interface_addr *smi_addr; |
3166 | 3272 | ||
3167 | recv_msg->msgid = 0; | 3273 | recv_msg->msgid = 0; |
3168 | smi_addr = (struct ipmi_system_interface_addr *) &(recv_msg->addr); | 3274 | smi_addr = (struct ipmi_system_interface_addr *) &(recv_msg->addr); |
3169 | smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; | 3275 | smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; |
@@ -3189,9 +3295,7 @@ static int handle_read_event_rsp(ipmi_smi_t intf, | |||
3189 | 3295 | ||
3190 | if (msg->rsp_size < 19) { | 3296 | if (msg->rsp_size < 19) { |
3191 | /* Message is too small to be an IPMB event. */ | 3297 | /* Message is too small to be an IPMB event. */ |
3192 | spin_lock_irqsave(&intf->counter_lock, flags); | 3298 | ipmi_inc_stat(intf, invalid_events); |
3193 | intf->invalid_events++; | ||
3194 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
3195 | return 0; | 3299 | return 0; |
3196 | } | 3300 | } |
3197 | 3301 | ||
@@ -3204,12 +3308,12 @@ static int handle_read_event_rsp(ipmi_smi_t intf, | |||
3204 | 3308 | ||
3205 | spin_lock_irqsave(&intf->events_lock, flags); | 3309 | spin_lock_irqsave(&intf->events_lock, flags); |
3206 | 3310 | ||
3207 | spin_lock(&intf->counter_lock); | 3311 | ipmi_inc_stat(intf, events); |
3208 | intf->events++; | ||
3209 | spin_unlock(&intf->counter_lock); | ||
3210 | 3312 | ||
3211 | /* Allocate and fill in one message for every user that is getting | 3313 | /* |
3212 | events. */ | 3314 | * Allocate and fill in one message for every user that is |
3315 | * getting events. | ||
3316 | */ | ||
3213 | rcu_read_lock(); | 3317 | rcu_read_lock(); |
3214 | list_for_each_entry_rcu(user, &intf->users, link) { | 3318 | list_for_each_entry_rcu(user, &intf->users, link) { |
3215 | if (!user->gets_events) | 3319 | if (!user->gets_events) |
@@ -3223,9 +3327,11 @@ static int handle_read_event_rsp(ipmi_smi_t intf, | |||
3223 | list_del(&recv_msg->link); | 3327 | list_del(&recv_msg->link); |
3224 | ipmi_free_recv_msg(recv_msg); | 3328 | ipmi_free_recv_msg(recv_msg); |
3225 | } | 3329 | } |
3226 | /* We couldn't allocate memory for the | 3330 | /* |
3227 | message, so requeue it for handling | 3331 | * We couldn't allocate memory for the |
3228 | later. */ | 3332 | * message, so requeue it for handling |
3333 | * later. | ||
3334 | */ | ||
3229 | rv = 1; | 3335 | rv = 1; |
3230 | goto out; | 3336 | goto out; |
3231 | } | 3337 | } |
@@ -3246,13 +3352,17 @@ static int handle_read_event_rsp(ipmi_smi_t intf, | |||
3246 | deliver_response(recv_msg); | 3352 | deliver_response(recv_msg); |
3247 | } | 3353 | } |
3248 | } else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) { | 3354 | } else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) { |
3249 | /* No one to receive the message, put it in queue if there's | 3355 | /* |
3250 | not already too many things in the queue. */ | 3356 | * No one to receive the message, put it in queue if there's |
3357 | * not already too many things in the queue. | ||
3358 | */ | ||
3251 | recv_msg = ipmi_alloc_recv_msg(); | 3359 | recv_msg = ipmi_alloc_recv_msg(); |
3252 | if (!recv_msg) { | 3360 | if (!recv_msg) { |
3253 | /* We couldn't allocate memory for the | 3361 | /* |
3254 | message, so requeue it for handling | 3362 | * We couldn't allocate memory for the |
3255 | later. */ | 3363 | * message, so requeue it for handling |
3364 | * later. | ||
3365 | */ | ||
3256 | rv = 1; | 3366 | rv = 1; |
3257 | goto out; | 3367 | goto out; |
3258 | } | 3368 | } |
@@ -3260,11 +3370,14 @@ static int handle_read_event_rsp(ipmi_smi_t intf, | |||
3260 | copy_event_into_recv_msg(recv_msg, msg); | 3370 | copy_event_into_recv_msg(recv_msg, msg); |
3261 | list_add_tail(&(recv_msg->link), &(intf->waiting_events)); | 3371 | list_add_tail(&(recv_msg->link), &(intf->waiting_events)); |
3262 | intf->waiting_events_count++; | 3372 | intf->waiting_events_count++; |
3263 | } else { | 3373 | } else if (!intf->event_msg_printed) { |
3264 | /* There's too many things in the queue, discard this | 3374 | /* |
3265 | message. */ | 3375 | * There's too many things in the queue, discard this |
3266 | printk(KERN_WARNING PFX "Event queue full, discarding an" | 3376 | * message. |
3267 | " incoming event\n"); | 3377 | */ |
3378 | printk(KERN_WARNING PFX "Event queue full, discarding" | ||
3379 | " incoming events\n"); | ||
3380 | intf->event_msg_printed = 1; | ||
3268 | } | 3381 | } |
3269 | 3382 | ||
3270 | out: | 3383 | out: |
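The queue-overflow warning is now emitted once per overflow episode, gated by the new event_msg_printed flag, instead of once for every discarded event. This hunk does not show where the flag is cleared again; a plausible counterpart (an assumption, not part of the visible diff) would reset it once the queue has room:

	/* hypothetical reset point, e.g. where a reader drains waiting_events */
	if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE)
		intf->event_msg_printed = 0;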
@@ -3277,16 +3390,15 @@ static int handle_bmc_rsp(ipmi_smi_t intf, | |||
3277 | struct ipmi_smi_msg *msg) | 3390 | struct ipmi_smi_msg *msg) |
3278 | { | 3391 | { |
3279 | struct ipmi_recv_msg *recv_msg; | 3392 | struct ipmi_recv_msg *recv_msg; |
3280 | unsigned long flags; | ||
3281 | struct ipmi_user *user; | 3393 | struct ipmi_user *user; |
3282 | 3394 | ||
3283 | recv_msg = (struct ipmi_recv_msg *) msg->user_data; | 3395 | recv_msg = (struct ipmi_recv_msg *) msg->user_data; |
3284 | if (recv_msg == NULL) | 3396 | if (recv_msg == NULL) { |
3285 | { | 3397 | printk(KERN_WARNING |
3286 | printk(KERN_WARNING"IPMI message received with no owner. This\n" | 3398 | "IPMI message received with no owner. This\n" |
3287 | "could be because of a malformed message, or\n" | 3399 | "could be because of a malformed message, or\n" |
3288 | "because of a hardware error. Contact your\n" | 3400 | "because of a hardware error. Contact your\n" |
3289 | "hardware vender for assistance\n"); | 3401 | "hardware vender for assistance\n"); |
3290 | return 0; | 3402 | return 0; |
3291 | } | 3403 | } |
3292 | 3404 | ||
@@ -3294,16 +3406,12 @@ static int handle_bmc_rsp(ipmi_smi_t intf, | |||
3294 | /* Make sure the user still exists. */ | 3406 | /* Make sure the user still exists. */ |
3295 | if (user && !user->valid) { | 3407 | if (user && !user->valid) { |
3296 | /* The user for the message went away, so give up. */ | 3408 | /* The user for the message went away, so give up. */ |
3297 | spin_lock_irqsave(&intf->counter_lock, flags); | 3409 | ipmi_inc_stat(intf, unhandled_local_responses); |
3298 | intf->unhandled_local_responses++; | ||
3299 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
3300 | ipmi_free_recv_msg(recv_msg); | 3410 | ipmi_free_recv_msg(recv_msg); |
3301 | } else { | 3411 | } else { |
3302 | struct ipmi_system_interface_addr *smi_addr; | 3412 | struct ipmi_system_interface_addr *smi_addr; |
3303 | 3413 | ||
3304 | spin_lock_irqsave(&intf->counter_lock, flags); | 3414 | ipmi_inc_stat(intf, handled_local_responses); |
3305 | intf->handled_local_responses++; | ||
3306 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
3307 | recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; | 3415 | recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; |
3308 | recv_msg->msgid = msg->msgid; | 3416 | recv_msg->msgid = msg->msgid; |
3309 | smi_addr = ((struct ipmi_system_interface_addr *) | 3417 | smi_addr = ((struct ipmi_system_interface_addr *) |
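With the counter_lock/flags pair gone, handle_bmc_rsp() above relies on ipmi_inc_stat() being safe without a lock. The macro is defined elsewhere in the file; a sketch of its assumed shape, with the IPMI_STAT_* names and the stats[] member inferred from the call sites rather than quoted from the patch:

        #include <asm/atomic.h>

        enum ipmi_stat_indexes {
                IPMI_STAT_handled_local_responses,
                IPMI_STAT_unhandled_local_responses,
                /* ... one slot per counter used in this file ... */
                IPMI_NUM_STATS
        };

        /* assumed member of the interface structure:
         *      atomic_t stats[IPMI_NUM_STATS];
         */

        #define ipmi_inc_stat(intf, stat) \
                atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
        #define ipmi_get_stat(intf, stat) \
                ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))

Because atomic_inc() needs no surrounding lock, the now-unused flags local can be dropped from the function as well, as the hunk above does.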
@@ -3324,9 +3432,11 @@ static int handle_bmc_rsp(ipmi_smi_t intf, | |||
3324 | return 0; | 3432 | return 0; |
3325 | } | 3433 | } |
3326 | 3434 | ||
3327 | /* Handle a new message. Return 1 if the message should be requeued, | 3435 | /* |
3328 | 0 if the message should be freed, or -1 if the message should not | 3436 | * Handle a new message. Return 1 if the message should be requeued, |
3329 | be freed or requeued. */ | 3437 | * 0 if the message should be freed, or -1 if the message should not |
3438 | * be freed or requeued. | ||
3439 | */ | ||
3330 | static int handle_new_recv_msg(ipmi_smi_t intf, | 3440 | static int handle_new_recv_msg(ipmi_smi_t intf, |
3331 | struct ipmi_smi_msg *msg) | 3441 | struct ipmi_smi_msg *msg) |
3332 | { | 3442 | { |
@@ -3351,10 +3461,12 @@ static int handle_new_recv_msg(ipmi_smi_t intf, | |||
3351 | msg->rsp[1] = msg->data[1]; | 3461 | msg->rsp[1] = msg->data[1]; |
3352 | msg->rsp[2] = IPMI_ERR_UNSPECIFIED; | 3462 | msg->rsp[2] = IPMI_ERR_UNSPECIFIED; |
3353 | msg->rsp_size = 3; | 3463 | msg->rsp_size = 3; |
3354 | } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))/* Netfn */ | 3464 | } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1)) |
3355 | || (msg->rsp[1] != msg->data[1])) /* Command */ | 3465 | || (msg->rsp[1] != msg->data[1])) { |
3356 | { | 3466 | /* |
3357 | /* The response is not even marginally correct. */ | 3467 | * The NetFN and Command in the response are not even |
3468 | * marginally correct. | ||
3469 | */ | ||
3358 | printk(KERN_WARNING PFX "BMC returned incorrect response," | 3470 | printk(KERN_WARNING PFX "BMC returned incorrect response," |
3359 | " expected netfn %x cmd %x, got netfn %x cmd %x\n", | 3471 | " expected netfn %x cmd %x, got netfn %x cmd %x\n", |
3360 | (msg->data[0] >> 2) | 1, msg->data[1], | 3472 | (msg->data[0] >> 2) | 1, msg->data[1], |
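The reworded comment above refers to the IPMI framing rule that a response travels on the request's network function with the low bit set, carrying the same command byte. A worked instance (numeric values are from the IPMI specification, not from this patch): a Send Message request goes out on the APP netfn 0x06 with cmd 0x34, so a sane reply must decode to netfn 0x07, cmd 0x34.

        /* data[0] and rsp[0] hold (netfn << 2) | LUN. */
        unsigned char req_netfn = msg->data[0] >> 2;    /* e.g. 0x06, APP request  */
        unsigned char rsp_netfn = msg->rsp[0] >> 2;     /* must be 0x07 = 0x06 | 1 */

        if (rsp_netfn != (req_netfn | 1) || msg->rsp[1] != msg->data[1])
                ;       /* not a plausible response to what was sent */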
@@ -3369,10 +3481,11 @@ static int handle_new_recv_msg(ipmi_smi_t intf, | |||
3369 | 3481 | ||
3370 | if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) | 3482 | if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) |
3371 | && (msg->rsp[1] == IPMI_SEND_MSG_CMD) | 3483 | && (msg->rsp[1] == IPMI_SEND_MSG_CMD) |
3372 | && (msg->user_data != NULL)) | 3484 | && (msg->user_data != NULL)) { |
3373 | { | 3485 | /* |
3374 | /* It's a response to a response we sent. For this we | 3486 | * It's a response to a response we sent. For this we |
3375 | deliver a send message response to the user. */ | 3487 | * deliver a send message response to the user. |
3488 | */ | ||
3376 | struct ipmi_recv_msg *recv_msg = msg->user_data; | 3489 | struct ipmi_recv_msg *recv_msg = msg->user_data; |
3377 | 3490 | ||
3378 | requeue = 0; | 3491 | requeue = 0; |
@@ -3398,8 +3511,7 @@ static int handle_new_recv_msg(ipmi_smi_t intf, | |||
3398 | recv_msg->msg_data[0] = msg->rsp[2]; | 3511 | recv_msg->msg_data[0] = msg->rsp[2]; |
3399 | deliver_response(recv_msg); | 3512 | deliver_response(recv_msg); |
3400 | } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) | 3513 | } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) |
3401 | && (msg->rsp[1] == IPMI_GET_MSG_CMD)) | 3514 | && (msg->rsp[1] == IPMI_GET_MSG_CMD)) { |
3402 | { | ||
3403 | /* It's from the receive queue. */ | 3515 | /* It's from the receive queue. */ |
3404 | chan = msg->rsp[3] & 0xf; | 3516 | chan = msg->rsp[3] & 0xf; |
3405 | if (chan >= IPMI_MAX_CHANNELS) { | 3517 | if (chan >= IPMI_MAX_CHANNELS) { |
@@ -3411,12 +3523,16 @@ static int handle_new_recv_msg(ipmi_smi_t intf, | |||
3411 | switch (intf->channels[chan].medium) { | 3523 | switch (intf->channels[chan].medium) { |
3412 | case IPMI_CHANNEL_MEDIUM_IPMB: | 3524 | case IPMI_CHANNEL_MEDIUM_IPMB: |
3413 | if (msg->rsp[4] & 0x04) { | 3525 | if (msg->rsp[4] & 0x04) { |
3414 | /* It's a response, so find the | 3526 | /* |
3415 | requesting message and send it up. */ | 3527 | * It's a response, so find the |
3528 | * requesting message and send it up. | ||
3529 | */ | ||
3416 | requeue = handle_ipmb_get_msg_rsp(intf, msg); | 3530 | requeue = handle_ipmb_get_msg_rsp(intf, msg); |
3417 | } else { | 3531 | } else { |
3418 | /* It's a command to the SMS from some other | 3532 | /* |
3419 | entity. Handle that. */ | 3533 | * It's a command to the SMS from some other |
3534 | * entity. Handle that. | ||
3535 | */ | ||
3420 | requeue = handle_ipmb_get_msg_cmd(intf, msg); | 3536 | requeue = handle_ipmb_get_msg_cmd(intf, msg); |
3421 | } | 3537 | } |
3422 | break; | 3538 | break; |
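The 0x04 test above is the same netfn rule seen from the other direction: in a Get Message response, rsp[4] carries (netfn << 2) | LUN for the encapsulated IPMB message, so bit 2 of that byte is bit 0 of the netfn. A one-line restatement of the check, for clarity only:

        /* Odd netfn = response, even netfn = a command addressed to us. */
        int embedded_is_response = (msg->rsp[4] >> 2) & 1;  /* same as (rsp[4] & 0x04) != 0 */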
@@ -3424,25 +3540,30 @@ static int handle_new_recv_msg(ipmi_smi_t intf, | |||
3424 | case IPMI_CHANNEL_MEDIUM_8023LAN: | 3540 | case IPMI_CHANNEL_MEDIUM_8023LAN: |
3425 | case IPMI_CHANNEL_MEDIUM_ASYNC: | 3541 | case IPMI_CHANNEL_MEDIUM_ASYNC: |
3426 | if (msg->rsp[6] & 0x04) { | 3542 | if (msg->rsp[6] & 0x04) { |
3427 | /* It's a response, so find the | 3543 | /* |
3428 | requesting message and send it up. */ | 3544 | * It's a response, so find the |
3545 | * requesting message and send it up. | ||
3546 | */ | ||
3429 | requeue = handle_lan_get_msg_rsp(intf, msg); | 3547 | requeue = handle_lan_get_msg_rsp(intf, msg); |
3430 | } else { | 3548 | } else { |
3431 | /* It's a command to the SMS from some other | 3549 | /* |
3432 | entity. Handle that. */ | 3550 | * It's a command to the SMS from some other |
3551 | * entity. Handle that. | ||
3552 | */ | ||
3433 | requeue = handle_lan_get_msg_cmd(intf, msg); | 3553 | requeue = handle_lan_get_msg_cmd(intf, msg); |
3434 | } | 3554 | } |
3435 | break; | 3555 | break; |
3436 | 3556 | ||
3437 | default: | 3557 | default: |
3438 | /* We don't handle the channel type, so just | 3558 | /* |
3439 | * free the message. */ | 3559 | * We don't handle the channel type, so just |
3560 | * free the message. | ||
3561 | */ | ||
3440 | requeue = 0; | 3562 | requeue = 0; |
3441 | } | 3563 | } |
3442 | 3564 | ||
3443 | } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) | 3565 | } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) |
3444 | && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) | 3566 | && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) { |
3445 | { | ||
3446 | /* It's an asynchronous event. */ | 3567 | /* It's an asynchronous event. */ |
3447 | requeue = handle_read_event_rsp(intf, msg); | 3568 | requeue = handle_read_event_rsp(intf, msg); |
3448 | } else { | 3569 | } else { |
@@ -3458,71 +3579,82 @@ static int handle_new_recv_msg(ipmi_smi_t intf, | |||
3458 | void ipmi_smi_msg_received(ipmi_smi_t intf, | 3579 | void ipmi_smi_msg_received(ipmi_smi_t intf, |
3459 | struct ipmi_smi_msg *msg) | 3580 | struct ipmi_smi_msg *msg) |
3460 | { | 3581 | { |
3461 | unsigned long flags; | 3582 | unsigned long flags = 0; /* keep us warning-free. */ |
3462 | int rv; | 3583 | int rv; |
3584 | int run_to_completion; | ||
3463 | 3585 | ||
3464 | 3586 | ||
3465 | if ((msg->data_size >= 2) | 3587 | if ((msg->data_size >= 2) |
3466 | && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2)) | 3588 | && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2)) |
3467 | && (msg->data[1] == IPMI_SEND_MSG_CMD) | 3589 | && (msg->data[1] == IPMI_SEND_MSG_CMD) |
3468 | && (msg->user_data == NULL)) | 3590 | && (msg->user_data == NULL)) { |
3469 | { | 3591 | /* |
3470 | /* This is the local response to a command send, start | 3592 | * This is the local response to a command send, start |
3471 | the timer for these. The user_data will not be | 3593 | * the timer for these. The user_data will not be |
3472 | NULL if this is a response send, and we will let | 3594 | * NULL if this is a response send, and we will let |
3473 | response sends just go through. */ | 3595 | * response sends just go through. |
3474 | 3596 | */ | |
3475 | /* Check for errors, if we get certain errors (ones | 3597 | |
3476 | that mean basically we can try again later), we | 3598 | /* |
3477 | ignore them and start the timer. Otherwise we | 3599 | * Check for errors, if we get certain errors (ones |
3478 | report the error immediately. */ | 3600 | * that mean basically we can try again later), we |
3601 | * ignore them and start the timer. Otherwise we | ||
3602 | * report the error immediately. | ||
3603 | */ | ||
3479 | if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0) | 3604 | if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0) |
3480 | && (msg->rsp[2] != IPMI_NODE_BUSY_ERR) | 3605 | && (msg->rsp[2] != IPMI_NODE_BUSY_ERR) |
3481 | && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR) | 3606 | && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR) |
3482 | && (msg->rsp[2] != IPMI_BUS_ERR) | 3607 | && (msg->rsp[2] != IPMI_BUS_ERR) |
3483 | && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) | 3608 | && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) { |
3484 | { | ||
3485 | int chan = msg->rsp[3] & 0xf; | 3609 | int chan = msg->rsp[3] & 0xf; |
3486 | 3610 | ||
3487 | /* Got an error sending the message, handle it. */ | 3611 | /* Got an error sending the message, handle it. */ |
3488 | spin_lock_irqsave(&intf->counter_lock, flags); | ||
3489 | if (chan >= IPMI_MAX_CHANNELS) | 3612 | if (chan >= IPMI_MAX_CHANNELS) |
3490 | ; /* This shouldn't happen */ | 3613 | ; /* This shouldn't happen */ |
3491 | else if ((intf->channels[chan].medium | 3614 | else if ((intf->channels[chan].medium |
3492 | == IPMI_CHANNEL_MEDIUM_8023LAN) | 3615 | == IPMI_CHANNEL_MEDIUM_8023LAN) |
3493 | || (intf->channels[chan].medium | 3616 | || (intf->channels[chan].medium |
3494 | == IPMI_CHANNEL_MEDIUM_ASYNC)) | 3617 | == IPMI_CHANNEL_MEDIUM_ASYNC)) |
3495 | intf->sent_lan_command_errs++; | 3618 | ipmi_inc_stat(intf, sent_lan_command_errs); |
3496 | else | 3619 | else |
3497 | intf->sent_ipmb_command_errs++; | 3620 | ipmi_inc_stat(intf, sent_ipmb_command_errs); |
3498 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
3499 | intf_err_seq(intf, msg->msgid, msg->rsp[2]); | 3621 | intf_err_seq(intf, msg->msgid, msg->rsp[2]); |
3500 | } else { | 3622 | } else |
3501 | /* The message was sent, start the timer. */ | 3623 | /* The message was sent, start the timer. */ |
3502 | intf_start_seq_timer(intf, msg->msgid); | 3624 | intf_start_seq_timer(intf, msg->msgid); |
3503 | } | ||
3504 | 3625 | ||
3505 | ipmi_free_smi_msg(msg); | 3626 | ipmi_free_smi_msg(msg); |
3506 | goto out; | 3627 | goto out; |
3507 | } | 3628 | } |
3508 | 3629 | ||
3509 | /* To preserve message order, if the list is not empty, we | 3630 | /* |
3510 | tack this message onto the end of the list. */ | 3631 | * To preserve message order, if the list is not empty, we |
3511 | spin_lock_irqsave(&intf->waiting_msgs_lock, flags); | 3632 | * tack this message onto the end of the list. |
3633 | */ | ||
3634 | run_to_completion = intf->run_to_completion; | ||
3635 | if (!run_to_completion) | ||
3636 | spin_lock_irqsave(&intf->waiting_msgs_lock, flags); | ||
3512 | if (!list_empty(&intf->waiting_msgs)) { | 3637 | if (!list_empty(&intf->waiting_msgs)) { |
3513 | list_add_tail(&msg->link, &intf->waiting_msgs); | 3638 | list_add_tail(&msg->link, &intf->waiting_msgs); |
3514 | spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags); | 3639 | if (!run_to_completion) |
3640 | spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags); | ||
3515 | goto out; | 3641 | goto out; |
3516 | } | 3642 | } |
3517 | spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags); | 3643 | if (!run_to_completion) |
3518 | 3644 | spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags); | |
3645 | |||
3519 | rv = handle_new_recv_msg(intf, msg); | 3646 | rv = handle_new_recv_msg(intf, msg); |
3520 | if (rv > 0) { | 3647 | if (rv > 0) { |
3521 | /* Could not handle the message now, just add it to a | 3648 | /* |
3522 | list to handle later. */ | 3649 | * Could not handle the message now, just add it to a |
3523 | spin_lock_irqsave(&intf->waiting_msgs_lock, flags); | 3650 | * list to handle later. |
3651 | */ | ||
3652 | run_to_completion = intf->run_to_completion; | ||
3653 | if (!run_to_completion) | ||
3654 | spin_lock_irqsave(&intf->waiting_msgs_lock, flags); | ||
3524 | list_add_tail(&msg->link, &intf->waiting_msgs); | 3655 | list_add_tail(&msg->link, &intf->waiting_msgs); |
3525 | spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags); | 3656 | if (!run_to_completion) |
3657 | spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags); | ||
3526 | } else if (rv == 0) { | 3658 | } else if (rv == 0) { |
3527 | ipmi_free_smi_msg(msg); | 3659 | ipmi_free_smi_msg(msg); |
3528 | } | 3660 | } |
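The run_to_completion checks added above all follow one pattern: once the interface is marked run-to-completion (the panic paths later in this patch do exactly that), the waiting_msgs list is touched without taking waiting_msgs_lock, since only one context can be running and a spinlock held by an interrupted CPU would never be released. Extracted from the hunk above as a standalone sketch:

        /* flags must be pre-initialized; spin_lock_irqsave() may never run. */
        unsigned long flags = 0;
        int run_to_completion = intf->run_to_completion;

        if (!run_to_completion)
                spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
        list_add_tail(&msg->link, &intf->waiting_msgs);
        if (!run_to_completion)
                spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);

Sampling the flag once into a local keeps the lock and unlock decisions paired even if run_to_completion changes in between.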
@@ -3530,6 +3662,7 @@ void ipmi_smi_msg_received(ipmi_smi_t intf, | |||
3530 | out: | 3662 | out: |
3531 | return; | 3663 | return; |
3532 | } | 3664 | } |
3665 | EXPORT_SYMBOL(ipmi_smi_msg_received); | ||
3533 | 3666 | ||
3534 | void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf) | 3667 | void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf) |
3535 | { | 3668 | { |
@@ -3544,7 +3677,7 @@ void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf) | |||
3544 | } | 3677 | } |
3545 | rcu_read_unlock(); | 3678 | rcu_read_unlock(); |
3546 | } | 3679 | } |
3547 | 3680 | EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout); | |
3548 | 3681 | ||
3549 | static struct ipmi_smi_msg * | 3682 | static struct ipmi_smi_msg * |
3550 | smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg, | 3683 | smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg, |
@@ -3552,14 +3685,16 @@ smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg, | |||
3552 | { | 3685 | { |
3553 | struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg(); | 3686 | struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg(); |
3554 | if (!smi_msg) | 3687 | if (!smi_msg) |
3555 | /* If we can't allocate the message, then just return, we | 3688 | /* |
3556 | get 4 retries, so this should be ok. */ | 3689 | * If we can't allocate the message, then just return, we |
3690 | * get 4 retries, so this should be ok. | ||
3691 | */ | ||
3557 | return NULL; | 3692 | return NULL; |
3558 | 3693 | ||
3559 | memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len); | 3694 | memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len); |
3560 | smi_msg->data_size = recv_msg->msg.data_len; | 3695 | smi_msg->data_size = recv_msg->msg.data_len; |
3561 | smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid); | 3696 | smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid); |
3562 | 3697 | ||
3563 | #ifdef DEBUG_MSGING | 3698 | #ifdef DEBUG_MSGING |
3564 | { | 3699 | { |
3565 | int m; | 3700 | int m; |
@@ -3594,28 +3729,26 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent, | |||
3594 | ent->inuse = 0; | 3729 | ent->inuse = 0; |
3595 | msg = ent->recv_msg; | 3730 | msg = ent->recv_msg; |
3596 | list_add_tail(&msg->link, timeouts); | 3731 | list_add_tail(&msg->link, timeouts); |
3597 | spin_lock(&intf->counter_lock); | ||
3598 | if (ent->broadcast) | 3732 | if (ent->broadcast) |
3599 | intf->timed_out_ipmb_broadcasts++; | 3733 | ipmi_inc_stat(intf, timed_out_ipmb_broadcasts); |
3600 | else if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE) | 3734 | else if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE) |
3601 | intf->timed_out_lan_commands++; | 3735 | ipmi_inc_stat(intf, timed_out_lan_commands); |
3602 | else | 3736 | else |
3603 | intf->timed_out_ipmb_commands++; | 3737 | ipmi_inc_stat(intf, timed_out_ipmb_commands); |
3604 | spin_unlock(&intf->counter_lock); | ||
3605 | } else { | 3738 | } else { |
3606 | struct ipmi_smi_msg *smi_msg; | 3739 | struct ipmi_smi_msg *smi_msg; |
3607 | /* More retries, send again. */ | 3740 | /* More retries, send again. */ |
3608 | 3741 | ||
3609 | /* Start with the max timer, set to normal | 3742 | /* |
3610 | timer after the message is sent. */ | 3743 | * Start with the max timer, set to normal timer after |
3744 | * the message is sent. | ||
3745 | */ | ||
3611 | ent->timeout = MAX_MSG_TIMEOUT; | 3746 | ent->timeout = MAX_MSG_TIMEOUT; |
3612 | ent->retries_left--; | 3747 | ent->retries_left--; |
3613 | spin_lock(&intf->counter_lock); | ||
3614 | if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE) | 3748 | if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE) |
3615 | intf->retransmitted_lan_commands++; | 3749 | ipmi_inc_stat(intf, retransmitted_lan_commands); |
3616 | else | 3750 | else |
3617 | intf->retransmitted_ipmb_commands++; | 3751 | ipmi_inc_stat(intf, retransmitted_ipmb_commands); |
3618 | spin_unlock(&intf->counter_lock); | ||
3619 | 3752 | ||
3620 | smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot, | 3753 | smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot, |
3621 | ent->seqid); | 3754 | ent->seqid); |
@@ -3624,11 +3757,13 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent, | |||
3624 | 3757 | ||
3625 | spin_unlock_irqrestore(&intf->seq_lock, *flags); | 3758 | spin_unlock_irqrestore(&intf->seq_lock, *flags); |
3626 | 3759 | ||
3627 | /* Send the new message. We send with a zero | 3760 | /* |
3628 | * priority. It timed out, I doubt time is | 3761 | * Send the new message. We send with a zero |
3629 | * that critical now, and high priority | 3762 | * priority. It timed out, I doubt time is that |
3630 | * messages are really only for messages to the | 3763 | * critical now, and high priority messages are really |
3631 | * local MC, which don't get resent. */ | 3764 | * only for messages to the local MC, which don't get |
3765 | * resent. | ||
3766 | */ | ||
3632 | handlers = intf->handlers; | 3767 | handlers = intf->handlers; |
3633 | if (handlers) | 3768 | if (handlers) |
3634 | intf->handlers->sender(intf->send_info, | 3769 | intf->handlers->sender(intf->send_info, |
@@ -3659,16 +3794,20 @@ static void ipmi_timeout_handler(long timeout_period) | |||
3659 | list_del(&smi_msg->link); | 3794 | list_del(&smi_msg->link); |
3660 | ipmi_free_smi_msg(smi_msg); | 3795 | ipmi_free_smi_msg(smi_msg); |
3661 | } else { | 3796 | } else { |
3662 | /* To preserve message order, quit if we | 3797 | /* |
3663 | can't handle a message. */ | 3798 | * To preserve message order, quit if we |
3799 | * can't handle a message. | ||
3800 | */ | ||
3664 | break; | 3801 | break; |
3665 | } | 3802 | } |
3666 | } | 3803 | } |
3667 | spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags); | 3804 | spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags); |
3668 | 3805 | ||
3669 | /* Go through the seq table and find any messages that | 3806 | /* |
3670 | have timed out, putting them in the timeouts | 3807 | * Go through the seq table and find any messages that |
3671 | list. */ | 3808 | * have timed out, putting them in the timeouts |
3809 | * list. | ||
3810 | */ | ||
3672 | INIT_LIST_HEAD(&timeouts); | 3811 | INIT_LIST_HEAD(&timeouts); |
3673 | spin_lock_irqsave(&intf->seq_lock, flags); | 3812 | spin_lock_irqsave(&intf->seq_lock, flags); |
3674 | for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) | 3813 | for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) |
@@ -3694,8 +3833,7 @@ static void ipmi_timeout_handler(long timeout_period) | |||
3694 | intf->auto_maintenance_timeout | 3833 | intf->auto_maintenance_timeout |
3695 | -= timeout_period; | 3834 | -= timeout_period; |
3696 | if (!intf->maintenance_mode | 3835 | if (!intf->maintenance_mode |
3697 | && (intf->auto_maintenance_timeout <= 0)) | 3836 | && (intf->auto_maintenance_timeout <= 0)) { |
3698 | { | ||
3699 | intf->maintenance_mode_enable = 0; | 3837 | intf->maintenance_mode_enable = 0; |
3700 | maintenance_mode_update(intf); | 3838 | maintenance_mode_update(intf); |
3701 | } | 3839 | } |
@@ -3713,8 +3851,10 @@ static void ipmi_request_event(void) | |||
3713 | struct ipmi_smi_handlers *handlers; | 3851 | struct ipmi_smi_handlers *handlers; |
3714 | 3852 | ||
3715 | rcu_read_lock(); | 3853 | rcu_read_lock(); |
3716 | /* Called from the timer, no need to check if handlers is | 3854 | /* |
3717 | * valid. */ | 3855 | * Called from the timer, no need to check if handlers is |
3856 | * valid. | ||
3857 | */ | ||
3718 | list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { | 3858 | list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { |
3719 | /* No event requests when in maintenance mode. */ | 3859 | /* No event requests when in maintenance mode. */ |
3720 | if (intf->maintenance_mode_enable) | 3860 | if (intf->maintenance_mode_enable) |
@@ -3735,10 +3875,12 @@ static struct timer_list ipmi_timer; | |||
3735 | /* How many jiffies does it take to get to the timeout time. */ | 3875 | /* How many jiffies does it take to get to the timeout time. */ |
3736 | #define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000) | 3876 | #define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000) |
3737 | 3877 | ||
3738 | /* Request events from the queue every second (this is the number of | 3878 | /* |
3739 | IPMI_TIMEOUT_TIMES between event requests). Hopefully, in the | 3879 | * Request events from the queue every second (this is the number of |
3740 | future, IPMI will add a way to know immediately if an event is in | 3880 | * IPMI_TIMEOUT_TIMES between event requests). Hopefully, in the |
3741 | the queue and this silliness can go away. */ | 3881 | * future, IPMI will add a way to know immediately if an event is in |
3882 | * the queue and this silliness can go away. | ||
3883 | */ | ||
3742 | #define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME)) | 3884 | #define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME)) |
3743 | 3885 | ||
3744 | static atomic_t stop_operation; | 3886 | static atomic_t stop_operation; |
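As a concrete check on the comment above, and assuming the base timeout defined earlier in the file is 1000 ms (that definition is outside this section):

        /* Assuming IPMI_TIMEOUT_TIME == 1000:
         *   IPMI_TIMEOUT_JIFFIES = (1000 * HZ) / 1000 = HZ  -> timer fires once a second
         *   IPMI_REQUEST_EV_TIME = 1000 / 1000        = 1   -> request events every tick
         */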
@@ -3782,6 +3924,7 @@ struct ipmi_smi_msg *ipmi_alloc_smi_msg(void) | |||
3782 | } | 3924 | } |
3783 | return rv; | 3925 | return rv; |
3784 | } | 3926 | } |
3927 | EXPORT_SYMBOL(ipmi_alloc_smi_msg); | ||
3785 | 3928 | ||
3786 | static void free_recv_msg(struct ipmi_recv_msg *msg) | 3929 | static void free_recv_msg(struct ipmi_recv_msg *msg) |
3787 | { | 3930 | { |
@@ -3789,7 +3932,7 @@ static void free_recv_msg(struct ipmi_recv_msg *msg) | |||
3789 | kfree(msg); | 3932 | kfree(msg); |
3790 | } | 3933 | } |
3791 | 3934 | ||
3792 | struct ipmi_recv_msg *ipmi_alloc_recv_msg(void) | 3935 | static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void) |
3793 | { | 3936 | { |
3794 | struct ipmi_recv_msg *rv; | 3937 | struct ipmi_recv_msg *rv; |
3795 | 3938 | ||
@@ -3808,6 +3951,7 @@ void ipmi_free_recv_msg(struct ipmi_recv_msg *msg) | |||
3808 | kref_put(&msg->user->refcount, free_user); | 3951 | kref_put(&msg->user->refcount, free_user); |
3809 | msg->done(msg); | 3952 | msg->done(msg); |
3810 | } | 3953 | } |
3954 | EXPORT_SYMBOL(ipmi_free_recv_msg); | ||
3811 | 3955 | ||
3812 | #ifdef CONFIG_IPMI_PANIC_EVENT | 3956 | #ifdef CONFIG_IPMI_PANIC_EVENT |
3813 | 3957 | ||
@@ -3825,8 +3969,7 @@ static void event_receiver_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg) | |||
3825 | if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) | 3969 | if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) |
3826 | && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE) | 3970 | && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE) |
3827 | && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD) | 3971 | && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD) |
3828 | && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) | 3972 | && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) { |
3829 | { | ||
3830 | /* A get event receiver command, save it. */ | 3973 | /* A get event receiver command, save it. */ |
3831 | intf->event_receiver = msg->msg.data[1]; | 3974 | intf->event_receiver = msg->msg.data[1]; |
3832 | intf->event_receiver_lun = msg->msg.data[2] & 0x3; | 3975 | intf->event_receiver_lun = msg->msg.data[2] & 0x3; |
@@ -3838,10 +3981,11 @@ static void device_id_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg) | |||
3838 | if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) | 3981 | if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) |
3839 | && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE) | 3982 | && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE) |
3840 | && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD) | 3983 | && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD) |
3841 | && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) | 3984 | && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) { |
3842 | { | 3985 | /* |
3843 | /* A get device id command, save if we are an event | 3986 | * A get device id command, save if we are an event |
3844 | receiver or generator. */ | 3987 | * receiver or generator. |
3988 | */ | ||
3845 | intf->local_sel_device = (msg->msg.data[6] >> 2) & 1; | 3989 | intf->local_sel_device = (msg->msg.data[6] >> 2) & 1; |
3846 | intf->local_event_generator = (msg->msg.data[6] >> 5) & 1; | 3990 | intf->local_event_generator = (msg->msg.data[6] >> 5) & 1; |
3847 | } | 3991 | } |
@@ -3874,8 +4018,10 @@ static void send_panic_events(char *str) | |||
3874 | data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */ | 4018 | data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */ |
3875 | data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */ | 4019 | data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */ |
3876 | 4020 | ||
3877 | /* Put a few breadcrumbs in. Hopefully later we can add more things | 4021 | /* |
3878 | to make the panic events more useful. */ | 4022 | * Put a few breadcrumbs in. Hopefully later we can add more things |
4023 | * to make the panic events more useful. | ||
4024 | */ | ||
3879 | if (str) { | 4025 | if (str) { |
3880 | data[3] = str[0]; | 4026 | data[3] = str[0]; |
3881 | data[6] = str[1]; | 4027 | data[6] = str[1]; |
@@ -3891,6 +4037,7 @@ static void send_panic_events(char *str) | |||
3891 | /* Interface is not ready. */ | 4037 | /* Interface is not ready. */ |
3892 | continue; | 4038 | continue; |
3893 | 4039 | ||
4040 | intf->run_to_completion = 1; | ||
3894 | /* Send the event announcing the panic. */ | 4041 | /* Send the event announcing the panic. */ |
3895 | intf->handlers->set_run_to_completion(intf->send_info, 1); | 4042 | intf->handlers->set_run_to_completion(intf->send_info, 1); |
3896 | i_ipmi_request(NULL, | 4043 | i_ipmi_request(NULL, |
@@ -3908,9 +4055,11 @@ static void send_panic_events(char *str) | |||
3908 | } | 4055 | } |
3909 | 4056 | ||
3910 | #ifdef CONFIG_IPMI_PANIC_STRING | 4057 | #ifdef CONFIG_IPMI_PANIC_STRING |
3911 | /* On every interface, dump a bunch of OEM event holding the | 4058 | /* |
3912 | string. */ | 4059 | * On every interface, dump a bunch of OEM events holding the |
3913 | if (!str) | 4060 | * string. |
4061 | */ | ||
4062 | if (!str) | ||
3914 | return; | 4063 | return; |
3915 | 4064 | ||
3916 | /* For every registered interface, send the event. */ | 4065 | /* For every registered interface, send the event. */ |
@@ -3931,11 +4080,13 @@ static void send_panic_events(char *str) | |||
3931 | */ | 4080 | */ |
3932 | smp_rmb(); | 4081 | smp_rmb(); |
3933 | 4082 | ||
3934 | /* First job here is to figure out where to send the | 4083 | /* |
3935 | OEM events. There's no way in IPMI to send OEM | 4084 | * First job here is to figure out where to send the |
3936 | events using an event send command, so we have to | 4085 | * OEM events. There's no way in IPMI to send OEM |
3937 | find the SEL to put them in and stick them in | 4086 | * events using an event send command, so we have to |
3938 | there. */ | 4087 | * find the SEL to put them in and stick them in |
4088 | * there. | ||
4089 | */ | ||
3939 | 4090 | ||
3940 | /* Get capabilities from the get device id. */ | 4091 | /* Get capabilities from the get device id. */ |
3941 | intf->local_sel_device = 0; | 4092 | intf->local_sel_device = 0; |
@@ -3983,24 +4134,29 @@ static void send_panic_events(char *str) | |||
3983 | } | 4134 | } |
3984 | intf->null_user_handler = NULL; | 4135 | intf->null_user_handler = NULL; |
3985 | 4136 | ||
3986 | /* Validate the event receiver. The low bit must not | 4137 | /* |
3987 | be 1 (it must be a valid IPMB address), it cannot | 4138 | * Validate the event receiver. The low bit must not |
3988 | be zero, and it must not be my address. */ | 4139 | * be 1 (it must be a valid IPMB address), it cannot |
3989 | if (((intf->event_receiver & 1) == 0) | 4140 | * be zero, and it must not be my address. |
4141 | */ | ||
4142 | if (((intf->event_receiver & 1) == 0) | ||
3990 | && (intf->event_receiver != 0) | 4143 | && (intf->event_receiver != 0) |
3991 | && (intf->event_receiver != intf->channels[0].address)) | 4144 | && (intf->event_receiver != intf->channels[0].address)) { |
3992 | { | 4145 | /* |
3993 | /* The event receiver is valid, send an IPMB | 4146 | * The event receiver is valid, send an IPMB |
3994 | message. */ | 4147 | * message. |
4148 | */ | ||
3995 | ipmb = (struct ipmi_ipmb_addr *) &addr; | 4149 | ipmb = (struct ipmi_ipmb_addr *) &addr; |
3996 | ipmb->addr_type = IPMI_IPMB_ADDR_TYPE; | 4150 | ipmb->addr_type = IPMI_IPMB_ADDR_TYPE; |
3997 | ipmb->channel = 0; /* FIXME - is this right? */ | 4151 | ipmb->channel = 0; /* FIXME - is this right? */ |
3998 | ipmb->lun = intf->event_receiver_lun; | 4152 | ipmb->lun = intf->event_receiver_lun; |
3999 | ipmb->slave_addr = intf->event_receiver; | 4153 | ipmb->slave_addr = intf->event_receiver; |
4000 | } else if (intf->local_sel_device) { | 4154 | } else if (intf->local_sel_device) { |
4001 | /* The event receiver was not valid (or was | 4155 | /* |
4002 | me), but I am an SEL device, just dump it | 4156 | * The event receiver was not valid (or was |
4003 | in my SEL. */ | 4157 | * me), but I am an SEL device, just dump it |
4158 | * in my SEL. | ||
4159 | */ | ||
4004 | si = (struct ipmi_system_interface_addr *) &addr; | 4160 | si = (struct ipmi_system_interface_addr *) &addr; |
4005 | si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; | 4161 | si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; |
4006 | si->channel = IPMI_BMC_CHANNEL; | 4162 | si->channel = IPMI_BMC_CHANNEL; |
@@ -4008,7 +4164,6 @@ static void send_panic_events(char *str) | |||
4008 | } else | 4164 | } else |
4009 | continue; /* Nowhere to send the event. */ | 4165 | continue; /* Nowhere to send the event. */ |
4010 | 4166 | ||
4011 | |||
4012 | msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */ | 4167 | msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */ |
4013 | msg.cmd = IPMI_ADD_SEL_ENTRY_CMD; | 4168 | msg.cmd = IPMI_ADD_SEL_ENTRY_CMD; |
4014 | msg.data = data; | 4169 | msg.data = data; |
@@ -4025,8 +4180,10 @@ static void send_panic_events(char *str) | |||
4025 | data[2] = 0xf0; /* OEM event without timestamp. */ | 4180 | data[2] = 0xf0; /* OEM event without timestamp. */ |
4026 | data[3] = intf->channels[0].address; | 4181 | data[3] = intf->channels[0].address; |
4027 | data[4] = j++; /* sequence # */ | 4182 | data[4] = j++; /* sequence # */ |
4028 | /* Always give 11 bytes, so strncpy will fill | 4183 | /* |
4029 | it with zeroes for me. */ | 4184 | * Always give 11 bytes, so strncpy will fill |
4185 | * it with zeroes for me. | ||
4186 | */ | ||
4030 | strncpy(data+5, p, 11); | 4187 | strncpy(data+5, p, 11); |
4031 | p += size; | 4188 | p += size; |
4032 | 4189 | ||
@@ -4043,7 +4200,7 @@ static void send_panic_events(char *str) | |||
4043 | intf->channels[0].lun, | 4200 | intf->channels[0].lun, |
4044 | 0, 1); /* no retry, and no wait. */ | 4201 | 0, 1); /* no retry, and no wait. */ |
4045 | } | 4202 | } |
4046 | } | 4203 | } |
4047 | #endif /* CONFIG_IPMI_PANIC_STRING */ | 4204 | #endif /* CONFIG_IPMI_PANIC_STRING */ |
4048 | } | 4205 | } |
4049 | #endif /* CONFIG_IPMI_PANIC_EVENT */ | 4206 | #endif /* CONFIG_IPMI_PANIC_EVENT */ |
@@ -4052,7 +4209,7 @@ static int has_panicked; | |||
4052 | 4209 | ||
4053 | static int panic_event(struct notifier_block *this, | 4210 | static int panic_event(struct notifier_block *this, |
4054 | unsigned long event, | 4211 | unsigned long event, |
4055 | void *ptr) | 4212 | void *ptr) |
4056 | { | 4213 | { |
4057 | ipmi_smi_t intf; | 4214 | ipmi_smi_t intf; |
4058 | 4215 | ||
@@ -4066,6 +4223,7 @@ static int panic_event(struct notifier_block *this, | |||
4066 | /* Interface is not ready. */ | 4223 | /* Interface is not ready. */ |
4067 | continue; | 4224 | continue; |
4068 | 4225 | ||
4226 | intf->run_to_completion = 1; | ||
4069 | intf->handlers->set_run_to_completion(intf->send_info, 1); | 4227 | intf->handlers->set_run_to_completion(intf->send_info, 1); |
4070 | } | 4228 | } |
4071 | 4229 | ||
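panic_event() makes the same two-layer switch as send_panic_events() above: the new intf->run_to_completion = 1 puts the message-handler core onto the lock-free path sketched earlier, while the existing set_run_to_completion() call tells the low-level interface driver to poll instead of relying on interrupts. In outline:

        /* Both layers must go polled and lock-free once a panic is in progress. */
        intf->run_to_completion = 1;                                /* message handler */
        intf->handlers->set_run_to_completion(intf->send_info, 1); /* SI driver       */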
@@ -4133,11 +4291,16 @@ static __exit void cleanup_ipmi(void) | |||
4133 | 4291 | ||
4134 | atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block); | 4292 | atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block); |
4135 | 4293 | ||
4136 | /* This can't be called if any interfaces exist, so no worry about | 4294 | /* |
4137 | shutting down the interfaces. */ | 4295 | * This can't be called if any interfaces exist, so no worry |
4296 | * about shutting down the interfaces. | ||
4297 | */ | ||
4138 | 4298 | ||
4139 | /* Tell the timer to stop, then wait for it to stop. This avoids | 4299 | /* |
4140 | problems with race conditions removing the timer here. */ | 4300 | * Tell the timer to stop, then wait for it to stop. This |
4301 | * avoids problems with race conditions removing the timer | ||
4302 | * here. | ||
4303 | */ | ||
4141 | atomic_inc(&stop_operation); | 4304 | atomic_inc(&stop_operation); |
4142 | del_timer_sync(&ipmi_timer); | 4305 | del_timer_sync(&ipmi_timer); |
4143 | 4306 | ||
@@ -4164,31 +4327,6 @@ module_exit(cleanup_ipmi); | |||
4164 | module_init(ipmi_init_msghandler_mod); | 4327 | module_init(ipmi_init_msghandler_mod); |
4165 | MODULE_LICENSE("GPL"); | 4328 | MODULE_LICENSE("GPL"); |
4166 | MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>"); | 4329 | MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>"); |
4167 | MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI interface."); | 4330 | MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI" |
4331 | " interface."); | ||
4168 | MODULE_VERSION(IPMI_DRIVER_VERSION); | 4332 | MODULE_VERSION(IPMI_DRIVER_VERSION); |
4169 | |||
4170 | EXPORT_SYMBOL(ipmi_create_user); | ||
4171 | EXPORT_SYMBOL(ipmi_destroy_user); | ||
4172 | EXPORT_SYMBOL(ipmi_get_version); | ||
4173 | EXPORT_SYMBOL(ipmi_request_settime); | ||
4174 | EXPORT_SYMBOL(ipmi_request_supply_msgs); | ||
4175 | EXPORT_SYMBOL(ipmi_poll_interface); | ||
4176 | EXPORT_SYMBOL(ipmi_register_smi); | ||
4177 | EXPORT_SYMBOL(ipmi_unregister_smi); | ||
4178 | EXPORT_SYMBOL(ipmi_register_for_cmd); | ||
4179 | EXPORT_SYMBOL(ipmi_unregister_for_cmd); | ||
4180 | EXPORT_SYMBOL(ipmi_smi_msg_received); | ||
4181 | EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout); | ||
4182 | EXPORT_SYMBOL(ipmi_alloc_smi_msg); | ||
4183 | EXPORT_SYMBOL(ipmi_addr_length); | ||
4184 | EXPORT_SYMBOL(ipmi_validate_addr); | ||
4185 | EXPORT_SYMBOL(ipmi_set_gets_events); | ||
4186 | EXPORT_SYMBOL(ipmi_smi_watcher_register); | ||
4187 | EXPORT_SYMBOL(ipmi_smi_watcher_unregister); | ||
4188 | EXPORT_SYMBOL(ipmi_set_my_address); | ||
4189 | EXPORT_SYMBOL(ipmi_get_my_address); | ||
4190 | EXPORT_SYMBOL(ipmi_set_my_LUN); | ||
4191 | EXPORT_SYMBOL(ipmi_get_my_LUN); | ||
4192 | EXPORT_SYMBOL(ipmi_smi_add_proc_entry); | ||
4193 | EXPORT_SYMBOL(ipmi_user_set_run_to_completion); | ||
4194 | EXPORT_SYMBOL(ipmi_free_recv_msg); | ||
diff --git a/drivers/char/ipmi/ipmi_poweroff.c b/drivers/char/ipmi/ipmi_poweroff.c index b86186de7f07..a261bd735dfb 100644 --- a/drivers/char/ipmi/ipmi_poweroff.c +++ b/drivers/char/ipmi/ipmi_poweroff.c | |||
@@ -87,7 +87,10 @@ MODULE_PARM_DESC(ifnum_to_use, "The interface number to use for the watchdog " | |||
87 | 87 | ||
88 | /* parameter definition to allow user to flag power cycle */ | 88 | /* parameter definition to allow user to flag power cycle */ |
89 | module_param(poweroff_powercycle, int, 0644); | 89 | module_param(poweroff_powercycle, int, 0644); |
90 | MODULE_PARM_DESC(poweroff_powercycle, " Set to non-zero to enable power cycle instead of power down. Power cycle is contingent on hardware support, otherwise it defaults back to power down."); | 90 | MODULE_PARM_DESC(poweroff_powercycle, |
91 | " Set to non-zero to enable power cycle instead of power" | ||
92 | " down. Power cycle is contingent on hardware support," | ||
93 | " otherwise it defaults back to power down."); | ||
91 | 94 | ||
92 | /* Stuff from the get device id command. */ | 95 | /* Stuff from the get device id command. */ |
93 | static unsigned int mfg_id; | 96 | static unsigned int mfg_id; |
@@ -95,22 +98,25 @@ static unsigned int prod_id; | |||
95 | static unsigned char capabilities; | 98 | static unsigned char capabilities; |
96 | static unsigned char ipmi_version; | 99 | static unsigned char ipmi_version; |
97 | 100 | ||
98 | /* We use our own messages for this operation, we don't let the system | 101 | /* |
99 | allocate them, since we may be in a panic situation. The whole | 102 | * We use our own messages for this operation, we don't let the system |
100 | thing is single-threaded, anyway, so multiple messages are not | 103 | * allocate them, since we may be in a panic situation. The whole |
101 | required. */ | 104 | * thing is single-threaded, anyway, so multiple messages are not |
105 | * required. | ||
106 | */ | ||
107 | static atomic_t dummy_count = ATOMIC_INIT(0); | ||
102 | static void dummy_smi_free(struct ipmi_smi_msg *msg) | 108 | static void dummy_smi_free(struct ipmi_smi_msg *msg) |
103 | { | 109 | { |
110 | atomic_dec(&dummy_count); | ||
104 | } | 111 | } |
105 | static void dummy_recv_free(struct ipmi_recv_msg *msg) | 112 | static void dummy_recv_free(struct ipmi_recv_msg *msg) |
106 | { | 113 | { |
114 | atomic_dec(&dummy_count); | ||
107 | } | 115 | } |
108 | static struct ipmi_smi_msg halt_smi_msg = | 116 | static struct ipmi_smi_msg halt_smi_msg = { |
109 | { | ||
110 | .done = dummy_smi_free | 117 | .done = dummy_smi_free |
111 | }; | 118 | }; |
112 | static struct ipmi_recv_msg halt_recv_msg = | 119 | static struct ipmi_recv_msg halt_recv_msg = { |
113 | { | ||
114 | .done = dummy_recv_free | 120 | .done = dummy_recv_free |
115 | }; | 121 | }; |
116 | 122 | ||
@@ -127,8 +133,7 @@ static void receive_handler(struct ipmi_recv_msg *recv_msg, void *handler_data) | |||
127 | complete(comp); | 133 | complete(comp); |
128 | } | 134 | } |
129 | 135 | ||
130 | static struct ipmi_user_hndl ipmi_poweroff_handler = | 136 | static struct ipmi_user_hndl ipmi_poweroff_handler = { |
131 | { | ||
132 | .ipmi_recv_hndl = receive_handler | 137 | .ipmi_recv_hndl = receive_handler |
133 | }; | 138 | }; |
134 | 139 | ||
@@ -152,17 +157,28 @@ static int ipmi_request_wait_for_response(ipmi_user_t user, | |||
152 | return halt_recv_msg.msg.data[0]; | 157 | return halt_recv_msg.msg.data[0]; |
153 | } | 158 | } |
154 | 159 | ||
155 | /* We are in run-to-completion mode, no completion is desired. */ | 160 | /* Wait for message to complete, spinning. */ |
156 | static int ipmi_request_in_rc_mode(ipmi_user_t user, | 161 | static int ipmi_request_in_rc_mode(ipmi_user_t user, |
157 | struct ipmi_addr *addr, | 162 | struct ipmi_addr *addr, |
158 | struct kernel_ipmi_msg *send_msg) | 163 | struct kernel_ipmi_msg *send_msg) |
159 | { | 164 | { |
160 | int rv; | 165 | int rv; |
161 | 166 | ||
167 | atomic_set(&dummy_count, 2); | ||
162 | rv = ipmi_request_supply_msgs(user, addr, 0, send_msg, NULL, | 168 | rv = ipmi_request_supply_msgs(user, addr, 0, send_msg, NULL, |
163 | &halt_smi_msg, &halt_recv_msg, 0); | 169 | &halt_smi_msg, &halt_recv_msg, 0); |
164 | if (rv) | 170 | if (rv) { |
171 | atomic_set(&dummy_count, 0); | ||
165 | return rv; | 172 | return rv; |
173 | } | ||
174 | |||
175 | /* | ||
176 | * Spin until our message is done. | ||
177 | */ | ||
178 | while (atomic_read(&dummy_count) > 0) { | ||
179 | ipmi_poll_interface(user); | ||
180 | cpu_relax(); | ||
181 | } | ||
166 | 182 | ||
167 | return halt_recv_msg.msg.data[0]; | 183 | return halt_recv_msg.msg.data[0]; |
168 | } | 184 | } |
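The dummy_count counter added above is what lets the poweroff path reuse its two statically allocated messages safely: it is preset to 2 (one SMI message plus one receive message), each dummy free routine decrements it when the core is done with that buffer, and the caller polls the interface by hand until both are back, since interrupts may already be off. The caller-visible contract, condensed from the hunk:

        atomic_set(&dummy_count, 2);            /* halt_smi_msg + halt_recv_msg */
        rv = ipmi_request_supply_msgs(user, addr, 0, send_msg, NULL,
                                      &halt_smi_msg, &halt_recv_msg, 0);
        if (!rv)
                while (atomic_read(&dummy_count) > 0) {
                        ipmi_poll_interface(user);      /* drive the SMI by polling */
                        cpu_relax();
                }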
@@ -184,47 +200,47 @@ static int ipmi_request_in_rc_mode(ipmi_user_t user, | |||
184 | 200 | ||
185 | static void (*atca_oem_poweroff_hook)(ipmi_user_t user); | 201 | static void (*atca_oem_poweroff_hook)(ipmi_user_t user); |
186 | 202 | ||
187 | static void pps_poweroff_atca (ipmi_user_t user) | 203 | static void pps_poweroff_atca(ipmi_user_t user) |
188 | { | 204 | { |
189 | struct ipmi_system_interface_addr smi_addr; | 205 | struct ipmi_system_interface_addr smi_addr; |
190 | struct kernel_ipmi_msg send_msg; | 206 | struct kernel_ipmi_msg send_msg; |
191 | int rv; | 207 | int rv; |
192 | /* | 208 | /* |
193 | * Configure IPMI address for local access | 209 | * Configure IPMI address for local access |
194 | */ | 210 | */ |
195 | smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; | 211 | smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; |
196 | smi_addr.channel = IPMI_BMC_CHANNEL; | 212 | smi_addr.channel = IPMI_BMC_CHANNEL; |
197 | smi_addr.lun = 0; | 213 | smi_addr.lun = 0; |
198 | 214 | ||
199 | printk(KERN_INFO PFX "PPS powerdown hook used"); | 215 | printk(KERN_INFO PFX "PPS powerdown hook used"); |
200 | 216 | ||
201 | send_msg.netfn = IPMI_NETFN_OEM; | 217 | send_msg.netfn = IPMI_NETFN_OEM; |
202 | send_msg.cmd = IPMI_ATCA_PPS_GRACEFUL_RESTART; | 218 | send_msg.cmd = IPMI_ATCA_PPS_GRACEFUL_RESTART; |
203 | send_msg.data = IPMI_ATCA_PPS_IANA; | 219 | send_msg.data = IPMI_ATCA_PPS_IANA; |
204 | send_msg.data_len = 3; | 220 | send_msg.data_len = 3; |
205 | rv = ipmi_request_in_rc_mode(user, | 221 | rv = ipmi_request_in_rc_mode(user, |
206 | (struct ipmi_addr *) &smi_addr, | 222 | (struct ipmi_addr *) &smi_addr, |
207 | &send_msg); | 223 | &send_msg); |
208 | if (rv && rv != IPMI_UNKNOWN_ERR_COMPLETION_CODE) { | 224 | if (rv && rv != IPMI_UNKNOWN_ERR_COMPLETION_CODE) { |
209 | printk(KERN_ERR PFX "Unable to send ATCA ," | 225 | printk(KERN_ERR PFX "Unable to send ATCA ," |
210 | " IPMI error 0x%x\n", rv); | 226 | " IPMI error 0x%x\n", rv); |
211 | } | 227 | } |
212 | return; | 228 | return; |
213 | } | 229 | } |
214 | 230 | ||
215 | static int ipmi_atca_detect (ipmi_user_t user) | 231 | static int ipmi_atca_detect(ipmi_user_t user) |
216 | { | 232 | { |
217 | struct ipmi_system_interface_addr smi_addr; | 233 | struct ipmi_system_interface_addr smi_addr; |
218 | struct kernel_ipmi_msg send_msg; | 234 | struct kernel_ipmi_msg send_msg; |
219 | int rv; | 235 | int rv; |
220 | unsigned char data[1]; | 236 | unsigned char data[1]; |
221 | 237 | ||
222 | /* | 238 | /* |
223 | * Configure IPMI address for local access | 239 | * Configure IPMI address for local access |
224 | */ | 240 | */ |
225 | smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; | 241 | smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; |
226 | smi_addr.channel = IPMI_BMC_CHANNEL; | 242 | smi_addr.channel = IPMI_BMC_CHANNEL; |
227 | smi_addr.lun = 0; | 243 | smi_addr.lun = 0; |
228 | 244 | ||
229 | /* | 245 | /* |
230 | * Use get address info to check and see if we are ATCA | 246 | * Use get address info to check and see if we are ATCA |
@@ -238,28 +254,30 @@ static int ipmi_atca_detect (ipmi_user_t user) | |||
238 | (struct ipmi_addr *) &smi_addr, | 254 | (struct ipmi_addr *) &smi_addr, |
239 | &send_msg); | 255 | &send_msg); |
240 | 256 | ||
241 | printk(KERN_INFO PFX "ATCA Detect mfg 0x%X prod 0x%X\n", mfg_id, prod_id); | 257 | printk(KERN_INFO PFX "ATCA Detect mfg 0x%X prod 0x%X\n", |
242 | if((mfg_id == IPMI_MOTOROLA_MANUFACTURER_ID) | 258 | mfg_id, prod_id); |
243 | && (prod_id == IPMI_MOTOROLA_PPS_IPMC_PRODUCT_ID)) { | 259 | if ((mfg_id == IPMI_MOTOROLA_MANUFACTURER_ID) |
244 | printk(KERN_INFO PFX "Installing Pigeon Point Systems Poweroff Hook\n"); | 260 | && (prod_id == IPMI_MOTOROLA_PPS_IPMC_PRODUCT_ID)) { |
261 | printk(KERN_INFO PFX | ||
262 | "Installing Pigeon Point Systems Poweroff Hook\n"); | ||
245 | atca_oem_poweroff_hook = pps_poweroff_atca; | 263 | atca_oem_poweroff_hook = pps_poweroff_atca; |
246 | } | 264 | } |
247 | return !rv; | 265 | return !rv; |
248 | } | 266 | } |
249 | 267 | ||
250 | static void ipmi_poweroff_atca (ipmi_user_t user) | 268 | static void ipmi_poweroff_atca(ipmi_user_t user) |
251 | { | 269 | { |
252 | struct ipmi_system_interface_addr smi_addr; | 270 | struct ipmi_system_interface_addr smi_addr; |
253 | struct kernel_ipmi_msg send_msg; | 271 | struct kernel_ipmi_msg send_msg; |
254 | int rv; | 272 | int rv; |
255 | unsigned char data[4]; | 273 | unsigned char data[4]; |
256 | 274 | ||
257 | /* | 275 | /* |
258 | * Configure IPMI address for local access | 276 | * Configure IPMI address for local access |
259 | */ | 277 | */ |
260 | smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; | 278 | smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; |
261 | smi_addr.channel = IPMI_BMC_CHANNEL; | 279 | smi_addr.channel = IPMI_BMC_CHANNEL; |
262 | smi_addr.lun = 0; | 280 | smi_addr.lun = 0; |
263 | 281 | ||
264 | printk(KERN_INFO PFX "Powering down via ATCA power command\n"); | 282 | printk(KERN_INFO PFX "Powering down via ATCA power command\n"); |
265 | 283 | ||
@@ -273,23 +291,24 @@ static void ipmi_poweroff_atca (ipmi_user_t user) | |||
273 | data[2] = 0; /* Power Level */ | 291 | data[2] = 0; /* Power Level */ |
274 | data[3] = 0; /* Don't change saved presets */ | 292 | data[3] = 0; /* Don't change saved presets */ |
275 | send_msg.data = data; | 293 | send_msg.data = data; |
276 | send_msg.data_len = sizeof (data); | 294 | send_msg.data_len = sizeof(data); |
277 | rv = ipmi_request_in_rc_mode(user, | 295 | rv = ipmi_request_in_rc_mode(user, |
278 | (struct ipmi_addr *) &smi_addr, | 296 | (struct ipmi_addr *) &smi_addr, |
279 | &send_msg); | 297 | &send_msg); |
280 | /** At this point, the system may be shutting down, and most | 298 | /* |
281 | ** serial drivers (if used) will have interrupts turned off | 299 | * At this point, the system may be shutting down, and most |
282 | ** it may be better to ignore IPMI_UNKNOWN_ERR_COMPLETION_CODE | 300 | * serial drivers (if used) will have interrupts turned off |
283 | ** return code | 301 | * it may be better to ignore IPMI_UNKNOWN_ERR_COMPLETION_CODE |
284 | **/ | 302 | * return code |
285 | if (rv && rv != IPMI_UNKNOWN_ERR_COMPLETION_CODE) { | 303 | */ |
304 | if (rv && rv != IPMI_UNKNOWN_ERR_COMPLETION_CODE) { | ||
286 | printk(KERN_ERR PFX "Unable to send ATCA powerdown message," | 305 | printk(KERN_ERR PFX "Unable to send ATCA powerdown message," |
287 | " IPMI error 0x%x\n", rv); | 306 | " IPMI error 0x%x\n", rv); |
288 | goto out; | 307 | goto out; |
289 | } | 308 | } |
290 | 309 | ||
291 | if(atca_oem_poweroff_hook) | 310 | if (atca_oem_poweroff_hook) |
292 | return atca_oem_poweroff_hook(user); | 311 | atca_oem_poweroff_hook(user); |
293 | out: | 312 | out: |
294 | return; | 313 | return; |
295 | } | 314 | } |
@@ -310,13 +329,13 @@ static void ipmi_poweroff_atca (ipmi_user_t user) | |||
310 | #define IPMI_CPI1_PRODUCT_ID 0x000157 | 329 | #define IPMI_CPI1_PRODUCT_ID 0x000157 |
311 | #define IPMI_CPI1_MANUFACTURER_ID 0x0108 | 330 | #define IPMI_CPI1_MANUFACTURER_ID 0x0108 |
312 | 331 | ||
313 | static int ipmi_cpi1_detect (ipmi_user_t user) | 332 | static int ipmi_cpi1_detect(ipmi_user_t user) |
314 | { | 333 | { |
315 | return ((mfg_id == IPMI_CPI1_MANUFACTURER_ID) | 334 | return ((mfg_id == IPMI_CPI1_MANUFACTURER_ID) |
316 | && (prod_id == IPMI_CPI1_PRODUCT_ID)); | 335 | && (prod_id == IPMI_CPI1_PRODUCT_ID)); |
317 | } | 336 | } |
318 | 337 | ||
319 | static void ipmi_poweroff_cpi1 (ipmi_user_t user) | 338 | static void ipmi_poweroff_cpi1(ipmi_user_t user) |
320 | { | 339 | { |
321 | struct ipmi_system_interface_addr smi_addr; | 340 | struct ipmi_system_interface_addr smi_addr; |
322 | struct ipmi_ipmb_addr ipmb_addr; | 341 | struct ipmi_ipmb_addr ipmb_addr; |
@@ -328,12 +347,12 @@ static void ipmi_poweroff_cpi1 (ipmi_user_t user) | |||
328 | unsigned char aer_addr; | 347 | unsigned char aer_addr; |
329 | unsigned char aer_lun; | 348 | unsigned char aer_lun; |
330 | 349 | ||
331 | /* | 350 | /* |
332 | * Configure IPMI address for local access | 351 | * Configure IPMI address for local access |
333 | */ | 352 | */ |
334 | smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; | 353 | smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; |
335 | smi_addr.channel = IPMI_BMC_CHANNEL; | 354 | smi_addr.channel = IPMI_BMC_CHANNEL; |
336 | smi_addr.lun = 0; | 355 | smi_addr.lun = 0; |
337 | 356 | ||
338 | printk(KERN_INFO PFX "Powering down via CPI1 power command\n"); | 357 | printk(KERN_INFO PFX "Powering down via CPI1 power command\n"); |
339 | 358 | ||
@@ -425,7 +444,7 @@ static void ipmi_poweroff_cpi1 (ipmi_user_t user) | |||
425 | */ | 444 | */ |
426 | 445 | ||
427 | #define DELL_IANA_MFR_ID {0xA2, 0x02, 0x00} | 446 | #define DELL_IANA_MFR_ID {0xA2, 0x02, 0x00} |
428 | static int ipmi_dell_chassis_detect (ipmi_user_t user) | 447 | static int ipmi_dell_chassis_detect(ipmi_user_t user) |
429 | { | 448 | { |
430 | const char ipmi_version_major = ipmi_version & 0xF; | 449 | const char ipmi_version_major = ipmi_version & 0xF; |
431 | const char ipmi_version_minor = (ipmi_version >> 4) & 0xF; | 450 | const char ipmi_version_minor = (ipmi_version >> 4) & 0xF; |
@@ -444,25 +463,25 @@ static int ipmi_dell_chassis_detect (ipmi_user_t user) | |||
444 | #define IPMI_NETFN_CHASSIS_REQUEST 0 | 463 | #define IPMI_NETFN_CHASSIS_REQUEST 0 |
445 | #define IPMI_CHASSIS_CONTROL_CMD 0x02 | 464 | #define IPMI_CHASSIS_CONTROL_CMD 0x02 |
446 | 465 | ||
447 | static int ipmi_chassis_detect (ipmi_user_t user) | 466 | static int ipmi_chassis_detect(ipmi_user_t user) |
448 | { | 467 | { |
449 | /* Chassis support, use it. */ | 468 | /* Chassis support, use it. */ |
450 | return (capabilities & 0x80); | 469 | return (capabilities & 0x80); |
451 | } | 470 | } |
452 | 471 | ||
453 | static void ipmi_poweroff_chassis (ipmi_user_t user) | 472 | static void ipmi_poweroff_chassis(ipmi_user_t user) |
454 | { | 473 | { |
455 | struct ipmi_system_interface_addr smi_addr; | 474 | struct ipmi_system_interface_addr smi_addr; |
456 | struct kernel_ipmi_msg send_msg; | 475 | struct kernel_ipmi_msg send_msg; |
457 | int rv; | 476 | int rv; |
458 | unsigned char data[1]; | 477 | unsigned char data[1]; |
459 | 478 | ||
460 | /* | 479 | /* |
461 | * Configure IPMI address for local access | 480 | * Configure IPMI address for local access |
462 | */ | 481 | */ |
463 | smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; | 482 | smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; |
464 | smi_addr.channel = IPMI_BMC_CHANNEL; | 483 | smi_addr.channel = IPMI_BMC_CHANNEL; |
465 | smi_addr.lun = 0; | 484 | smi_addr.lun = 0; |
466 | 485 | ||
467 | powercyclefailed: | 486 | powercyclefailed: |
468 | printk(KERN_INFO PFX "Powering %s via IPMI chassis control command\n", | 487 | printk(KERN_INFO PFX "Powering %s via IPMI chassis control command\n", |
@@ -525,15 +544,13 @@ static struct poweroff_function poweroff_functions[] = { | |||
525 | 544 | ||
526 | 545 | ||
527 | /* Called on a powerdown request. */ | 546 | /* Called on a powerdown request. */ |
528 | static void ipmi_poweroff_function (void) | 547 | static void ipmi_poweroff_function(void) |
529 | { | 548 | { |
530 | if (!ready) | 549 | if (!ready) |
531 | return; | 550 | return; |
532 | 551 | ||
533 | /* Use run-to-completion mode, since interrupts may be off. */ | 552 | /* Use run-to-completion mode, since interrupts may be off. */ |
534 | ipmi_user_set_run_to_completion(ipmi_user, 1); | ||
535 | specific_poweroff_func(ipmi_user); | 553 | specific_poweroff_func(ipmi_user); |
536 | ipmi_user_set_run_to_completion(ipmi_user, 0); | ||
537 | } | 554 | } |
538 | 555 | ||
539 | /* Wait for an IPMI interface to be installed, the first one installed | 556 | /* Wait for an IPMI interface to be installed, the first one installed |
@@ -561,13 +578,13 @@ static void ipmi_po_new_smi(int if_num, struct device *device) | |||
561 | 578 | ||
562 | ipmi_ifnum = if_num; | 579 | ipmi_ifnum = if_num; |
563 | 580 | ||
564 | /* | 581 | /* |
565 | * Do a get device id and store some results, since this is | 582 | * Do a get device id and store some results, since this is |
566 | * used by several functions. | 583 | * used by several functions. |
567 | */ | 584 | */ |
568 | smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; | 585 | smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; |
569 | smi_addr.channel = IPMI_BMC_CHANNEL; | 586 | smi_addr.channel = IPMI_BMC_CHANNEL; |
570 | smi_addr.lun = 0; | 587 | smi_addr.lun = 0; |
571 | 588 | ||
572 | send_msg.netfn = IPMI_NETFN_APP_REQUEST; | 589 | send_msg.netfn = IPMI_NETFN_APP_REQUEST; |
573 | send_msg.cmd = IPMI_GET_DEVICE_ID_CMD; | 590 | send_msg.cmd = IPMI_GET_DEVICE_ID_CMD; |
@@ -632,8 +649,7 @@ static void ipmi_po_smi_gone(int if_num) | |||
632 | pm_power_off = old_poweroff_func; | 649 | pm_power_off = old_poweroff_func; |
633 | } | 650 | } |
634 | 651 | ||
635 | static struct ipmi_smi_watcher smi_watcher = | 652 | static struct ipmi_smi_watcher smi_watcher = { |
636 | { | ||
637 | .owner = THIS_MODULE, | 653 | .owner = THIS_MODULE, |
638 | .new_smi = ipmi_po_new_smi, | 654 | .new_smi = ipmi_po_new_smi, |
639 | .smi_gone = ipmi_po_smi_gone | 655 | .smi_gone = ipmi_po_smi_gone |
@@ -675,12 +691,12 @@ static struct ctl_table_header *ipmi_table_header; | |||
675 | /* | 691 | /* |
676 | * Startup and shutdown functions. | 692 | * Startup and shutdown functions. |
677 | */ | 693 | */ |
678 | static int ipmi_poweroff_init (void) | 694 | static int ipmi_poweroff_init(void) |
679 | { | 695 | { |
680 | int rv; | 696 | int rv; |
681 | 697 | ||
682 | printk (KERN_INFO "Copyright (C) 2004 MontaVista Software -" | 698 | printk(KERN_INFO "Copyright (C) 2004 MontaVista Software -" |
683 | " IPMI Powerdown via sys_reboot.\n"); | 699 | " IPMI Powerdown via sys_reboot.\n"); |
684 | 700 | ||
685 | if (poweroff_powercycle) | 701 | if (poweroff_powercycle) |
686 | printk(KERN_INFO PFX "Power cycle is enabled.\n"); | 702 | printk(KERN_INFO PFX "Power cycle is enabled.\n"); |
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index 4f560d0bb808..5a5455585c1d 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c | |||
@@ -80,7 +80,7 @@ | |||
80 | #define SI_USEC_PER_JIFFY (1000000/HZ) | 80 | #define SI_USEC_PER_JIFFY (1000000/HZ) |
81 | #define SI_TIMEOUT_JIFFIES (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY) | 81 | #define SI_TIMEOUT_JIFFIES (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY) |
82 | #define SI_SHORT_TIMEOUT_USEC 250 /* .25ms when the SM requests a | 82 | #define SI_SHORT_TIMEOUT_USEC 250 /* .25ms when the SM requests a |
83 | short timeout */ | 83 | short timeout */ |
84 | 84 | ||
85 | /* Bit for BMC global enables. */ | 85 | /* Bit for BMC global enables. */ |
86 | #define IPMI_BMC_RCV_MSG_INTR 0x01 | 86 | #define IPMI_BMC_RCV_MSG_INTR 0x01 |
@@ -114,14 +114,61 @@ static char *si_to_str[] = { "kcs", "smic", "bt" }; | |||
114 | 114 | ||
115 | #define DEVICE_NAME "ipmi_si" | 115 | #define DEVICE_NAME "ipmi_si" |
116 | 116 | ||
117 | static struct device_driver ipmi_driver = | 117 | static struct device_driver ipmi_driver = { |
118 | { | ||
119 | .name = DEVICE_NAME, | 118 | .name = DEVICE_NAME, |
120 | .bus = &platform_bus_type | 119 | .bus = &platform_bus_type |
121 | }; | 120 | }; |
122 | 121 | ||
123 | struct smi_info | 122 | |
124 | { | 123 | /* |
124 | * Indexes into stats[] in smi_info below. | ||
125 | */ | ||
126 | enum si_stat_indexes { | ||
127 | /* | ||
128 | * Number of times the driver requested a timer while an operation | ||
129 | * was in progress. | ||
130 | */ | ||
131 | SI_STAT_short_timeouts = 0, | ||
132 | |||
133 | /* | ||
134 | * Number of times the driver requested a timer while nothing was in | ||
135 | * progress. | ||
136 | */ | ||
137 | SI_STAT_long_timeouts, | ||
138 | |||
139 | /* Number of times the interface was idle while being polled. */ | ||
140 | SI_STAT_idles, | ||
141 | |||
142 | /* Number of interrupts the driver handled. */ | ||
143 | SI_STAT_interrupts, | ||
144 | |||
145 | /* Number of times the driver got an ATTN from the hardware. */ | ||
146 | SI_STAT_attentions, | ||
147 | |||
148 | /* Number of times the driver requested flags from the hardware. */ | ||
149 | SI_STAT_flag_fetches, | ||
150 | |||
151 | /* Number of times the hardware didn't follow the state machine. */ | ||
152 | SI_STAT_hosed_count, | ||
153 | |||
154 | /* Number of completed messages. */ | ||
155 | SI_STAT_complete_transactions, | ||
156 | |||
157 | /* Number of IPMI events received from the hardware. */ | ||
158 | SI_STAT_events, | ||
159 | |||
160 | /* Number of watchdog pretimeouts. */ | ||
161 | SI_STAT_watchdog_pretimeouts, | ||
162 | |||
163 | /* Number of asynchronous messages received. */ | ||
164 | SI_STAT_incoming_messages, | ||
165 | |||
166 | |||
167 | /* This *must* remain last, add new values above this. */ | ||
168 | SI_NUM_STATS | ||
169 | }; | ||
170 | |||
171 | struct smi_info { | ||
125 | int intf_num; | 172 | int intf_num; |
126 | ipmi_smi_t intf; | 173 | ipmi_smi_t intf; |
127 | struct si_sm_data *si_sm; | 174 | struct si_sm_data *si_sm; |
@@ -134,8 +181,10 @@ struct smi_info | |||
134 | struct ipmi_smi_msg *curr_msg; | 181 | struct ipmi_smi_msg *curr_msg; |
135 | enum si_intf_state si_state; | 182 | enum si_intf_state si_state; |
136 | 183 | ||
137 | /* Used to handle the various types of I/O that can occur with | 184 | /* |
138 | IPMI */ | 185 | * Used to handle the various types of I/O that can occur with |
186 | * IPMI | ||
187 | */ | ||
139 | struct si_sm_io io; | 188 | struct si_sm_io io; |
140 | int (*io_setup)(struct smi_info *info); | 189 | int (*io_setup)(struct smi_info *info); |
141 | void (*io_cleanup)(struct smi_info *info); | 190 | void (*io_cleanup)(struct smi_info *info); |
@@ -146,15 +195,18 @@ struct smi_info | |||
146 | void (*addr_source_cleanup)(struct smi_info *info); | 195 | void (*addr_source_cleanup)(struct smi_info *info); |
147 | void *addr_source_data; | 196 | void *addr_source_data; |
148 | 197 | ||
149 | /* Per-OEM handler, called from handle_flags(). | 198 | /* |
150 | Returns 1 when handle_flags() needs to be re-run | 199 | * Per-OEM handler, called from handle_flags(). Returns 1 |
151 | or 0 indicating it set si_state itself. | 200 | * when handle_flags() needs to be re-run or 0 indicating it |
152 | */ | 201 | * set si_state itself. |
202 | */ | ||
153 | int (*oem_data_avail_handler)(struct smi_info *smi_info); | 203 | int (*oem_data_avail_handler)(struct smi_info *smi_info); |
154 | 204 | ||
155 | /* Flags from the last GET_MSG_FLAGS command, used when an ATTN | 205 | /* |
156 | is set to hold the flags until we are done handling everything | 206 | * Flags from the last GET_MSG_FLAGS command, used when an ATTN |
157 | from the flags. */ | 207 | * is set to hold the flags until we are done handling everything |
208 | * from the flags. | ||
209 | */ | ||
158 | #define RECEIVE_MSG_AVAIL 0x01 | 210 | #define RECEIVE_MSG_AVAIL 0x01 |
159 | #define EVENT_MSG_BUFFER_FULL 0x02 | 211 | #define EVENT_MSG_BUFFER_FULL 0x02 |
160 | #define WDT_PRE_TIMEOUT_INT 0x08 | 212 | #define WDT_PRE_TIMEOUT_INT 0x08 |
@@ -162,25 +214,31 @@ struct smi_info | |||
162 | #define OEM1_DATA_AVAIL 0x40 | 214 | #define OEM1_DATA_AVAIL 0x40 |
163 | #define OEM2_DATA_AVAIL 0x80 | 215 | #define OEM2_DATA_AVAIL 0x80 |
164 | #define OEM_DATA_AVAIL (OEM0_DATA_AVAIL | \ | 216 | #define OEM_DATA_AVAIL (OEM0_DATA_AVAIL | \ |
165 | OEM1_DATA_AVAIL | \ | 217 | OEM1_DATA_AVAIL | \ |
166 | OEM2_DATA_AVAIL) | 218 | OEM2_DATA_AVAIL) |
167 | unsigned char msg_flags; | 219 | unsigned char msg_flags; |
168 | 220 | ||
169 | /* If set to true, this will request events the next time the | 221 | /* |
170 | state machine is idle. */ | 222 | * If set to true, this will request events the next time the |
223 | * state machine is idle. | ||
224 | */ | ||
171 | atomic_t req_events; | 225 | atomic_t req_events; |
172 | 226 | ||
173 | /* If true, run the state machine to completion on every send | 227 | /* |
174 | call. Generally used after a panic to make sure stuff goes | 228 | * If true, run the state machine to completion on every send |
175 | out. */ | 229 | * call. Generally used after a panic to make sure stuff goes |
230 | * out. | ||
231 | */ | ||
176 | int run_to_completion; | 232 | int run_to_completion; |
177 | 233 | ||
178 | /* The I/O port of an SI interface. */ | 234 | /* The I/O port of an SI interface. */ |
179 | int port; | 235 | int port; |
180 | 236 | ||
181 | /* The space between start addresses of the two ports. For | 237 | /* |
182 | instance, if the first port is 0xca2 and the spacing is 4, then | 238 | * The space between start addresses of the two ports. For |
183 | the second port is 0xca6. */ | 239 | * instance, if the first port is 0xca2 and the spacing is 4, then |
240 | * the second port is 0xca6. | ||
241 | */ | ||
184 | unsigned int spacing; | 242 | unsigned int spacing; |
185 | 243 | ||
186 | /* zero if no irq; */ | 244 | /* zero if no irq; */ |
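The msg_flags bits defined in this hunk mirror what the BMC returns from Get Message Flags and are what handle_flags() later walks through. Below is a self-contained decoder for the bits visible here (OEM0_DATA_AVAIL is defined outside the hunk and is left out; the sample byte is made up):

    #include <stdio.h>

    #define RECEIVE_MSG_AVAIL     0x01
    #define EVENT_MSG_BUFFER_FULL 0x02
    #define WDT_PRE_TIMEOUT_INT   0x08
    #define OEM1_DATA_AVAIL       0x40
    #define OEM2_DATA_AVAIL       0x80

    static void decode_msg_flags(unsigned char flags)
    {
        if (flags & WDT_PRE_TIMEOUT_INT)
            printf("  watchdog pre-timeout pending\n");
        if (flags & EVENT_MSG_BUFFER_FULL)
            printf("  event message buffer full\n");
        if (flags & RECEIVE_MSG_AVAIL)
            printf("  message waiting in the receive queue\n");
        if (flags & (OEM1_DATA_AVAIL | OEM2_DATA_AVAIL))
            printf("  OEM-specific data available\n");
    }

    int main(void)
    {
        unsigned char sample = RECEIVE_MSG_AVAIL | WDT_PRE_TIMEOUT_INT; /* 0x09 */

        printf("msg_flags = 0x%02x\n", sample);
        decode_msg_flags(sample);
        return 0;
    }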
@@ -195,10 +253,12 @@ struct smi_info | |||
195 | /* Used to gracefully stop the timer without race conditions. */ | 253 | /* Used to gracefully stop the timer without race conditions. */ |
196 | atomic_t stop_operation; | 254 | atomic_t stop_operation; |
197 | 255 | ||
198 | /* The driver will disable interrupts when it gets into a | 256 | /* |
199 | situation where it cannot handle messages due to lack of | 257 | * The driver will disable interrupts when it gets into a |
200 | memory. Once that situation clears up, it will re-enable | 258 | * situation where it cannot handle messages due to lack of |
201 | interrupts. */ | 259 | * memory. Once that situation clears up, it will re-enable |
260 | * interrupts. | ||
261 | */ | ||
202 | int interrupt_disabled; | 262 | int interrupt_disabled; |
203 | 263 | ||
204 | /* From the get device id response... */ | 264 | /* From the get device id response... */ |
@@ -208,33 +268,28 @@ struct smi_info | |||
208 | struct device *dev; | 268 | struct device *dev; |
209 | struct platform_device *pdev; | 269 | struct platform_device *pdev; |
210 | 270 | ||
211 | /* True if we allocated the device, false if it came from | 271 | /* |
212 | * someplace else (like PCI). */ | 272 | * True if we allocated the device, false if it came from |
273 | * someplace else (like PCI). | ||
274 | */ | ||
213 | int dev_registered; | 275 | int dev_registered; |
214 | 276 | ||
215 | /* Slave address, could be reported from DMI. */ | 277 | /* Slave address, could be reported from DMI. */ |
216 | unsigned char slave_addr; | 278 | unsigned char slave_addr; |
217 | 279 | ||
218 | /* Counters and things for the proc filesystem. */ | 280 | /* Counters and things for the proc filesystem. */ |
219 | spinlock_t count_lock; | 281 | atomic_t stats[SI_NUM_STATS]; |
220 | unsigned long short_timeouts; | 282 | |
221 | unsigned long long_timeouts; | 283 | struct task_struct *thread; |
222 | unsigned long timeout_restarts; | ||
223 | unsigned long idles; | ||
224 | unsigned long interrupts; | ||
225 | unsigned long attentions; | ||
226 | unsigned long flag_fetches; | ||
227 | unsigned long hosed_count; | ||
228 | unsigned long complete_transactions; | ||
229 | unsigned long events; | ||
230 | unsigned long watchdog_pretimeouts; | ||
231 | unsigned long incoming_messages; | ||
232 | |||
233 | struct task_struct *thread; | ||
234 | 284 | ||
235 | struct list_head link; | 285 | struct list_head link; |
236 | }; | 286 | }; |
237 | 287 | ||
288 | #define smi_inc_stat(smi, stat) \ | ||
289 | atomic_inc(&(smi)->stats[SI_STAT_ ## stat]) | ||
290 | #define smi_get_stat(smi, stat) \ | ||
291 | ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat])) | ||
292 | |||
238 | #define SI_MAX_PARMS 4 | 293 | #define SI_MAX_PARMS 4 |
239 | 294 | ||
240 | static int force_kipmid[SI_MAX_PARMS]; | 295 | static int force_kipmid[SI_MAX_PARMS]; |
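The preceding hunk is the heart of the cleanup: the spinlock-protected unsigned long counters are replaced by an atomic_t array indexed by enum si_stat_indexes, and smi_inc_stat()/smi_get_stat() hide the indexing, so the hot paths no longer touch count_lock. The same pattern in portable C11, as a userspace stand-in for atomic_t with made-up stat names:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Indexes into stats[]; the final entry only sizes the array. */
    enum demo_stat_indexes {
        DEMO_STAT_short_timeouts = 0,
        DEMO_STAT_long_timeouts,
        DEMO_STAT_interrupts,
        DEMO_NUM_STATS            /* must remain last */
    };

    struct demo_info {
        atomic_uint stats[DEMO_NUM_STATS];
    };

    /* Token pasting keeps call sites short: demo_inc_stat(d, interrupts). */
    #define demo_inc_stat(d, stat) \
        atomic_fetch_add(&(d)->stats[DEMO_STAT_ ## stat], 1)
    #define demo_get_stat(d, stat) \
        ((unsigned int) atomic_load(&(d)->stats[DEMO_STAT_ ## stat]))

    int main(void)
    {
        static struct demo_info d;        /* zero-initialized counters */

        demo_inc_stat(&d, interrupts);
        demo_inc_stat(&d, interrupts);
        demo_inc_stat(&d, short_timeouts);

        printf("interrupts:     %u\n", demo_get_stat(&d, interrupts));
        printf("short_timeouts: %u\n", demo_get_stat(&d, short_timeouts));
        return 0;
    }

Each increment is a single atomic add, so readers can sample the counters without any locking, at the cost of the counters no longer forming one consistent snapshot.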
@@ -246,7 +301,7 @@ static int try_smi_init(struct smi_info *smi); | |||
246 | static void cleanup_one_si(struct smi_info *to_clean); | 301 | static void cleanup_one_si(struct smi_info *to_clean); |
247 | 302 | ||
248 | static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list); | 303 | static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list); |
249 | static int register_xaction_notifier(struct notifier_block * nb) | 304 | static int register_xaction_notifier(struct notifier_block *nb) |
250 | { | 305 | { |
251 | return atomic_notifier_chain_register(&xaction_notifier_list, nb); | 306 | return atomic_notifier_chain_register(&xaction_notifier_list, nb); |
252 | } | 307 | } |
@@ -255,7 +310,7 @@ static void deliver_recv_msg(struct smi_info *smi_info, | |||
255 | struct ipmi_smi_msg *msg) | 310 | struct ipmi_smi_msg *msg) |
256 | { | 311 | { |
257 | /* Deliver the message to the upper layer with the lock | 312 | /* Deliver the message to the upper layer with the lock |
258 | released. */ | 313 | released. */ |
259 | spin_unlock(&(smi_info->si_lock)); | 314 | spin_unlock(&(smi_info->si_lock)); |
260 | ipmi_smi_msg_received(smi_info->intf, msg); | 315 | ipmi_smi_msg_received(smi_info->intf, msg); |
261 | spin_lock(&(smi_info->si_lock)); | 316 | spin_lock(&(smi_info->si_lock)); |
@@ -287,9 +342,12 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info) | |||
287 | struct timeval t; | 342 | struct timeval t; |
288 | #endif | 343 | #endif |
289 | 344 | ||
290 | /* No need to save flags, we aleady have interrupts off and we | 345 | /* |
291 | already hold the SMI lock. */ | 346 | * No need to save flags, we aleady have interrupts off and we |
292 | spin_lock(&(smi_info->msg_lock)); | 347 | * already hold the SMI lock. |
348 | */ | ||
349 | if (!smi_info->run_to_completion) | ||
350 | spin_lock(&(smi_info->msg_lock)); | ||
293 | 351 | ||
294 | /* Pick the high priority queue first. */ | 352 | /* Pick the high priority queue first. */ |
295 | if (!list_empty(&(smi_info->hp_xmit_msgs))) { | 353 | if (!list_empty(&(smi_info->hp_xmit_msgs))) { |
@@ -310,7 +368,7 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info) | |||
310 | link); | 368 | link); |
311 | #ifdef DEBUG_TIMING | 369 | #ifdef DEBUG_TIMING |
312 | do_gettimeofday(&t); | 370 | do_gettimeofday(&t); |
313 | printk("**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec); | 371 | printk(KERN_DEBUG "**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec); |
314 | #endif | 372 | #endif |
315 | err = atomic_notifier_call_chain(&xaction_notifier_list, | 373 | err = atomic_notifier_call_chain(&xaction_notifier_list, |
316 | 0, smi_info); | 374 | 0, smi_info); |
@@ -322,14 +380,14 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info) | |||
322 | smi_info->si_sm, | 380 | smi_info->si_sm, |
323 | smi_info->curr_msg->data, | 381 | smi_info->curr_msg->data, |
324 | smi_info->curr_msg->data_size); | 382 | smi_info->curr_msg->data_size); |
325 | if (err) { | 383 | if (err) |
326 | return_hosed_msg(smi_info, err); | 384 | return_hosed_msg(smi_info, err); |
327 | } | ||
328 | 385 | ||
329 | rv = SI_SM_CALL_WITHOUT_DELAY; | 386 | rv = SI_SM_CALL_WITHOUT_DELAY; |
330 | } | 387 | } |
331 | out: | 388 | out: |
332 | spin_unlock(&(smi_info->msg_lock)); | 389 | if (!smi_info->run_to_completion) |
390 | spin_unlock(&(smi_info->msg_lock)); | ||
333 | 391 | ||
334 | return rv; | 392 | return rv; |
335 | } | 393 | } |
@@ -338,8 +396,10 @@ static void start_enable_irq(struct smi_info *smi_info) | |||
338 | { | 396 | { |
339 | unsigned char msg[2]; | 397 | unsigned char msg[2]; |
340 | 398 | ||
341 | /* If we are enabling interrupts, we have to tell the | 399 | /* |
342 | BMC to use them. */ | 400 | * If we are enabling interrupts, we have to tell the |
401 | * BMC to use them. | ||
402 | */ | ||
343 | msg[0] = (IPMI_NETFN_APP_REQUEST << 2); | 403 | msg[0] = (IPMI_NETFN_APP_REQUEST << 2); |
344 | msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD; | 404 | msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD; |
345 | 405 | ||
@@ -371,10 +431,12 @@ static void start_clear_flags(struct smi_info *smi_info) | |||
371 | smi_info->si_state = SI_CLEARING_FLAGS; | 431 | smi_info->si_state = SI_CLEARING_FLAGS; |
372 | } | 432 | } |
373 | 433 | ||
374 | /* When we have a situtaion where we run out of memory and cannot | 434 | /* |
375 | allocate messages, we just leave them in the BMC and run the system | 435 | * When we have a situtaion where we run out of memory and cannot |
376 | polled until we can allocate some memory. Once we have some | 436 | * allocate messages, we just leave them in the BMC and run the system |
377 | memory, we will re-enable the interrupt. */ | 437 | * polled until we can allocate some memory. Once we have some |
438 | * memory, we will re-enable the interrupt. | ||
439 | */ | ||
378 | static inline void disable_si_irq(struct smi_info *smi_info) | 440 | static inline void disable_si_irq(struct smi_info *smi_info) |
379 | { | 441 | { |
380 | if ((smi_info->irq) && (!smi_info->interrupt_disabled)) { | 442 | if ((smi_info->irq) && (!smi_info->interrupt_disabled)) { |
@@ -396,9 +458,7 @@ static void handle_flags(struct smi_info *smi_info) | |||
396 | retry: | 458 | retry: |
397 | if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) { | 459 | if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) { |
398 | /* Watchdog pre-timeout */ | 460 | /* Watchdog pre-timeout */ |
399 | spin_lock(&smi_info->count_lock); | 461 | smi_inc_stat(smi_info, watchdog_pretimeouts); |
400 | smi_info->watchdog_pretimeouts++; | ||
401 | spin_unlock(&smi_info->count_lock); | ||
402 | 462 | ||
403 | start_clear_flags(smi_info); | 463 | start_clear_flags(smi_info); |
404 | smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT; | 464 | smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT; |
@@ -444,12 +504,11 @@ static void handle_flags(struct smi_info *smi_info) | |||
444 | smi_info->curr_msg->data_size); | 504 | smi_info->curr_msg->data_size); |
445 | smi_info->si_state = SI_GETTING_EVENTS; | 505 | smi_info->si_state = SI_GETTING_EVENTS; |
446 | } else if (smi_info->msg_flags & OEM_DATA_AVAIL && | 506 | } else if (smi_info->msg_flags & OEM_DATA_AVAIL && |
447 | smi_info->oem_data_avail_handler) { | 507 | smi_info->oem_data_avail_handler) { |
448 | if (smi_info->oem_data_avail_handler(smi_info)) | 508 | if (smi_info->oem_data_avail_handler(smi_info)) |
449 | goto retry; | 509 | goto retry; |
450 | } else { | 510 | } else |
451 | smi_info->si_state = SI_NORMAL; | 511 | smi_info->si_state = SI_NORMAL; |
452 | } | ||
453 | } | 512 | } |
454 | 513 | ||
455 | static void handle_transaction_done(struct smi_info *smi_info) | 514 | static void handle_transaction_done(struct smi_info *smi_info) |
@@ -459,7 +518,7 @@ static void handle_transaction_done(struct smi_info *smi_info) | |||
459 | struct timeval t; | 518 | struct timeval t; |
460 | 519 | ||
461 | do_gettimeofday(&t); | 520 | do_gettimeofday(&t); |
462 | printk("**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec); | 521 | printk(KERN_DEBUG "**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec); |
463 | #endif | 522 | #endif |
464 | switch (smi_info->si_state) { | 523 | switch (smi_info->si_state) { |
465 | case SI_NORMAL: | 524 | case SI_NORMAL: |
@@ -472,9 +531,11 @@ static void handle_transaction_done(struct smi_info *smi_info) | |||
472 | smi_info->curr_msg->rsp, | 531 | smi_info->curr_msg->rsp, |
473 | IPMI_MAX_MSG_LENGTH); | 532 | IPMI_MAX_MSG_LENGTH); |
474 | 533 | ||
475 | /* Do this here becase deliver_recv_msg() releases the | 534 | /* |
476 | lock, and a new message can be put in during the | 535 | * Do this here becase deliver_recv_msg() releases the |
477 | time the lock is released. */ | 536 | * lock, and a new message can be put in during the |
537 | * time the lock is released. | ||
538 | */ | ||
478 | msg = smi_info->curr_msg; | 539 | msg = smi_info->curr_msg; |
479 | smi_info->curr_msg = NULL; | 540 | smi_info->curr_msg = NULL; |
480 | deliver_recv_msg(smi_info, msg); | 541 | deliver_recv_msg(smi_info, msg); |
@@ -488,12 +549,13 @@ static void handle_transaction_done(struct smi_info *smi_info) | |||
488 | /* We got the flags from the SMI, now handle them. */ | 549 | /* We got the flags from the SMI, now handle them. */ |
489 | len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4); | 550 | len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4); |
490 | if (msg[2] != 0) { | 551 | if (msg[2] != 0) { |
491 | /* Error fetching flags, just give up for | 552 | /* Error fetching flags, just give up for now. */ |
492 | now. */ | ||
493 | smi_info->si_state = SI_NORMAL; | 553 | smi_info->si_state = SI_NORMAL; |
494 | } else if (len < 4) { | 554 | } else if (len < 4) { |
495 | /* Hmm, no flags. That's technically illegal, but | 555 | /* |
496 | don't use uninitialized data. */ | 556 | * Hmm, no flags. That's technically illegal, but |
557 | * don't use uninitialized data. | ||
558 | */ | ||
497 | smi_info->si_state = SI_NORMAL; | 559 | smi_info->si_state = SI_NORMAL; |
498 | } else { | 560 | } else { |
499 | smi_info->msg_flags = msg[3]; | 561 | smi_info->msg_flags = msg[3]; |
@@ -530,9 +592,11 @@ static void handle_transaction_done(struct smi_info *smi_info) | |||
530 | smi_info->curr_msg->rsp, | 592 | smi_info->curr_msg->rsp, |
531 | IPMI_MAX_MSG_LENGTH); | 593 | IPMI_MAX_MSG_LENGTH); |
532 | 594 | ||
533 | /* Do this here becase deliver_recv_msg() releases the | 595 | /* |
534 | lock, and a new message can be put in during the | 596 | * Do this here becase deliver_recv_msg() releases the |
535 | time the lock is released. */ | 597 | * lock, and a new message can be put in during the |
598 | * time the lock is released. | ||
599 | */ | ||
536 | msg = smi_info->curr_msg; | 600 | msg = smi_info->curr_msg; |
537 | smi_info->curr_msg = NULL; | 601 | smi_info->curr_msg = NULL; |
538 | if (msg->rsp[2] != 0) { | 602 | if (msg->rsp[2] != 0) { |
@@ -543,14 +607,14 @@ static void handle_transaction_done(struct smi_info *smi_info) | |||
543 | smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL; | 607 | smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL; |
544 | handle_flags(smi_info); | 608 | handle_flags(smi_info); |
545 | } else { | 609 | } else { |
546 | spin_lock(&smi_info->count_lock); | 610 | smi_inc_stat(smi_info, events); |
547 | smi_info->events++; | 611 | |
548 | spin_unlock(&smi_info->count_lock); | 612 | /* |
549 | 613 | * Do this before we deliver the message | |
550 | /* Do this before we deliver the message | 614 | * because delivering the message releases the |
551 | because delivering the message releases the | 615 | * lock and something else can mess with the |
552 | lock and something else can mess with the | 616 | * state. |
553 | state. */ | 617 | */ |
554 | handle_flags(smi_info); | 618 | handle_flags(smi_info); |
555 | 619 | ||
556 | deliver_recv_msg(smi_info, msg); | 620 | deliver_recv_msg(smi_info, msg); |
@@ -566,9 +630,11 @@ static void handle_transaction_done(struct smi_info *smi_info) | |||
566 | smi_info->curr_msg->rsp, | 630 | smi_info->curr_msg->rsp, |
567 | IPMI_MAX_MSG_LENGTH); | 631 | IPMI_MAX_MSG_LENGTH); |
568 | 632 | ||
569 | /* Do this here becase deliver_recv_msg() releases the | 633 | /* |
570 | lock, and a new message can be put in during the | 634 | * Do this here becase deliver_recv_msg() releases the |
571 | time the lock is released. */ | 635 | * lock, and a new message can be put in during the |
636 | * time the lock is released. | ||
637 | */ | ||
572 | msg = smi_info->curr_msg; | 638 | msg = smi_info->curr_msg; |
573 | smi_info->curr_msg = NULL; | 639 | smi_info->curr_msg = NULL; |
574 | if (msg->rsp[2] != 0) { | 640 | if (msg->rsp[2] != 0) { |
@@ -579,14 +645,14 @@ static void handle_transaction_done(struct smi_info *smi_info) | |||
579 | smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL; | 645 | smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL; |
580 | handle_flags(smi_info); | 646 | handle_flags(smi_info); |
581 | } else { | 647 | } else { |
582 | spin_lock(&smi_info->count_lock); | 648 | smi_inc_stat(smi_info, incoming_messages); |
583 | smi_info->incoming_messages++; | 649 | |
584 | spin_unlock(&smi_info->count_lock); | 650 | /* |
585 | 651 | * Do this before we deliver the message | |
586 | /* Do this before we deliver the message | 652 | * because delivering the message releases the |
587 | because delivering the message releases the | 653 | * lock and something else can mess with the |
588 | lock and something else can mess with the | 654 | * state. |
589 | state. */ | 655 | */ |
590 | handle_flags(smi_info); | 656 | handle_flags(smi_info); |
591 | 657 | ||
592 | deliver_recv_msg(smi_info, msg); | 658 | deliver_recv_msg(smi_info, msg); |
@@ -674,69 +740,70 @@ static void handle_transaction_done(struct smi_info *smi_info) | |||
674 | } | 740 | } |
675 | } | 741 | } |
676 | 742 | ||
677 | /* Called on timeouts and events. Timeouts should pass the elapsed | 743 | /* |
678 | time, interrupts should pass in zero. Must be called with | 744 | * Called on timeouts and events. Timeouts should pass the elapsed |
679 | si_lock held and interrupts disabled. */ | 745 | * time, interrupts should pass in zero. Must be called with |
746 | * si_lock held and interrupts disabled. | ||
747 | */ | ||
680 | static enum si_sm_result smi_event_handler(struct smi_info *smi_info, | 748 | static enum si_sm_result smi_event_handler(struct smi_info *smi_info, |
681 | int time) | 749 | int time) |
682 | { | 750 | { |
683 | enum si_sm_result si_sm_result; | 751 | enum si_sm_result si_sm_result; |
684 | 752 | ||
685 | restart: | 753 | restart: |
686 | /* There used to be a loop here that waited a little while | 754 | /* |
687 | (around 25us) before giving up. That turned out to be | 755 | * There used to be a loop here that waited a little while |
688 | pointless, the minimum delays I was seeing were in the 300us | 756 | * (around 25us) before giving up. That turned out to be |
689 | range, which is far too long to wait in an interrupt. So | 757 | * pointless, the minimum delays I was seeing were in the 300us |
690 | we just run until the state machine tells us something | 758 | * range, which is far too long to wait in an interrupt. So |
691 | happened or it needs a delay. */ | 759 | * we just run until the state machine tells us something |
760 | * happened or it needs a delay. | ||
761 | */ | ||
692 | si_sm_result = smi_info->handlers->event(smi_info->si_sm, time); | 762 | si_sm_result = smi_info->handlers->event(smi_info->si_sm, time); |
693 | time = 0; | 763 | time = 0; |
694 | while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY) | 764 | while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY) |
695 | { | ||
696 | si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0); | 765 | si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0); |
697 | } | ||
698 | 766 | ||
699 | if (si_sm_result == SI_SM_TRANSACTION_COMPLETE) | 767 | if (si_sm_result == SI_SM_TRANSACTION_COMPLETE) { |
700 | { | 768 | smi_inc_stat(smi_info, complete_transactions); |
701 | spin_lock(&smi_info->count_lock); | ||
702 | smi_info->complete_transactions++; | ||
703 | spin_unlock(&smi_info->count_lock); | ||
704 | 769 | ||
705 | handle_transaction_done(smi_info); | 770 | handle_transaction_done(smi_info); |
706 | si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0); | 771 | si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0); |
707 | } | 772 | } else if (si_sm_result == SI_SM_HOSED) { |
708 | else if (si_sm_result == SI_SM_HOSED) | 773 | smi_inc_stat(smi_info, hosed_count); |
709 | { | ||
710 | spin_lock(&smi_info->count_lock); | ||
711 | smi_info->hosed_count++; | ||
712 | spin_unlock(&smi_info->count_lock); | ||
713 | 774 | ||
714 | /* Do the before return_hosed_msg, because that | 775 | /* |
715 | releases the lock. */ | 776 | * Do the before return_hosed_msg, because that |
777 | * releases the lock. | ||
778 | */ | ||
716 | smi_info->si_state = SI_NORMAL; | 779 | smi_info->si_state = SI_NORMAL; |
717 | if (smi_info->curr_msg != NULL) { | 780 | if (smi_info->curr_msg != NULL) { |
718 | /* If we were handling a user message, format | 781 | /* |
719 | a response to send to the upper layer to | 782 | * If we were handling a user message, format |
720 | tell it about the error. */ | 783 | * a response to send to the upper layer to |
784 | * tell it about the error. | ||
785 | */ | ||
721 | return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED); | 786 | return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED); |
722 | } | 787 | } |
723 | si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0); | 788 | si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0); |
724 | } | 789 | } |
725 | 790 | ||
726 | /* We prefer handling attn over new messages. */ | 791 | /* |
727 | if (si_sm_result == SI_SM_ATTN) | 792 | * We prefer handling attn over new messages. But don't do |
728 | { | 793 | * this if there is not yet an upper layer to handle anything. |
794 | */ | ||
795 | if (likely(smi_info->intf) && si_sm_result == SI_SM_ATTN) { | ||
729 | unsigned char msg[2]; | 796 | unsigned char msg[2]; |
730 | 797 | ||
731 | spin_lock(&smi_info->count_lock); | 798 | smi_inc_stat(smi_info, attentions); |
732 | smi_info->attentions++; | ||
733 | spin_unlock(&smi_info->count_lock); | ||
734 | 799 | ||
735 | /* Got a attn, send down a get message flags to see | 800 | /* |
736 | what's causing it. It would be better to handle | 801 | * Got a attn, send down a get message flags to see |
737 | this in the upper layer, but due to the way | 802 | * what's causing it. It would be better to handle |
738 | interrupts work with the SMI, that's not really | 803 | * this in the upper layer, but due to the way |
739 | possible. */ | 804 | * interrupts work with the SMI, that's not really |
805 | * possible. | ||
806 | */ | ||
740 | msg[0] = (IPMI_NETFN_APP_REQUEST << 2); | 807 | msg[0] = (IPMI_NETFN_APP_REQUEST << 2); |
741 | msg[1] = IPMI_GET_MSG_FLAGS_CMD; | 808 | msg[1] = IPMI_GET_MSG_FLAGS_CMD; |
742 | 809 | ||
@@ -748,20 +815,19 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info, | |||
748 | 815 | ||
749 | /* If we are currently idle, try to start the next message. */ | 816 | /* If we are currently idle, try to start the next message. */ |
750 | if (si_sm_result == SI_SM_IDLE) { | 817 | if (si_sm_result == SI_SM_IDLE) { |
751 | spin_lock(&smi_info->count_lock); | 818 | smi_inc_stat(smi_info, idles); |
752 | smi_info->idles++; | ||
753 | spin_unlock(&smi_info->count_lock); | ||
754 | 819 | ||
755 | si_sm_result = start_next_msg(smi_info); | 820 | si_sm_result = start_next_msg(smi_info); |
756 | if (si_sm_result != SI_SM_IDLE) | 821 | if (si_sm_result != SI_SM_IDLE) |
757 | goto restart; | 822 | goto restart; |
758 | } | 823 | } |
759 | 824 | ||
760 | if ((si_sm_result == SI_SM_IDLE) | 825 | if ((si_sm_result == SI_SM_IDLE) |
761 | && (atomic_read(&smi_info->req_events))) | 826 | && (atomic_read(&smi_info->req_events))) { |
762 | { | 827 | /* |
763 | /* We are idle and the upper layer requested that I fetch | 828 | * We are idle and the upper layer requested that I fetch |
764 | events, so do so. */ | 829 | * events, so do so. |
830 | */ | ||
765 | atomic_set(&smi_info->req_events, 0); | 831 | atomic_set(&smi_info->req_events, 0); |
766 | 832 | ||
767 | smi_info->curr_msg = ipmi_alloc_smi_msg(); | 833 | smi_info->curr_msg = ipmi_alloc_smi_msg(); |
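One behavioural change worth noting in the smi_event_handler() hunk above: the ATTN branch is now guarded by likely(smi_info->intf), so attentions are only chased once an upper layer has been registered to receive the result. For readers outside the kernel tree, likely()/unlikely() are thin wrappers around GCC's branch-prediction builtin; a hypothetical userspace stand-in:

    #include <stdio.h>

    /* Userspace stand-ins for the kernel's branch-prediction hints. */
    #define likely(x)   __builtin_expect(!!(x), 1)
    #define unlikely(x) __builtin_expect(!!(x), 0)

    static void handle_attention(void *intf)
    {
        /* The hint only influences code layout; the test itself is unchanged. */
        if (likely(intf))
            printf("upper layer present, fetch the message flags\n");
        else
            printf("no upper layer yet, leave the ATTN for later\n");
    }

    int main(void)
    {
        handle_attention(NULL);
        handle_attention((void *)1);
        return 0;
    }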
@@ -803,56 +869,50 @@ static void sender(void *send_info, | |||
803 | return; | 869 | return; |
804 | } | 870 | } |
805 | 871 | ||
806 | spin_lock_irqsave(&(smi_info->msg_lock), flags); | ||
807 | #ifdef DEBUG_TIMING | 872 | #ifdef DEBUG_TIMING |
808 | do_gettimeofday(&t); | 873 | do_gettimeofday(&t); |
809 | printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec); | 874 | printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec); |
810 | #endif | 875 | #endif |
811 | 876 | ||
812 | if (smi_info->run_to_completion) { | 877 | if (smi_info->run_to_completion) { |
813 | /* If we are running to completion, then throw it in | 878 | /* |
814 | the list and run transactions until everything is | 879 | * If we are running to completion, then throw it in |
815 | clear. Priority doesn't matter here. */ | 880 | * the list and run transactions until everything is |
881 | * clear. Priority doesn't matter here. | ||
882 | */ | ||
883 | |||
884 | /* | ||
885 | * Run to completion means we are single-threaded, no | ||
886 | * need for locks. | ||
887 | */ | ||
816 | list_add_tail(&(msg->link), &(smi_info->xmit_msgs)); | 888 | list_add_tail(&(msg->link), &(smi_info->xmit_msgs)); |
817 | 889 | ||
818 | /* We have to release the msg lock and claim the smi | ||
819 | lock in this case, because of race conditions. */ | ||
820 | spin_unlock_irqrestore(&(smi_info->msg_lock), flags); | ||
821 | |||
822 | spin_lock_irqsave(&(smi_info->si_lock), flags); | ||
823 | result = smi_event_handler(smi_info, 0); | 890 | result = smi_event_handler(smi_info, 0); |
824 | while (result != SI_SM_IDLE) { | 891 | while (result != SI_SM_IDLE) { |
825 | udelay(SI_SHORT_TIMEOUT_USEC); | 892 | udelay(SI_SHORT_TIMEOUT_USEC); |
826 | result = smi_event_handler(smi_info, | 893 | result = smi_event_handler(smi_info, |
827 | SI_SHORT_TIMEOUT_USEC); | 894 | SI_SHORT_TIMEOUT_USEC); |
828 | } | 895 | } |
829 | spin_unlock_irqrestore(&(smi_info->si_lock), flags); | ||
830 | return; | 896 | return; |
831 | } else { | ||
832 | if (priority > 0) { | ||
833 | list_add_tail(&(msg->link), &(smi_info->hp_xmit_msgs)); | ||
834 | } else { | ||
835 | list_add_tail(&(msg->link), &(smi_info->xmit_msgs)); | ||
836 | } | ||
837 | } | 897 | } |
838 | spin_unlock_irqrestore(&(smi_info->msg_lock), flags); | ||
839 | 898 | ||
840 | spin_lock_irqsave(&(smi_info->si_lock), flags); | 899 | spin_lock_irqsave(&smi_info->msg_lock, flags); |
841 | if ((smi_info->si_state == SI_NORMAL) | 900 | if (priority > 0) |
842 | && (smi_info->curr_msg == NULL)) | 901 | list_add_tail(&msg->link, &smi_info->hp_xmit_msgs); |
843 | { | 902 | else |
903 | list_add_tail(&msg->link, &smi_info->xmit_msgs); | ||
904 | spin_unlock_irqrestore(&smi_info->msg_lock, flags); | ||
905 | |||
906 | spin_lock_irqsave(&smi_info->si_lock, flags); | ||
907 | if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) | ||
844 | start_next_msg(smi_info); | 908 | start_next_msg(smi_info); |
845 | } | 909 | spin_unlock_irqrestore(&smi_info->si_lock, flags); |
846 | spin_unlock_irqrestore(&(smi_info->si_lock), flags); | ||
847 | } | 910 | } |
848 | 911 | ||
849 | static void set_run_to_completion(void *send_info, int i_run_to_completion) | 912 | static void set_run_to_completion(void *send_info, int i_run_to_completion) |
850 | { | 913 | { |
851 | struct smi_info *smi_info = send_info; | 914 | struct smi_info *smi_info = send_info; |
852 | enum si_sm_result result; | 915 | enum si_sm_result result; |
853 | unsigned long flags; | ||
854 | |||
855 | spin_lock_irqsave(&(smi_info->si_lock), flags); | ||
856 | 916 | ||
857 | smi_info->run_to_completion = i_run_to_completion; | 917 | smi_info->run_to_completion = i_run_to_completion; |
858 | if (i_run_to_completion) { | 918 | if (i_run_to_completion) { |
@@ -863,8 +923,6 @@ static void set_run_to_completion(void *send_info, int i_run_to_completion) | |||
863 | SI_SHORT_TIMEOUT_USEC); | 923 | SI_SHORT_TIMEOUT_USEC); |
864 | } | 924 | } |
865 | } | 925 | } |
866 | |||
867 | spin_unlock_irqrestore(&(smi_info->si_lock), flags); | ||
868 | } | 926 | } |
869 | 927 | ||
870 | static int ipmi_thread(void *data) | 928 | static int ipmi_thread(void *data) |
@@ -878,9 +936,8 @@ static int ipmi_thread(void *data) | |||
878 | spin_lock_irqsave(&(smi_info->si_lock), flags); | 936 | spin_lock_irqsave(&(smi_info->si_lock), flags); |
879 | smi_result = smi_event_handler(smi_info, 0); | 937 | smi_result = smi_event_handler(smi_info, 0); |
880 | spin_unlock_irqrestore(&(smi_info->si_lock), flags); | 938 | spin_unlock_irqrestore(&(smi_info->si_lock), flags); |
881 | if (smi_result == SI_SM_CALL_WITHOUT_DELAY) { | 939 | if (smi_result == SI_SM_CALL_WITHOUT_DELAY) |
882 | /* do nothing */ | 940 | ; /* do nothing */ |
883 | } | ||
884 | else if (smi_result == SI_SM_CALL_WITH_DELAY) | 941 | else if (smi_result == SI_SM_CALL_WITH_DELAY) |
885 | schedule(); | 942 | schedule(); |
886 | else | 943 | else |
@@ -931,7 +988,7 @@ static void smi_timeout(unsigned long data) | |||
931 | spin_lock_irqsave(&(smi_info->si_lock), flags); | 988 | spin_lock_irqsave(&(smi_info->si_lock), flags); |
932 | #ifdef DEBUG_TIMING | 989 | #ifdef DEBUG_TIMING |
933 | do_gettimeofday(&t); | 990 | do_gettimeofday(&t); |
934 | printk("**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec); | 991 | printk(KERN_DEBUG "**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec); |
935 | #endif | 992 | #endif |
936 | jiffies_now = jiffies; | 993 | jiffies_now = jiffies; |
937 | time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies) | 994 | time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies) |
@@ -945,23 +1002,19 @@ static void smi_timeout(unsigned long data) | |||
945 | if ((smi_info->irq) && (!smi_info->interrupt_disabled)) { | 1002 | if ((smi_info->irq) && (!smi_info->interrupt_disabled)) { |
946 | /* Running with interrupts, only do long timeouts. */ | 1003 | /* Running with interrupts, only do long timeouts. */ |
947 | smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES; | 1004 | smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES; |
948 | spin_lock_irqsave(&smi_info->count_lock, flags); | 1005 | smi_inc_stat(smi_info, long_timeouts); |
949 | smi_info->long_timeouts++; | ||
950 | spin_unlock_irqrestore(&smi_info->count_lock, flags); | ||
951 | goto do_add_timer; | 1006 | goto do_add_timer; |
952 | } | 1007 | } |
953 | 1008 | ||
954 | /* If the state machine asks for a short delay, then shorten | 1009 | /* |
955 | the timer timeout. */ | 1010 | * If the state machine asks for a short delay, then shorten |
1011 | * the timer timeout. | ||
1012 | */ | ||
956 | if (smi_result == SI_SM_CALL_WITH_DELAY) { | 1013 | if (smi_result == SI_SM_CALL_WITH_DELAY) { |
957 | spin_lock_irqsave(&smi_info->count_lock, flags); | 1014 | smi_inc_stat(smi_info, short_timeouts); |
958 | smi_info->short_timeouts++; | ||
959 | spin_unlock_irqrestore(&smi_info->count_lock, flags); | ||
960 | smi_info->si_timer.expires = jiffies + 1; | 1015 | smi_info->si_timer.expires = jiffies + 1; |
961 | } else { | 1016 | } else { |
962 | spin_lock_irqsave(&smi_info->count_lock, flags); | 1017 | smi_inc_stat(smi_info, long_timeouts); |
963 | smi_info->long_timeouts++; | ||
964 | spin_unlock_irqrestore(&smi_info->count_lock, flags); | ||
965 | smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES; | 1018 | smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES; |
966 | } | 1019 | } |
967 | 1020 | ||
@@ -979,13 +1032,11 @@ static irqreturn_t si_irq_handler(int irq, void *data) | |||
979 | 1032 | ||
980 | spin_lock_irqsave(&(smi_info->si_lock), flags); | 1033 | spin_lock_irqsave(&(smi_info->si_lock), flags); |
981 | 1034 | ||
982 | spin_lock(&smi_info->count_lock); | 1035 | smi_inc_stat(smi_info, interrupts); |
983 | smi_info->interrupts++; | ||
984 | spin_unlock(&smi_info->count_lock); | ||
985 | 1036 | ||
986 | #ifdef DEBUG_TIMING | 1037 | #ifdef DEBUG_TIMING |
987 | do_gettimeofday(&t); | 1038 | do_gettimeofday(&t); |
988 | printk("**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec); | 1039 | printk(KERN_DEBUG "**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec); |
989 | #endif | 1040 | #endif |
990 | smi_event_handler(smi_info, 0); | 1041 | smi_event_handler(smi_info, 0); |
991 | spin_unlock_irqrestore(&(smi_info->si_lock), flags); | 1042 | spin_unlock_irqrestore(&(smi_info->si_lock), flags); |
@@ -1028,7 +1079,7 @@ static int smi_start_processing(void *send_info, | |||
1028 | * The BT interface is efficient enough to not need a thread, | 1079 | * The BT interface is efficient enough to not need a thread, |
1029 | * and there is no need for a thread if we have interrupts. | 1080 | * and there is no need for a thread if we have interrupts. |
1030 | */ | 1081 | */ |
1031 | else if ((new_smi->si_type != SI_BT) && (!new_smi->irq)) | 1082 | else if ((new_smi->si_type != SI_BT) && (!new_smi->irq)) |
1032 | enable = 1; | 1083 | enable = 1; |
1033 | 1084 | ||
1034 | if (enable) { | 1085 | if (enable) { |
@@ -1054,8 +1105,7 @@ static void set_maintenance_mode(void *send_info, int enable) | |||
1054 | atomic_set(&smi_info->req_events, 0); | 1105 | atomic_set(&smi_info->req_events, 0); |
1055 | } | 1106 | } |
1056 | 1107 | ||
1057 | static struct ipmi_smi_handlers handlers = | 1108 | static struct ipmi_smi_handlers handlers = { |
1058 | { | ||
1059 | .owner = THIS_MODULE, | 1109 | .owner = THIS_MODULE, |
1060 | .start_processing = smi_start_processing, | 1110 | .start_processing = smi_start_processing, |
1061 | .sender = sender, | 1111 | .sender = sender, |
@@ -1065,8 +1115,10 @@ static struct ipmi_smi_handlers handlers = | |||
1065 | .poll = poll, | 1115 | .poll = poll, |
1066 | }; | 1116 | }; |
1067 | 1117 | ||
1068 | /* There can be 4 IO ports passed in (with or without IRQs), 4 addresses, | 1118 | /* |
1069 | a default IO port, and 1 ACPI/SPMI address. That sets SI_MAX_DRIVERS */ | 1119 | * There can be 4 IO ports passed in (with or without IRQs), 4 addresses, |
1120 | * a default IO port, and 1 ACPI/SPMI address. That sets SI_MAX_DRIVERS. | ||
1121 | */ | ||
1070 | 1122 | ||
1071 | static LIST_HEAD(smi_infos); | 1123 | static LIST_HEAD(smi_infos); |
1072 | static DEFINE_MUTEX(smi_infos_lock); | 1124 | static DEFINE_MUTEX(smi_infos_lock); |
@@ -1257,10 +1309,9 @@ static void port_cleanup(struct smi_info *info) | |||
1257 | int idx; | 1309 | int idx; |
1258 | 1310 | ||
1259 | if (addr) { | 1311 | if (addr) { |
1260 | for (idx = 0; idx < info->io_size; idx++) { | 1312 | for (idx = 0; idx < info->io_size; idx++) |
1261 | release_region(addr + idx * info->io.regspacing, | 1313 | release_region(addr + idx * info->io.regspacing, |
1262 | info->io.regsize); | 1314 | info->io.regsize); |
1263 | } | ||
1264 | } | 1315 | } |
1265 | } | 1316 | } |
1266 | 1317 | ||
@@ -1274,8 +1325,10 @@ static int port_setup(struct smi_info *info) | |||
1274 | 1325 | ||
1275 | info->io_cleanup = port_cleanup; | 1326 | info->io_cleanup = port_cleanup; |
1276 | 1327 | ||
1277 | /* Figure out the actual inb/inw/inl/etc routine to use based | 1328 | /* |
1278 | upon the register size. */ | 1329 | * Figure out the actual inb/inw/inl/etc routine to use based |
1330 | * upon the register size. | ||
1331 | */ | ||
1279 | switch (info->io.regsize) { | 1332 | switch (info->io.regsize) { |
1280 | case 1: | 1333 | case 1: |
1281 | info->io.inputb = port_inb; | 1334 | info->io.inputb = port_inb; |
@@ -1290,17 +1343,18 @@ static int port_setup(struct smi_info *info) | |||
1290 | info->io.outputb = port_outl; | 1343 | info->io.outputb = port_outl; |
1291 | break; | 1344 | break; |
1292 | default: | 1345 | default: |
1293 | printk("ipmi_si: Invalid register size: %d\n", | 1346 | printk(KERN_WARNING "ipmi_si: Invalid register size: %d\n", |
1294 | info->io.regsize); | 1347 | info->io.regsize); |
1295 | return -EINVAL; | 1348 | return -EINVAL; |
1296 | } | 1349 | } |
1297 | 1350 | ||
1298 | /* Some BIOSes reserve disjoint I/O regions in their ACPI | 1351 | /* |
1352 | * Some BIOSes reserve disjoint I/O regions in their ACPI | ||
1299 | * tables. This causes problems when trying to register the | 1353 | * tables. This causes problems when trying to register the |
1300 | * entire I/O region. Therefore we must register each I/O | 1354 | * entire I/O region. Therefore we must register each I/O |
1301 | * port separately. | 1355 | * port separately. |
1302 | */ | 1356 | */ |
1303 | for (idx = 0; idx < info->io_size; idx++) { | 1357 | for (idx = 0; idx < info->io_size; idx++) { |
1304 | if (request_region(addr + idx * info->io.regspacing, | 1358 | if (request_region(addr + idx * info->io.regspacing, |
1305 | info->io.regsize, DEVICE_NAME) == NULL) { | 1359 | info->io.regsize, DEVICE_NAME) == NULL) { |
1306 | /* Undo allocations */ | 1360 | /* Undo allocations */ |
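Because some BIOSes describe disjoint I/O reservations in ACPI, port_setup() above claims every register as its own region at base + idx * regspacing rather than one contiguous span. A standalone illustration of that address arithmetic, reusing the 0xca2/spacing-4 example from the struct smi_info comment (all values here are just examples):

    #include <stdio.h>

    int main(void)
    {
        unsigned int base       = 0xca2; /* first port, as in the comment above */
        unsigned int regspacing = 4;     /* distance between register start addresses */
        unsigned int regsize    = 1;     /* each register is one byte wide */
        unsigned int io_size    = 2;     /* e.g. a two-register KCS-style interface */
        unsigned int idx;

        for (idx = 0; idx < io_size; idx++)
            printf("register %u: request_region(0x%x, %u)\n",
                   idx, base + idx * regspacing, regsize);
        return 0;
    }

With these numbers the second register lands at 0xca6, matching the example in the structure comment.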
@@ -1388,8 +1442,10 @@ static int mem_setup(struct smi_info *info) | |||
1388 | 1442 | ||
1389 | info->io_cleanup = mem_cleanup; | 1443 | info->io_cleanup = mem_cleanup; |
1390 | 1444 | ||
1391 | /* Figure out the actual readb/readw/readl/etc routine to use based | 1445 | /* |
1392 | upon the register size. */ | 1446 | * Figure out the actual readb/readw/readl/etc routine to use based |
1447 | * upon the register size. | ||
1448 | */ | ||
1393 | switch (info->io.regsize) { | 1449 | switch (info->io.regsize) { |
1394 | case 1: | 1450 | case 1: |
1395 | info->io.inputb = intf_mem_inb; | 1451 | info->io.inputb = intf_mem_inb; |
@@ -1410,16 +1466,18 @@ static int mem_setup(struct smi_info *info) | |||
1410 | break; | 1466 | break; |
1411 | #endif | 1467 | #endif |
1412 | default: | 1468 | default: |
1413 | printk("ipmi_si: Invalid register size: %d\n", | 1469 | printk(KERN_WARNING "ipmi_si: Invalid register size: %d\n", |
1414 | info->io.regsize); | 1470 | info->io.regsize); |
1415 | return -EINVAL; | 1471 | return -EINVAL; |
1416 | } | 1472 | } |
1417 | 1473 | ||
1418 | /* Calculate the total amount of memory to claim. This is an | 1474 | /* |
1475 | * Calculate the total amount of memory to claim. This is an | ||
1419 | * unusual looking calculation, but it avoids claiming any | 1476 | * unusual looking calculation, but it avoids claiming any |
1420 | * more memory than it has to. It will claim everything | 1477 | * more memory than it has to. It will claim everything |
1421 | * between the first address to the end of the last full | 1478 | * between the first address to the end of the last full |
1422 | * register. */ | 1479 | * register. |
1480 | */ | ||
1423 | mapsize = ((info->io_size * info->io.regspacing) | 1481 | mapsize = ((info->io_size * info->io.regspacing) |
1424 | - (info->io.regspacing - info->io.regsize)); | 1482 | - (info->io.regspacing - info->io.regsize)); |
1425 | 1483 | ||
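The mapsize expression in the hunk above is the part the comment calls unusual: it maps from the first register to the end of the last full register instead of rounding up to a whole number of regspacing strides. A small standalone check with example values (three one-byte registers spaced four bytes apart):

    #include <stdio.h>

    int main(void)
    {
        unsigned int io_size    = 3; /* number of registers */
        unsigned int regspacing = 4; /* stride between register start addresses */
        unsigned int regsize    = 1; /* width of each register in bytes */

        /* The driver's calculation: stop at the end of the last register. */
        unsigned int mapsize = (io_size * regspacing) - (regspacing - regsize);

        /* Rounding up to whole strides would claim more than is needed. */
        unsigned int naive = io_size * regspacing;

        printf("mapsize = %u bytes (a full-stride mapping would be %u)\n",
               mapsize, naive);
        return 0;
    }

Here mapsize comes out to 9 bytes rather than 12.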
@@ -1749,9 +1807,11 @@ static __devinit void hardcode_find_bmc(void) | |||
1749 | 1807 | ||
1750 | #include <linux/acpi.h> | 1808 | #include <linux/acpi.h> |
1751 | 1809 | ||
1752 | /* Once we get an ACPI failure, we don't try any more, because we go | 1810 | /* |
1753 | through the tables sequentially. Once we don't find a table, there | 1811 | * Once we get an ACPI failure, we don't try any more, because we go |
1754 | are no more. */ | 1812 | * through the tables sequentially. Once we don't find a table, there |
1813 | * are no more. | ||
1814 | */ | ||
1755 | static int acpi_failure; | 1815 | static int acpi_failure; |
1756 | 1816 | ||
1757 | /* For GPE-type interrupts. */ | 1817 | /* For GPE-type interrupts. */ |
@@ -1765,9 +1825,7 @@ static u32 ipmi_acpi_gpe(void *context) | |||
1765 | 1825 | ||
1766 | spin_lock_irqsave(&(smi_info->si_lock), flags); | 1826 | spin_lock_irqsave(&(smi_info->si_lock), flags); |
1767 | 1827 | ||
1768 | spin_lock(&smi_info->count_lock); | 1828 | smi_inc_stat(smi_info, interrupts); |
1769 | smi_info->interrupts++; | ||
1770 | spin_unlock(&smi_info->count_lock); | ||
1771 | 1829 | ||
1772 | #ifdef DEBUG_TIMING | 1830 | #ifdef DEBUG_TIMING |
1773 | do_gettimeofday(&t); | 1831 | do_gettimeofday(&t); |
@@ -1816,7 +1874,8 @@ static int acpi_gpe_irq_setup(struct smi_info *info) | |||
1816 | 1874 | ||
1817 | /* | 1875 | /* |
1818 | * Defined at | 1876 | * Defined at |
1819 | * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/Docs/TechPapers/IA64/hpspmi.pdf | 1877 | * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/ |
1878 | * Docs/TechPapers/IA64/hpspmi.pdf | ||
1820 | */ | 1879 | */ |
1821 | struct SPMITable { | 1880 | struct SPMITable { |
1822 | s8 Signature[4]; | 1881 | s8 Signature[4]; |
@@ -1838,14 +1897,18 @@ struct SPMITable { | |||
1838 | */ | 1897 | */ |
1839 | u8 InterruptType; | 1898 | u8 InterruptType; |
1840 | 1899 | ||
1841 | /* If bit 0 of InterruptType is set, then this is the SCI | 1900 | /* |
1842 | interrupt in the GPEx_STS register. */ | 1901 | * If bit 0 of InterruptType is set, then this is the SCI |
1902 | * interrupt in the GPEx_STS register. | ||
1903 | */ | ||
1843 | u8 GPE; | 1904 | u8 GPE; |
1844 | 1905 | ||
1845 | s16 Reserved; | 1906 | s16 Reserved; |
1846 | 1907 | ||
1847 | /* If bit 1 of InterruptType is set, then this is the I/O | 1908 | /* |
1848 | APIC/SAPIC interrupt. */ | 1909 | * If bit 1 of InterruptType is set, then this is the I/O |
1910 | * APIC/SAPIC interrupt. | ||
1911 | */ | ||
1849 | u32 GlobalSystemInterrupt; | 1912 | u32 GlobalSystemInterrupt; |
1850 | 1913 | ||
1851 | /* The actual register address. */ | 1914 | /* The actual register address. */ |
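The reflowed comments in the SPMITable hunk spell out how InterruptType is meant to be decoded: bit 0 selects the SCI/GPE path (the GPE field), bit 1 the I/O APIC/SAPIC path (GlobalSystemInterrupt). A standalone sketch of that decoding; the structure and values below are made up for illustration and are not the real SPMITable layout:

    #include <stdio.h>

    struct spmi_irq_info {
        unsigned char interrupt_type;         /* bit 0: GPE, bit 1: GSI */
        unsigned char gpe;
        unsigned int  global_system_interrupt;
    };

    static void decode_spmi_irq(const struct spmi_irq_info *s)
    {
        if (s->interrupt_type & 0x01)
            printf("SCI: use GPE %u from the GPEx_STS register\n",
                   (unsigned int)s->gpe);
        if (s->interrupt_type & 0x02)
            printf("APIC/SAPIC: use global system interrupt %u\n",
                   s->global_system_interrupt);
        if (!(s->interrupt_type & 0x03))
            printf("no interrupt described by the table\n");
    }

    int main(void)
    {
        struct spmi_irq_info example = { 0x02, 0, 20 }; /* made-up firmware values */

        decode_spmi_irq(&example);
        return 0;
    }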
@@ -1863,7 +1926,7 @@ static __devinit int try_init_acpi(struct SPMITable *spmi) | |||
1863 | 1926 | ||
1864 | if (spmi->IPMIlegacy != 1) { | 1927 | if (spmi->IPMIlegacy != 1) { |
1865 | printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy); | 1928 | printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy); |
1866 | return -ENODEV; | 1929 | return -ENODEV; |
1867 | } | 1930 | } |
1868 | 1931 | ||
1869 | if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) | 1932 | if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) |
@@ -1880,8 +1943,7 @@ static __devinit int try_init_acpi(struct SPMITable *spmi) | |||
1880 | info->addr_source = "ACPI"; | 1943 | info->addr_source = "ACPI"; |
1881 | 1944 | ||
1882 | /* Figure out the interface type. */ | 1945 | /* Figure out the interface type. */ |
1883 | switch (spmi->InterfaceType) | 1946 | switch (spmi->InterfaceType) { |
1884 | { | ||
1885 | case 1: /* KCS */ | 1947 | case 1: /* KCS */ |
1886 | info->si_type = SI_KCS; | 1948 | info->si_type = SI_KCS; |
1887 | break; | 1949 | break; |
@@ -1929,7 +1991,8 @@ static __devinit int try_init_acpi(struct SPMITable *spmi) | |||
1929 | info->io.addr_type = IPMI_IO_ADDR_SPACE; | 1991 | info->io.addr_type = IPMI_IO_ADDR_SPACE; |
1930 | } else { | 1992 | } else { |
1931 | kfree(info); | 1993 | kfree(info); |
1932 | printk("ipmi_si: Unknown ACPI I/O Address type\n"); | 1994 | printk(KERN_WARNING |
1995 | "ipmi_si: Unknown ACPI I/O Address type\n"); | ||
1933 | return -EIO; | 1996 | return -EIO; |
1934 | } | 1997 | } |
1935 | info->io.addr_data = spmi->addr.address; | 1998 | info->io.addr_data = spmi->addr.address; |
@@ -1963,8 +2026,7 @@ static __devinit void acpi_find_bmc(void) | |||
1963 | #endif | 2026 | #endif |
1964 | 2027 | ||
1965 | #ifdef CONFIG_DMI | 2028 | #ifdef CONFIG_DMI |
1966 | struct dmi_ipmi_data | 2029 | struct dmi_ipmi_data { |
1967 | { | ||
1968 | u8 type; | 2030 | u8 type; |
1969 | u8 addr_space; | 2031 | u8 addr_space; |
1970 | unsigned long base_addr; | 2032 | unsigned long base_addr; |
@@ -1989,11 +2051,10 @@ static int __devinit decode_dmi(const struct dmi_header *dm, | |||
1989 | /* I/O */ | 2051 | /* I/O */ |
1990 | base_addr &= 0xFFFE; | 2052 | base_addr &= 0xFFFE; |
1991 | dmi->addr_space = IPMI_IO_ADDR_SPACE; | 2053 | dmi->addr_space = IPMI_IO_ADDR_SPACE; |
1992 | } | 2054 | } else |
1993 | else { | ||
1994 | /* Memory */ | 2055 | /* Memory */ |
1995 | dmi->addr_space = IPMI_MEM_ADDR_SPACE; | 2056 | dmi->addr_space = IPMI_MEM_ADDR_SPACE; |
1996 | } | 2057 | |
1997 | /* If bit 4 of byte 0x10 is set, then the lsb for the address | 2058 | /* If bit 4 of byte 0x10 is set, then the lsb for the address |
1998 | is odd. */ | 2059 | is odd. */ |
1999 | dmi->base_addr = base_addr | ((data[0x10] & 0x10) >> 4); | 2060 | dmi->base_addr = base_addr | ((data[0x10] & 0x10) >> 4); |
@@ -2002,7 +2063,7 @@ static int __devinit decode_dmi(const struct dmi_header *dm, | |||
2002 | 2063 | ||
2003 | /* The top two bits of byte 0x10 hold the register spacing. */ | 2064 | /* The top two bits of byte 0x10 hold the register spacing. */ |
2004 | reg_spacing = (data[0x10] & 0xC0) >> 6; | 2065 | reg_spacing = (data[0x10] & 0xC0) >> 6; |
2005 | switch(reg_spacing){ | 2066 | switch (reg_spacing) { |
2006 | case 0x00: /* Byte boundaries */ | 2067 | case 0x00: /* Byte boundaries */ |
2007 | dmi->offset = 1; | 2068 | dmi->offset = 1; |
2008 | break; | 2069 | break; |
@@ -2018,12 +2079,14 @@ static int __devinit decode_dmi(const struct dmi_header *dm, | |||
2018 | } | 2079 | } |
2019 | } else { | 2080 | } else { |
2020 | /* Old DMI spec. */ | 2081 | /* Old DMI spec. */ |
2021 | /* Note that technically, the lower bit of the base | 2082 | /* |
2083 | * Note that technically, the lower bit of the base | ||
2022 | * address should be 1 if the address is I/O and 0 if | 2084 | * address should be 1 if the address is I/O and 0 if |
2023 | * the address is in memory. So many systems get that | 2085 | * the address is in memory. So many systems get that |
2024 | * wrong (and all that I have seen are I/O) so we just | 2086 | * wrong (and all that I have seen are I/O) so we just |
2025 | * ignore that bit and assume I/O. Systems that use | 2087 | * ignore that bit and assume I/O. Systems that use |
2026 | * memory should use the newer spec, anyway. */ | 2088 | * memory should use the newer spec, anyway. |
2089 | */ | ||
2027 | dmi->base_addr = base_addr & 0xfffe; | 2090 | dmi->base_addr = base_addr & 0xfffe; |
2028 | dmi->addr_space = IPMI_IO_ADDR_SPACE; | 2091 | dmi->addr_space = IPMI_IO_ADDR_SPACE; |
2029 | dmi->offset = 1; | 2092 | dmi->offset = 1; |
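decode_dmi() above packs two things into byte 0x10 of the newer DMI record: bit 4 supplies the least-significant bit of the base address (the address field itself is kept even), and the top two bits give the register-spacing code, with 0 meaning byte boundaries. A standalone decode of a hypothetical byte, using only the rules visible in this hunk:

    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical DMI values, not taken from real firmware. */
        unsigned int base_addr = 0x0ca2;
        unsigned int byte_0x10 = 0x10;

        /* Bit 4 of byte 0x10 is the lsb of the address. */
        unsigned int real_base = (base_addr & 0xfffe) | ((byte_0x10 & 0x10) >> 4);

        /* The top two bits hold the register spacing code. */
        unsigned int reg_spacing = (byte_0x10 & 0xC0) >> 6;

        printf("base address : 0x%x\n", real_base);      /* 0xca3 */
        printf("spacing code : %u%s\n", reg_spacing,
               reg_spacing == 0 ? " (byte boundaries)" : "");
        return 0;
    }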
@@ -2230,13 +2293,13 @@ static struct pci_device_id ipmi_pci_devices[] = { | |||
2230 | MODULE_DEVICE_TABLE(pci, ipmi_pci_devices); | 2293 | MODULE_DEVICE_TABLE(pci, ipmi_pci_devices); |
2231 | 2294 | ||
2232 | static struct pci_driver ipmi_pci_driver = { | 2295 | static struct pci_driver ipmi_pci_driver = { |
2233 | .name = DEVICE_NAME, | 2296 | .name = DEVICE_NAME, |
2234 | .id_table = ipmi_pci_devices, | 2297 | .id_table = ipmi_pci_devices, |
2235 | .probe = ipmi_pci_probe, | 2298 | .probe = ipmi_pci_probe, |
2236 | .remove = __devexit_p(ipmi_pci_remove), | 2299 | .remove = __devexit_p(ipmi_pci_remove), |
2237 | #ifdef CONFIG_PM | 2300 | #ifdef CONFIG_PM |
2238 | .suspend = ipmi_pci_suspend, | 2301 | .suspend = ipmi_pci_suspend, |
2239 | .resume = ipmi_pci_resume, | 2302 | .resume = ipmi_pci_resume, |
2240 | #endif | 2303 | #endif |
2241 | }; | 2304 | }; |
2242 | #endif /* CONFIG_PCI */ | 2305 | #endif /* CONFIG_PCI */ |
@@ -2306,7 +2369,7 @@ static int __devinit ipmi_of_probe(struct of_device *dev, | |||
2306 | info->io.addr_data, info->io.regsize, info->io.regspacing, | 2369 | info->io.addr_data, info->io.regsize, info->io.regspacing, |
2307 | info->irq); | 2370 | info->irq); |
2308 | 2371 | ||
2309 | dev->dev.driver_data = (void*) info; | 2372 | dev->dev.driver_data = (void *) info; |
2310 | 2373 | ||
2311 | return try_smi_init(info); | 2374 | return try_smi_init(info); |
2312 | } | 2375 | } |
@@ -2319,14 +2382,16 @@ static int __devexit ipmi_of_remove(struct of_device *dev) | |||
2319 | 2382 | ||
2320 | static struct of_device_id ipmi_match[] = | 2383 | static struct of_device_id ipmi_match[] = |
2321 | { | 2384 | { |
2322 | { .type = "ipmi", .compatible = "ipmi-kcs", .data = (void *)(unsigned long) SI_KCS }, | 2385 | { .type = "ipmi", .compatible = "ipmi-kcs", |
2323 | { .type = "ipmi", .compatible = "ipmi-smic", .data = (void *)(unsigned long) SI_SMIC }, | 2386 | .data = (void *)(unsigned long) SI_KCS }, |
2324 | { .type = "ipmi", .compatible = "ipmi-bt", .data = (void *)(unsigned long) SI_BT }, | 2387 | { .type = "ipmi", .compatible = "ipmi-smic", |
2388 | .data = (void *)(unsigned long) SI_SMIC }, | ||
2389 | { .type = "ipmi", .compatible = "ipmi-bt", | ||
2390 | .data = (void *)(unsigned long) SI_BT }, | ||
2325 | {}, | 2391 | {}, |
2326 | }; | 2392 | }; |
2327 | 2393 | ||
2328 | static struct of_platform_driver ipmi_of_platform_driver = | 2394 | static struct of_platform_driver ipmi_of_platform_driver = { |
2329 | { | ||
2330 | .name = "ipmi", | 2395 | .name = "ipmi", |
2331 | .match_table = ipmi_match, | 2396 | .match_table = ipmi_match, |
2332 | .probe = ipmi_of_probe, | 2397 | .probe = ipmi_of_probe, |
@@ -2347,32 +2412,32 @@ static int try_get_dev_id(struct smi_info *smi_info) | |||
2347 | if (!resp) | 2412 | if (!resp) |
2348 | return -ENOMEM; | 2413 | return -ENOMEM; |
2349 | 2414 | ||
2350 | /* Do a Get Device ID command, since it comes back with some | 2415 | /* |
2351 | useful info. */ | 2416 | * Do a Get Device ID command, since it comes back with some |
2417 | * useful info. | ||
2418 | */ | ||
2352 | msg[0] = IPMI_NETFN_APP_REQUEST << 2; | 2419 | msg[0] = IPMI_NETFN_APP_REQUEST << 2; |
2353 | msg[1] = IPMI_GET_DEVICE_ID_CMD; | 2420 | msg[1] = IPMI_GET_DEVICE_ID_CMD; |
2354 | smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); | 2421 | smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); |
2355 | 2422 | ||
2356 | smi_result = smi_info->handlers->event(smi_info->si_sm, 0); | 2423 | smi_result = smi_info->handlers->event(smi_info->si_sm, 0); |
2357 | for (;;) | 2424 | for (;;) { |
2358 | { | ||
2359 | if (smi_result == SI_SM_CALL_WITH_DELAY || | 2425 | if (smi_result == SI_SM_CALL_WITH_DELAY || |
2360 | smi_result == SI_SM_CALL_WITH_TICK_DELAY) { | 2426 | smi_result == SI_SM_CALL_WITH_TICK_DELAY) { |
2361 | schedule_timeout_uninterruptible(1); | 2427 | schedule_timeout_uninterruptible(1); |
2362 | smi_result = smi_info->handlers->event( | 2428 | smi_result = smi_info->handlers->event( |
2363 | smi_info->si_sm, 100); | 2429 | smi_info->si_sm, 100); |
2364 | } | 2430 | } else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) { |
2365 | else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) | ||
2366 | { | ||
2367 | smi_result = smi_info->handlers->event( | 2431 | smi_result = smi_info->handlers->event( |
2368 | smi_info->si_sm, 0); | 2432 | smi_info->si_sm, 0); |
2369 | } | 2433 | } else |
2370 | else | ||
2371 | break; | 2434 | break; |
2372 | } | 2435 | } |
2373 | if (smi_result == SI_SM_HOSED) { | 2436 | if (smi_result == SI_SM_HOSED) { |
2374 | /* We couldn't get the state machine to run, so whatever's at | 2437 | /* |
2375 | the port is probably not an IPMI SMI interface. */ | 2438 | * We couldn't get the state machine to run, so whatever's at |
2439 | * the port is probably not an IPMI SMI interface. | ||
2440 | */ | ||
2376 | rv = -ENODEV; | 2441 | rv = -ENODEV; |
2377 | goto out; | 2442 | goto out; |
2378 | } | 2443 | } |
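try_get_dev_id() above builds its raw request the same way the rest of the driver does: the network function goes in the top six bits of the first byte (the low two bits are the LUN), and the command code follows in the second byte. A standalone illustration, assuming the IPMI spec values of 0x06 for the App request netfn and 0x01 for Get Device ID:

    #include <stdio.h>

    #define IPMI_NETFN_APP_REQUEST 0x06 /* App netfn, per the IPMI spec */
    #define IPMI_GET_DEVICE_ID_CMD 0x01

    int main(void)
    {
        unsigned char msg[2];

        msg[0] = IPMI_NETFN_APP_REQUEST << 2; /* netfn in bits 7..2, LUN 0 in bits 1..0 */
        msg[1] = IPMI_GET_DEVICE_ID_CMD;

        printf("request bytes: %02x %02x\n", msg[0], msg[1]); /* 18 01 */
        return 0;
    }

The state machine then shuttles these two bytes to the BMC, and the loop above polls handlers->event() until the transaction completes or the interface is declared hosed.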
@@ -2405,30 +2470,28 @@ static int stat_file_read_proc(char *page, char **start, off_t off, | |||
2405 | 2470 | ||
2406 | out += sprintf(out, "interrupts_enabled: %d\n", | 2471 | out += sprintf(out, "interrupts_enabled: %d\n", |
2407 | smi->irq && !smi->interrupt_disabled); | 2472 | smi->irq && !smi->interrupt_disabled); |
2408 | out += sprintf(out, "short_timeouts: %ld\n", | 2473 | out += sprintf(out, "short_timeouts: %u\n", |
2409 | smi->short_timeouts); | 2474 | smi_get_stat(smi, short_timeouts)); |
2410 | out += sprintf(out, "long_timeouts: %ld\n", | 2475 | out += sprintf(out, "long_timeouts: %u\n", |
2411 | smi->long_timeouts); | 2476 | smi_get_stat(smi, long_timeouts)); |
2412 | out += sprintf(out, "timeout_restarts: %ld\n", | 2477 | out += sprintf(out, "idles: %u\n", |
2413 | smi->timeout_restarts); | 2478 | smi_get_stat(smi, idles)); |
2414 | out += sprintf(out, "idles: %ld\n", | 2479 | out += sprintf(out, "interrupts: %u\n", |
2415 | smi->idles); | 2480 | smi_get_stat(smi, interrupts)); |
2416 | out += sprintf(out, "interrupts: %ld\n", | 2481 | out += sprintf(out, "attentions: %u\n", |
2417 | smi->interrupts); | 2482 | smi_get_stat(smi, attentions)); |
2418 | out += sprintf(out, "attentions: %ld\n", | 2483 | out += sprintf(out, "flag_fetches: %u\n", |
2419 | smi->attentions); | 2484 | smi_get_stat(smi, flag_fetches)); |
2420 | out += sprintf(out, "flag_fetches: %ld\n", | 2485 | out += sprintf(out, "hosed_count: %u\n", |
2421 | smi->flag_fetches); | 2486 | smi_get_stat(smi, hosed_count)); |
2422 | out += sprintf(out, "hosed_count: %ld\n", | 2487 | out += sprintf(out, "complete_transactions: %u\n", |
2423 | smi->hosed_count); | 2488 | smi_get_stat(smi, complete_transactions)); |
2424 | out += sprintf(out, "complete_transactions: %ld\n", | 2489 | out += sprintf(out, "events: %u\n", |
2425 | smi->complete_transactions); | 2490 | smi_get_stat(smi, events)); |
2426 | out += sprintf(out, "events: %ld\n", | 2491 | out += sprintf(out, "watchdog_pretimeouts: %u\n", |
2427 | smi->events); | 2492 | smi_get_stat(smi, watchdog_pretimeouts)); |
2428 | out += sprintf(out, "watchdog_pretimeouts: %ld\n", | 2493 | out += sprintf(out, "incoming_messages: %u\n", |
2429 | smi->watchdog_pretimeouts); | 2494 | smi_get_stat(smi, incoming_messages)); |
2430 | out += sprintf(out, "incoming_messages: %ld\n", | ||
2431 | smi->incoming_messages); | ||
2432 | 2495 | ||
2433 | return out - page; | 2496 | return out - page; |
2434 | } | 2497 | } |
@@ -2460,7 +2523,7 @@ static int param_read_proc(char *page, char **start, off_t off, | |||
2460 | static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info) | 2523 | static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info) |
2461 | { | 2524 | { |
2462 | smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) | | 2525 | smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) | |
2463 | RECEIVE_MSG_AVAIL); | 2526 | RECEIVE_MSG_AVAIL); |
2464 | return 1; | 2527 | return 1; |
2465 | } | 2528 | } |
2466 | 2529 | ||
@@ -2502,10 +2565,9 @@ static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info) | |||
2502 | id->ipmi_version == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) { | 2565 | id->ipmi_version == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) { |
2503 | smi_info->oem_data_avail_handler = | 2566 | smi_info->oem_data_avail_handler = |
2504 | oem_data_avail_to_receive_msg_avail; | 2567 | oem_data_avail_to_receive_msg_avail; |
2505 | } | 2568 | } else if (ipmi_version_major(id) < 1 || |
2506 | else if (ipmi_version_major(id) < 1 || | 2569 | (ipmi_version_major(id) == 1 && |
2507 | (ipmi_version_major(id) == 1 && | 2570 | ipmi_version_minor(id) < 5)) { |
2508 | ipmi_version_minor(id) < 5)) { | ||
2509 | smi_info->oem_data_avail_handler = | 2571 | smi_info->oem_data_avail_handler = |
2510 | oem_data_avail_to_receive_msg_avail; | 2572 | oem_data_avail_to_receive_msg_avail; |
2511 | } | 2573 | } |
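For what the OEM-data-available handler above actually does to the flags byte, here is a worked fragment with assumed bit values (the real RECEIVE_MSG_AVAIL and OEM_DATA_AVAIL definitions live elsewhere in ipmi_si_intf.c and are not shown in this hunk):

/* Assumed bit positions, for illustration only. */
#define RECEIVE_MSG_AVAIL	0x01
#define OEM_DATA_AVAIL		0x20

static unsigned char oem_to_receive_msg(unsigned char msg_flags)
{
	/* Clear the OEM bit and raise the standard "message waiting" bit. */
	return (msg_flags & ~OEM_DATA_AVAIL) | RECEIVE_MSG_AVAIL;
}

/* With the values assumed above, oem_to_receive_msg(0x20) == 0x01, so the
 * generic flag handling treats the OEM signal as a normal incoming message. */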
@@ -2597,8 +2659,10 @@ static void setup_xaction_handlers(struct smi_info *smi_info) | |||
2597 | static inline void wait_for_timer_and_thread(struct smi_info *smi_info) | 2659 | static inline void wait_for_timer_and_thread(struct smi_info *smi_info) |
2598 | { | 2660 | { |
2599 | if (smi_info->intf) { | 2661 | if (smi_info->intf) { |
2600 | /* The timer and thread are only running if the | 2662 | /* |
2601 | interface has been started up and registered. */ | 2663 | * The timer and thread are only running if the |
2664 | * interface has been started up and registered. | ||
2665 | */ | ||
2602 | if (smi_info->thread != NULL) | 2666 | if (smi_info->thread != NULL) |
2603 | kthread_stop(smi_info->thread); | 2667 | kthread_stop(smi_info->thread); |
2604 | del_timer_sync(&smi_info->si_timer); | 2668 | del_timer_sync(&smi_info->si_timer); |
@@ -2676,6 +2740,7 @@ static int is_new_interface(struct smi_info *info) | |||
2676 | static int try_smi_init(struct smi_info *new_smi) | 2740 | static int try_smi_init(struct smi_info *new_smi) |
2677 | { | 2741 | { |
2678 | int rv; | 2742 | int rv; |
2743 | int i; | ||
2679 | 2744 | ||
2680 | if (new_smi->addr_source) { | 2745 | if (new_smi->addr_source) { |
2681 | printk(KERN_INFO "ipmi_si: Trying %s-specified %s state" | 2746 | printk(KERN_INFO "ipmi_si: Trying %s-specified %s state" |
@@ -2722,7 +2787,7 @@ static int try_smi_init(struct smi_info *new_smi) | |||
2722 | /* Allocate the state machine's data and initialize it. */ | 2787 | /* Allocate the state machine's data and initialize it. */ |
2723 | new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL); | 2788 | new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL); |
2724 | if (!new_smi->si_sm) { | 2789 | if (!new_smi->si_sm) { |
2725 | printk(" Could not allocate state machine memory\n"); | 2790 | printk(KERN_ERR "Could not allocate state machine memory\n"); |
2726 | rv = -ENOMEM; | 2791 | rv = -ENOMEM; |
2727 | goto out_err; | 2792 | goto out_err; |
2728 | } | 2793 | } |
@@ -2732,13 +2797,12 @@ static int try_smi_init(struct smi_info *new_smi) | |||
2732 | /* Now that we know the I/O size, we can set up the I/O. */ | 2797 | /* Now that we know the I/O size, we can set up the I/O. */ |
2733 | rv = new_smi->io_setup(new_smi); | 2798 | rv = new_smi->io_setup(new_smi); |
2734 | if (rv) { | 2799 | if (rv) { |
2735 | printk(" Could not set up I/O space\n"); | 2800 | printk(KERN_ERR "Could not set up I/O space\n"); |
2736 | goto out_err; | 2801 | goto out_err; |
2737 | } | 2802 | } |
2738 | 2803 | ||
2739 | spin_lock_init(&(new_smi->si_lock)); | 2804 | spin_lock_init(&(new_smi->si_lock)); |
2740 | spin_lock_init(&(new_smi->msg_lock)); | 2805 | spin_lock_init(&(new_smi->msg_lock)); |
2741 | spin_lock_init(&(new_smi->count_lock)); | ||
2742 | 2806 | ||
2743 | /* Do low-level detection first. */ | 2807 | /* Do low-level detection first. */ |
2744 | if (new_smi->handlers->detect(new_smi->si_sm)) { | 2808 | if (new_smi->handlers->detect(new_smi->si_sm)) { |
@@ -2749,8 +2813,10 @@ static int try_smi_init(struct smi_info *new_smi) | |||
2749 | goto out_err; | 2813 | goto out_err; |
2750 | } | 2814 | } |
2751 | 2815 | ||
2752 | /* Attempt a get device id command. If it fails, we probably | 2816 | /* |
2753 | don't have a BMC here. */ | 2817 | * Attempt a get device id command. If it fails, we probably |
2818 | * don't have a BMC here. | ||
2819 | */ | ||
2754 | rv = try_get_dev_id(new_smi); | 2820 | rv = try_get_dev_id(new_smi); |
2755 | if (rv) { | 2821 | if (rv) { |
2756 | if (new_smi->addr_source) | 2822 | if (new_smi->addr_source) |
@@ -2767,22 +2833,28 @@ static int try_smi_init(struct smi_info *new_smi) | |||
2767 | new_smi->curr_msg = NULL; | 2833 | new_smi->curr_msg = NULL; |
2768 | atomic_set(&new_smi->req_events, 0); | 2834 | atomic_set(&new_smi->req_events, 0); |
2769 | new_smi->run_to_completion = 0; | 2835 | new_smi->run_to_completion = 0; |
2836 | for (i = 0; i < SI_NUM_STATS; i++) | ||
2837 | atomic_set(&new_smi->stats[i], 0); | ||
2770 | 2838 | ||
2771 | new_smi->interrupt_disabled = 0; | 2839 | new_smi->interrupt_disabled = 0; |
2772 | atomic_set(&new_smi->stop_operation, 0); | 2840 | atomic_set(&new_smi->stop_operation, 0); |
2773 | new_smi->intf_num = smi_num; | 2841 | new_smi->intf_num = smi_num; |
2774 | smi_num++; | 2842 | smi_num++; |
2775 | 2843 | ||
2776 | /* Start clearing the flags before we enable interrupts or the | 2844 | /* |
2777 | timer to avoid racing with the timer. */ | 2845 | * Start clearing the flags before we enable interrupts or the |
2846 | * timer to avoid racing with the timer. | ||
2847 | */ | ||
2778 | start_clear_flags(new_smi); | 2848 | start_clear_flags(new_smi); |
2779 | /* IRQ is defined to be set when non-zero. */ | 2849 | /* IRQ is defined to be set when non-zero. */ |
2780 | if (new_smi->irq) | 2850 | if (new_smi->irq) |
2781 | new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ; | 2851 | new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ; |
2782 | 2852 | ||
2783 | if (!new_smi->dev) { | 2853 | if (!new_smi->dev) { |
2784 | /* If we don't already have a device from something | 2854 | /* |
2785 | * else (like PCI), then register a new one. */ | 2855 | * If we don't already have a device from something |
2856 | * else (like PCI), then register a new one. | ||
2857 | */ | ||
2786 | new_smi->pdev = platform_device_alloc("ipmi_si", | 2858 | new_smi->pdev = platform_device_alloc("ipmi_si", |
2787 | new_smi->intf_num); | 2859 | new_smi->intf_num); |
2788 | if (rv) { | 2860 | if (rv) { |
@@ -2820,7 +2892,7 @@ static int try_smi_init(struct smi_info *new_smi) | |||
2820 | } | 2892 | } |
2821 | 2893 | ||
2822 | rv = ipmi_smi_add_proc_entry(new_smi->intf, "type", | 2894 | rv = ipmi_smi_add_proc_entry(new_smi->intf, "type", |
2823 | type_file_read_proc, NULL, | 2895 | type_file_read_proc, |
2824 | new_smi, THIS_MODULE); | 2896 | new_smi, THIS_MODULE); |
2825 | if (rv) { | 2897 | if (rv) { |
2826 | printk(KERN_ERR | 2898 | printk(KERN_ERR |
@@ -2830,7 +2902,7 @@ static int try_smi_init(struct smi_info *new_smi) | |||
2830 | } | 2902 | } |
2831 | 2903 | ||
2832 | rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats", | 2904 | rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats", |
2833 | stat_file_read_proc, NULL, | 2905 | stat_file_read_proc, |
2834 | new_smi, THIS_MODULE); | 2906 | new_smi, THIS_MODULE); |
2835 | if (rv) { | 2907 | if (rv) { |
2836 | printk(KERN_ERR | 2908 | printk(KERN_ERR |
@@ -2840,7 +2912,7 @@ static int try_smi_init(struct smi_info *new_smi) | |||
2840 | } | 2912 | } |
2841 | 2913 | ||
2842 | rv = ipmi_smi_add_proc_entry(new_smi->intf, "params", | 2914 | rv = ipmi_smi_add_proc_entry(new_smi->intf, "params", |
2843 | param_read_proc, NULL, | 2915 | param_read_proc, |
2844 | new_smi, THIS_MODULE); | 2916 | new_smi, THIS_MODULE); |
2845 | if (rv) { | 2917 | if (rv) { |
2846 | printk(KERN_ERR | 2918 | printk(KERN_ERR |
@@ -2853,7 +2925,8 @@ static int try_smi_init(struct smi_info *new_smi) | |||
2853 | 2925 | ||
2854 | mutex_unlock(&smi_infos_lock); | 2926 | mutex_unlock(&smi_infos_lock); |
2855 | 2927 | ||
2856 | printk(KERN_INFO "IPMI %s interface initialized\n",si_to_str[new_smi->si_type]); | 2928 | printk(KERN_INFO "IPMI %s interface initialized\n", |
2929 | si_to_str[new_smi->si_type]); | ||
2857 | 2930 | ||
2858 | return 0; | 2931 | return 0; |
2859 | 2932 | ||
@@ -2868,9 +2941,11 @@ static int try_smi_init(struct smi_info *new_smi) | |||
2868 | if (new_smi->irq_cleanup) | 2941 | if (new_smi->irq_cleanup) |
2869 | new_smi->irq_cleanup(new_smi); | 2942 | new_smi->irq_cleanup(new_smi); |
2870 | 2943 | ||
2871 | /* Wait until we know that we are out of any interrupt | 2944 | /* |
2872 | handlers might have been running before we freed the | 2945 | * Wait until we know that we are out of any interrupt |
2873 | interrupt. */ | 2946 | * handlers might have been running before we freed the |
2947 | * interrupt. | ||
2948 | */ | ||
2874 | synchronize_sched(); | 2949 | synchronize_sched(); |
2875 | 2950 | ||
2876 | if (new_smi->si_sm) { | 2951 | if (new_smi->si_sm) { |
@@ -2942,11 +3017,10 @@ static __devinit int init_ipmi_si(void) | |||
2942 | 3017 | ||
2943 | #ifdef CONFIG_PCI | 3018 | #ifdef CONFIG_PCI |
2944 | rv = pci_register_driver(&ipmi_pci_driver); | 3019 | rv = pci_register_driver(&ipmi_pci_driver); |
2945 | if (rv){ | 3020 | if (rv) |
2946 | printk(KERN_ERR | 3021 | printk(KERN_ERR |
2947 | "init_ipmi_si: Unable to register PCI driver: %d\n", | 3022 | "init_ipmi_si: Unable to register PCI driver: %d\n", |
2948 | rv); | 3023 | rv); |
2949 | } | ||
2950 | #endif | 3024 | #endif |
2951 | 3025 | ||
2952 | #ifdef CONFIG_PPC_OF | 3026 | #ifdef CONFIG_PPC_OF |
@@ -2975,7 +3049,8 @@ static __devinit int init_ipmi_si(void) | |||
2975 | of_unregister_platform_driver(&ipmi_of_platform_driver); | 3049 | of_unregister_platform_driver(&ipmi_of_platform_driver); |
2976 | #endif | 3050 | #endif |
2977 | driver_unregister(&ipmi_driver); | 3051 | driver_unregister(&ipmi_driver); |
2978 | printk("ipmi_si: Unable to find any System Interface(s)\n"); | 3052 | printk(KERN_WARNING |
3053 | "ipmi_si: Unable to find any System Interface(s)\n"); | ||
2979 | return -ENODEV; | 3054 | return -ENODEV; |
2980 | } else { | 3055 | } else { |
2981 | mutex_unlock(&smi_infos_lock); | 3056 | mutex_unlock(&smi_infos_lock); |
@@ -2997,13 +3072,17 @@ static void cleanup_one_si(struct smi_info *to_clean) | |||
2997 | /* Tell the driver that we are shutting down. */ | 3072 | /* Tell the driver that we are shutting down. */ |
2998 | atomic_inc(&to_clean->stop_operation); | 3073 | atomic_inc(&to_clean->stop_operation); |
2999 | 3074 | ||
3000 | /* Make sure the timer and thread are stopped and will not run | 3075 | /* |
3001 | again. */ | 3076 | * Make sure the timer and thread are stopped and will not run |
3077 | * again. | ||
3078 | */ | ||
3002 | wait_for_timer_and_thread(to_clean); | 3079 | wait_for_timer_and_thread(to_clean); |
3003 | 3080 | ||
3004 | /* Timeouts are stopped, now make sure the interrupts are off | 3081 | /* |
3005 | for the device. A little tricky with locks to make sure | 3082 | * Timeouts are stopped, now make sure the interrupts are off |
3006 | there are no races. */ | 3083 | * for the device. A little tricky with locks to make sure |
3084 | * there are no races. | ||
3085 | */ | ||
3007 | spin_lock_irqsave(&to_clean->si_lock, flags); | 3086 | spin_lock_irqsave(&to_clean->si_lock, flags); |
3008 | while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) { | 3087 | while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) { |
3009 | spin_unlock_irqrestore(&to_clean->si_lock, flags); | 3088 | spin_unlock_irqrestore(&to_clean->si_lock, flags); |
@@ -3074,4 +3153,5 @@ module_exit(cleanup_ipmi_si); | |||
3074 | 3153 | ||
3075 | MODULE_LICENSE("GPL"); | 3154 | MODULE_LICENSE("GPL"); |
3076 | MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>"); | 3155 | MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>"); |
3077 | MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT system interfaces."); | 3156 | MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT" |
3157 | " system interfaces."); | ||
diff --git a/drivers/char/ipmi/ipmi_si_sm.h b/drivers/char/ipmi/ipmi_si_sm.h index 4b731b24dc16..df89f73475fb 100644 --- a/drivers/char/ipmi/ipmi_si_sm.h +++ b/drivers/char/ipmi/ipmi_si_sm.h | |||
@@ -34,22 +34,27 @@ | |||
34 | * 675 Mass Ave, Cambridge, MA 02139, USA. | 34 | * 675 Mass Ave, Cambridge, MA 02139, USA. |
35 | */ | 35 | */ |
36 | 36 | ||
37 | /* This is defined by the state machines themselves, it is an opaque | 37 | /* |
38 | data type for them to use. */ | 38 | * This is defined by the state machines themselves, it is an opaque |
39 | * data type for them to use. | ||
40 | */ | ||
39 | struct si_sm_data; | 41 | struct si_sm_data; |
40 | 42 | ||
41 | /* The structure for doing I/O in the state machine. The state | 43 | /* |
42 | machine doesn't have the actual I/O routines, they are done through | 44 | * The structure for doing I/O in the state machine. The state |
43 | this interface. */ | 45 | * machine doesn't have the actual I/O routines, they are done through |
44 | struct si_sm_io | 46 | * this interface. |
45 | { | 47 | */ |
48 | struct si_sm_io { | ||
46 | unsigned char (*inputb)(struct si_sm_io *io, unsigned int offset); | 49 | unsigned char (*inputb)(struct si_sm_io *io, unsigned int offset); |
47 | void (*outputb)(struct si_sm_io *io, | 50 | void (*outputb)(struct si_sm_io *io, |
48 | unsigned int offset, | 51 | unsigned int offset, |
49 | unsigned char b); | 52 | unsigned char b); |
50 | 53 | ||
51 | /* Generic info used by the actual handling routines, the | 54 | /* |
52 | state machine shouldn't touch these. */ | 55 | * Generic info used by the actual handling routines, the |
56 | * state machine shouldn't touch these. | ||
57 | */ | ||
53 | void __iomem *addr; | 58 | void __iomem *addr; |
54 | int regspacing; | 59 | int regspacing; |
55 | int regsize; | 60 | int regsize; |
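The si_sm_io block above is the whole I/O surface the state machines see. A minimal sketch of memory-mapped accessors built only on the fields shown here (addr and regspacing); the function names are illustrative and register-width handling via regsize is omitted:

#include <linux/io.h>

static unsigned char example_mem_inb(struct si_sm_io *io, unsigned int offset)
{
	/* Registers sit regspacing bytes apart in the ioremapped window. */
	return readb(io->addr + offset * io->regspacing);
}

static void example_mem_outb(struct si_sm_io *io, unsigned int offset,
			     unsigned char b)
{
	writeb(b, io->addr + offset * io->regspacing);
}

/* The interface setup code would then wire these in:
 *	io->inputb  = example_mem_inb;
 *	io->outputb = example_mem_outb;
 */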
@@ -59,53 +64,67 @@ struct si_sm_io | |||
59 | }; | 64 | }; |
60 | 65 | ||
61 | /* Results of SMI events. */ | 66 | /* Results of SMI events. */ |
62 | enum si_sm_result | 67 | enum si_sm_result { |
63 | { | ||
64 | SI_SM_CALL_WITHOUT_DELAY, /* Call the driver again immediately */ | 68 | SI_SM_CALL_WITHOUT_DELAY, /* Call the driver again immediately */ |
65 | SI_SM_CALL_WITH_DELAY, /* Delay some before calling again. */ | 69 | SI_SM_CALL_WITH_DELAY, /* Delay some before calling again. */ |
66 | SI_SM_CALL_WITH_TICK_DELAY, /* Delay at least 1 tick before calling again. */ | 70 | SI_SM_CALL_WITH_TICK_DELAY,/* Delay >=1 tick before calling again. */ |
67 | SI_SM_TRANSACTION_COMPLETE, /* A transaction is finished. */ | 71 | SI_SM_TRANSACTION_COMPLETE, /* A transaction is finished. */ |
68 | SI_SM_IDLE, /* The SM is in idle state. */ | 72 | SI_SM_IDLE, /* The SM is in idle state. */ |
69 | SI_SM_HOSED, /* The hardware violated the state machine. */ | 73 | SI_SM_HOSED, /* The hardware violated the state machine. */ |
70 | SI_SM_ATTN /* The hardware is asserting attn and the | 74 | |
71 | state machine is idle. */ | 75 | /* |
76 | * The hardware is asserting attn and the state machine is | ||
77 | * idle. | ||
78 | */ | ||
79 | SI_SM_ATTN | ||
72 | }; | 80 | }; |
73 | 81 | ||
74 | /* Handlers for the SMI state machine. */ | 82 | /* Handlers for the SMI state machine. */ |
75 | struct si_sm_handlers | 83 | struct si_sm_handlers { |
76 | { | 84 | /* |
77 | /* Put the version number of the state machine here so the | 85 | * Put the version number of the state machine here so the |
78 | upper layer can print it. */ | 86 | * upper layer can print it. |
87 | */ | ||
79 | char *version; | 88 | char *version; |
80 | 89 | ||
81 | /* Initialize the data and return the amount of I/O space to | 90 | /* |
82 | reserve for the space. */ | 91 | * Initialize the data and return the amount of I/O space to |
92 | * reserve for the space. | ||
93 | */ | ||
83 | unsigned int (*init_data)(struct si_sm_data *smi, | 94 | unsigned int (*init_data)(struct si_sm_data *smi, |
84 | struct si_sm_io *io); | 95 | struct si_sm_io *io); |
85 | 96 | ||
86 | /* Start a new transaction in the state machine. This will | 97 | /* |
87 | return -2 if the state machine is not idle, -1 if the size | 98 | * Start a new transaction in the state machine. This will |
88 | is invalid (to large or too small), or 0 if the transaction | 99 | * return -2 if the state machine is not idle, -1 if the size |
(The "to large or too small" wording in this comment should read "too large or too small"; the typo is carried over unchanged by the reflow.)
89 | is successfully completed. */ | 100 | * is invalid (to large or too small), or 0 if the transaction |
101 | * is successfully completed. | ||
102 | */ | ||
90 | int (*start_transaction)(struct si_sm_data *smi, | 103 | int (*start_transaction)(struct si_sm_data *smi, |
91 | unsigned char *data, unsigned int size); | 104 | unsigned char *data, unsigned int size); |
92 | 105 | ||
93 | /* Return the results after the transaction. This will return | 106 | /* |
94 | -1 if the buffer is too small, zero if no transaction is | 107 | * Return the results after the transaction. This will return |
95 | present, or the actual length of the result data. */ | 108 | * -1 if the buffer is too small, zero if no transaction is |
109 | * present, or the actual length of the result data. | ||
110 | */ | ||
96 | int (*get_result)(struct si_sm_data *smi, | 111 | int (*get_result)(struct si_sm_data *smi, |
97 | unsigned char *data, unsigned int length); | 112 | unsigned char *data, unsigned int length); |
98 | 113 | ||
99 | /* Call this periodically (for a polled interface) or upon | 114 | /* |
100 | receiving an interrupt (for a interrupt-driven interface). | 115 | * Call this periodically (for a polled interface) or upon |
101 | If interrupt driven, you should probably poll this | 116 | * receiving an interrupt (for a interrupt-driven interface). |
102 | periodically when not in idle state. This should be called | 117 | * If interrupt driven, you should probably poll this |
103 | with the time that passed since the last call, if it is | 118 | * periodically when not in idle state. This should be called |
104 | significant. Time is in microseconds. */ | 119 | * with the time that passed since the last call, if it is |
120 | * significant. Time is in microseconds. | ||
121 | */ | ||
105 | enum si_sm_result (*event)(struct si_sm_data *smi, long time); | 122 | enum si_sm_result (*event)(struct si_sm_data *smi, long time); |
106 | 123 | ||
107 | /* Attempt to detect an SMI. Returns 0 on success or nonzero | 124 | /* |
108 | on failure. */ | 125 | * Attempt to detect an SMI. Returns 0 on success or nonzero |
126 | * on failure. | ||
127 | */ | ||
109 | int (*detect)(struct si_sm_data *smi); | 128 | int (*detect)(struct si_sm_data *smi); |
110 | 129 | ||
111 | /* The interface is shutting down, so clean it up. */ | 130 | /* The interface is shutting down, so clean it up. */ |
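Taken together, these ops are the contract each state machine module (KCS, SMIC, BT) fulfils. A hedged skeleton of such a module follows; only the member names visible in this header come from the source, .size is inferred from its use in try_smi_init() above, .cleanup from the closing comment of this hunk, and every example_* body is a stand-in:

/* The module supplies its own definition of the opaque si_sm_data. */
struct si_sm_data {
	struct si_sm_io *io;
	/* ... whatever per-transaction state the hardware needs ... */
};

static unsigned int example_init_data(struct si_sm_data *sm,
				      struct si_sm_io *io)
{
	sm->io = io;
	return 2;		/* number of I/O registers to reserve */
}

static int example_start_transaction(struct si_sm_data *sm,
				     unsigned char *data, unsigned int size)
{
	return 0;		/* -2 if busy, -1 if the size is invalid, 0 on success */
}

static int example_get_result(struct si_sm_data *sm,
			      unsigned char *data, unsigned int length)
{
	return 0;		/* result length, 0 if nothing is ready, -1 if buffer too small */
}

static enum si_sm_result example_event(struct si_sm_data *sm, long time)
{
	return SI_SM_IDLE;	/* drive the hardware one step */
}

static int example_detect(struct si_sm_data *sm)
{
	return 0;		/* 0 means an SMI looks present */
}

static void example_cleanup(struct si_sm_data *sm)
{
}

static int example_size(void)
{
	return sizeof(struct si_sm_data);
}

struct si_sm_handlers example_smi_handlers = {
	.version		= "example",
	.init_data		= example_init_data,
	.start_transaction	= example_start_transaction,
	.get_result		= example_get_result,
	.event			= example_event,
	.detect			= example_detect,
	.cleanup		= example_cleanup,
	.size			= example_size,
};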
diff --git a/drivers/char/ipmi/ipmi_smic_sm.c b/drivers/char/ipmi/ipmi_smic_sm.c index e64ea7d25d24..faed92971907 100644 --- a/drivers/char/ipmi/ipmi_smic_sm.c +++ b/drivers/char/ipmi/ipmi_smic_sm.c | |||
@@ -85,6 +85,7 @@ enum smic_states { | |||
85 | /* SMIC Flags Register Bits */ | 85 | /* SMIC Flags Register Bits */ |
86 | #define SMIC_RX_DATA_READY 0x80 | 86 | #define SMIC_RX_DATA_READY 0x80 |
87 | #define SMIC_TX_DATA_READY 0x40 | 87 | #define SMIC_TX_DATA_READY 0x40 |
88 | |||
88 | /* | 89 | /* |
89 | * SMIC_SMI and SMIC_EVM_DATA_AVAIL are only used by | 90 | * SMIC_SMI and SMIC_EVM_DATA_AVAIL are only used by |
90 | * a few systems, and then only by Systems Management | 91 | * a few systems, and then only by Systems Management |
@@ -104,23 +105,22 @@ enum smic_states { | |||
104 | #define EC_ILLEGAL_COMMAND 0x04 | 105 | #define EC_ILLEGAL_COMMAND 0x04 |
105 | #define EC_BUFFER_FULL 0x05 | 106 | #define EC_BUFFER_FULL 0x05 |
106 | 107 | ||
107 | struct si_sm_data | 108 | struct si_sm_data { |
108 | { | ||
109 | enum smic_states state; | 109 | enum smic_states state; |
110 | struct si_sm_io *io; | 110 | struct si_sm_io *io; |
111 | unsigned char write_data[MAX_SMIC_WRITE_SIZE]; | 111 | unsigned char write_data[MAX_SMIC_WRITE_SIZE]; |
112 | int write_pos; | 112 | int write_pos; |
113 | int write_count; | 113 | int write_count; |
114 | int orig_write_count; | 114 | int orig_write_count; |
115 | unsigned char read_data[MAX_SMIC_READ_SIZE]; | 115 | unsigned char read_data[MAX_SMIC_READ_SIZE]; |
116 | int read_pos; | 116 | int read_pos; |
117 | int truncated; | 117 | int truncated; |
118 | unsigned int error_retries; | 118 | unsigned int error_retries; |
119 | long smic_timeout; | 119 | long smic_timeout; |
120 | }; | 120 | }; |
121 | 121 | ||
122 | static unsigned int init_smic_data (struct si_sm_data *smic, | 122 | static unsigned int init_smic_data(struct si_sm_data *smic, |
123 | struct si_sm_io *io) | 123 | struct si_sm_io *io) |
124 | { | 124 | { |
125 | smic->state = SMIC_IDLE; | 125 | smic->state = SMIC_IDLE; |
126 | smic->io = io; | 126 | smic->io = io; |
@@ -150,11 +150,10 @@ static int start_smic_transaction(struct si_sm_data *smic, | |||
150 | return IPMI_NOT_IN_MY_STATE_ERR; | 150 | return IPMI_NOT_IN_MY_STATE_ERR; |
151 | 151 | ||
152 | if (smic_debug & SMIC_DEBUG_MSG) { | 152 | if (smic_debug & SMIC_DEBUG_MSG) { |
153 | printk(KERN_INFO "start_smic_transaction -"); | 153 | printk(KERN_DEBUG "start_smic_transaction -"); |
154 | for (i = 0; i < size; i ++) { | 154 | for (i = 0; i < size; i++) |
155 | printk (" %02x", (unsigned char) (data [i])); | 155 | printk(" %02x", (unsigned char) data[i]); |
156 | } | 156 | printk("\n"); |
157 | printk ("\n"); | ||
158 | } | 157 | } |
159 | smic->error_retries = 0; | 158 | smic->error_retries = 0; |
160 | memcpy(smic->write_data, data, size); | 159 | memcpy(smic->write_data, data, size); |
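As a hedged aside, not part of this patch: the per-byte printk loop tidied above could also be emitted in a single call with the kernel's hexdump helper, which already existed at this point:

if (smic_debug & SMIC_DEBUG_MSG)
	print_hex_dump(KERN_DEBUG, "start_smic_transaction: ",
		       DUMP_PREFIX_OFFSET, 16, 1, data, size, false);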
@@ -173,11 +172,10 @@ static int smic_get_result(struct si_sm_data *smic, | |||
173 | int i; | 172 | int i; |
174 | 173 | ||
175 | if (smic_debug & SMIC_DEBUG_MSG) { | 174 | if (smic_debug & SMIC_DEBUG_MSG) { |
176 | printk (KERN_INFO "smic_get result -"); | 175 | printk(KERN_DEBUG "smic_get result -"); |
177 | for (i = 0; i < smic->read_pos; i ++) { | 176 | for (i = 0; i < smic->read_pos; i++) |
178 | printk (" %02x", (smic->read_data [i])); | 177 | printk(" %02x", smic->read_data[i]); |
179 | } | 178 | printk("\n"); |
180 | printk ("\n"); | ||
181 | } | 179 | } |
182 | if (length < smic->read_pos) { | 180 | if (length < smic->read_pos) { |
183 | smic->read_pos = length; | 181 | smic->read_pos = length; |
@@ -223,8 +221,8 @@ static inline void write_smic_control(struct si_sm_data *smic, | |||
223 | smic->io->outputb(smic->io, 1, control); | 221 | smic->io->outputb(smic->io, 1, control); |
224 | } | 222 | } |
225 | 223 | ||
226 | static inline void write_si_sm_data (struct si_sm_data *smic, | 224 | static inline void write_si_sm_data(struct si_sm_data *smic, |
227 | unsigned char data) | 225 | unsigned char data) |
228 | { | 226 | { |
229 | smic->io->outputb(smic->io, 0, data); | 227 | smic->io->outputb(smic->io, 0, data); |
230 | } | 228 | } |
@@ -233,10 +231,9 @@ static inline void start_error_recovery(struct si_sm_data *smic, char *reason) | |||
233 | { | 231 | { |
234 | (smic->error_retries)++; | 232 | (smic->error_retries)++; |
235 | if (smic->error_retries > SMIC_MAX_ERROR_RETRIES) { | 233 | if (smic->error_retries > SMIC_MAX_ERROR_RETRIES) { |
236 | if (smic_debug & SMIC_DEBUG_ENABLE) { | 234 | if (smic_debug & SMIC_DEBUG_ENABLE) |
237 | printk(KERN_WARNING | 235 | printk(KERN_WARNING |
238 | "ipmi_smic_drv: smic hosed: %s\n", reason); | 236 | "ipmi_smic_drv: smic hosed: %s\n", reason); |
239 | } | ||
240 | smic->state = SMIC_HOSED; | 237 | smic->state = SMIC_HOSED; |
241 | } else { | 238 | } else { |
242 | smic->write_count = smic->orig_write_count; | 239 | smic->write_count = smic->orig_write_count; |
@@ -254,14 +251,14 @@ static inline void write_next_byte(struct si_sm_data *smic) | |||
254 | (smic->write_count)--; | 251 | (smic->write_count)--; |
255 | } | 252 | } |
256 | 253 | ||
257 | static inline void read_next_byte (struct si_sm_data *smic) | 254 | static inline void read_next_byte(struct si_sm_data *smic) |
258 | { | 255 | { |
259 | if (smic->read_pos >= MAX_SMIC_READ_SIZE) { | 256 | if (smic->read_pos >= MAX_SMIC_READ_SIZE) { |
260 | read_smic_data (smic); | 257 | read_smic_data(smic); |
261 | smic->truncated = 1; | 258 | smic->truncated = 1; |
262 | } else { | 259 | } else { |
263 | smic->read_data[smic->read_pos] = read_smic_data(smic); | 260 | smic->read_data[smic->read_pos] = read_smic_data(smic); |
264 | (smic->read_pos)++; | 261 | smic->read_pos++; |
265 | } | 262 | } |
266 | } | 263 | } |
267 | 264 | ||
@@ -336,7 +333,7 @@ static inline void read_next_byte (struct si_sm_data *smic) | |||
336 | SMIC_SC_SMS_RD_END 0xC6 | 333 | SMIC_SC_SMS_RD_END 0xC6 |
337 | */ | 334 | */ |
338 | 335 | ||
339 | static enum si_sm_result smic_event (struct si_sm_data *smic, long time) | 336 | static enum si_sm_result smic_event(struct si_sm_data *smic, long time) |
340 | { | 337 | { |
341 | unsigned char status; | 338 | unsigned char status; |
342 | unsigned char flags; | 339 | unsigned char flags; |
@@ -347,13 +344,15 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time) | |||
347 | return SI_SM_HOSED; | 344 | return SI_SM_HOSED; |
348 | } | 345 | } |
349 | if (smic->state != SMIC_IDLE) { | 346 | if (smic->state != SMIC_IDLE) { |
350 | if (smic_debug & SMIC_DEBUG_STATES) { | 347 | if (smic_debug & SMIC_DEBUG_STATES) |
351 | printk(KERN_INFO | 348 | printk(KERN_DEBUG |
352 | "smic_event - smic->smic_timeout = %ld," | 349 | "smic_event - smic->smic_timeout = %ld," |
353 | " time = %ld\n", | 350 | " time = %ld\n", |
354 | smic->smic_timeout, time); | 351 | smic->smic_timeout, time); |
355 | } | 352 | /* |
356 | /* FIXME: smic_event is sometimes called with time > SMIC_RETRY_TIMEOUT */ | 353 | * FIXME: smic_event is sometimes called with time > |
354 | * SMIC_RETRY_TIMEOUT | ||
355 | */ | ||
357 | if (time < SMIC_RETRY_TIMEOUT) { | 356 | if (time < SMIC_RETRY_TIMEOUT) { |
358 | smic->smic_timeout -= time; | 357 | smic->smic_timeout -= time; |
359 | if (smic->smic_timeout < 0) { | 358 | if (smic->smic_timeout < 0) { |
@@ -366,9 +365,9 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time) | |||
366 | if (flags & SMIC_FLAG_BSY) | 365 | if (flags & SMIC_FLAG_BSY) |
367 | return SI_SM_CALL_WITH_DELAY; | 366 | return SI_SM_CALL_WITH_DELAY; |
368 | 367 | ||
369 | status = read_smic_status (smic); | 368 | status = read_smic_status(smic); |
370 | if (smic_debug & SMIC_DEBUG_STATES) | 369 | if (smic_debug & SMIC_DEBUG_STATES) |
371 | printk(KERN_INFO | 370 | printk(KERN_DEBUG |
372 | "smic_event - state = %d, flags = 0x%02x," | 371 | "smic_event - state = %d, flags = 0x%02x," |
373 | " status = 0x%02x\n", | 372 | " status = 0x%02x\n", |
374 | smic->state, flags, status); | 373 | smic->state, flags, status); |
@@ -377,9 +376,7 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time) | |||
377 | case SMIC_IDLE: | 376 | case SMIC_IDLE: |
378 | /* in IDLE we check for available messages */ | 377 | /* in IDLE we check for available messages */ |
379 | if (flags & SMIC_SMS_DATA_AVAIL) | 378 | if (flags & SMIC_SMS_DATA_AVAIL) |
380 | { | ||
381 | return SI_SM_ATTN; | 379 | return SI_SM_ATTN; |
382 | } | ||
383 | return SI_SM_IDLE; | 380 | return SI_SM_IDLE; |
384 | 381 | ||
385 | case SMIC_START_OP: | 382 | case SMIC_START_OP: |
@@ -391,7 +388,7 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time) | |||
391 | 388 | ||
392 | case SMIC_OP_OK: | 389 | case SMIC_OP_OK: |
393 | if (status != SMIC_SC_SMS_READY) { | 390 | if (status != SMIC_SC_SMS_READY) { |
394 | /* this should not happen */ | 391 | /* this should not happen */ |
395 | start_error_recovery(smic, | 392 | start_error_recovery(smic, |
396 | "state = SMIC_OP_OK," | 393 | "state = SMIC_OP_OK," |
397 | " status != SMIC_SC_SMS_READY"); | 394 | " status != SMIC_SC_SMS_READY"); |
@@ -411,8 +408,10 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time) | |||
411 | "status != SMIC_SC_SMS_WR_START"); | 408 | "status != SMIC_SC_SMS_WR_START"); |
412 | return SI_SM_CALL_WITH_DELAY; | 409 | return SI_SM_CALL_WITH_DELAY; |
413 | } | 410 | } |
414 | /* we must not issue WR_(NEXT|END) unless | 411 | /* |
415 | TX_DATA_READY is set */ | 412 | * we must not issue WR_(NEXT|END) unless |
413 | * TX_DATA_READY is set | ||
414 | */ | ||
416 | if (flags & SMIC_TX_DATA_READY) { | 415 | if (flags & SMIC_TX_DATA_READY) { |
417 | if (smic->write_count == 1) { | 416 | if (smic->write_count == 1) { |
418 | /* last byte */ | 417 | /* last byte */ |
@@ -424,10 +423,8 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time) | |||
424 | } | 423 | } |
425 | write_next_byte(smic); | 424 | write_next_byte(smic); |
426 | write_smic_flags(smic, flags | SMIC_FLAG_BSY); | 425 | write_smic_flags(smic, flags | SMIC_FLAG_BSY); |
427 | } | 426 | } else |
428 | else { | ||
429 | return SI_SM_CALL_WITH_DELAY; | 427 | return SI_SM_CALL_WITH_DELAY; |
430 | } | ||
431 | break; | 428 | break; |
432 | 429 | ||
433 | case SMIC_WRITE_NEXT: | 430 | case SMIC_WRITE_NEXT: |
@@ -442,52 +439,48 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time) | |||
442 | if (smic->write_count == 1) { | 439 | if (smic->write_count == 1) { |
443 | write_smic_control(smic, SMIC_CC_SMS_WR_END); | 440 | write_smic_control(smic, SMIC_CC_SMS_WR_END); |
444 | smic->state = SMIC_WRITE_END; | 441 | smic->state = SMIC_WRITE_END; |
445 | } | 442 | } else { |
446 | else { | ||
447 | write_smic_control(smic, SMIC_CC_SMS_WR_NEXT); | 443 | write_smic_control(smic, SMIC_CC_SMS_WR_NEXT); |
448 | smic->state = SMIC_WRITE_NEXT; | 444 | smic->state = SMIC_WRITE_NEXT; |
449 | } | 445 | } |
450 | write_next_byte(smic); | 446 | write_next_byte(smic); |
451 | write_smic_flags(smic, flags | SMIC_FLAG_BSY); | 447 | write_smic_flags(smic, flags | SMIC_FLAG_BSY); |
452 | } | 448 | } else |
453 | else { | ||
454 | return SI_SM_CALL_WITH_DELAY; | 449 | return SI_SM_CALL_WITH_DELAY; |
455 | } | ||
456 | break; | 450 | break; |
457 | 451 | ||
458 | case SMIC_WRITE_END: | 452 | case SMIC_WRITE_END: |
459 | if (status != SMIC_SC_SMS_WR_END) { | 453 | if (status != SMIC_SC_SMS_WR_END) { |
460 | start_error_recovery (smic, | 454 | start_error_recovery(smic, |
461 | "state = SMIC_WRITE_END, " | 455 | "state = SMIC_WRITE_END, " |
462 | "status != SMIC_SC_SMS_WR_END"); | 456 | "status != SMIC_SC_SMS_WR_END"); |
463 | return SI_SM_CALL_WITH_DELAY; | 457 | return SI_SM_CALL_WITH_DELAY; |
464 | } | 458 | } |
465 | /* data register holds an error code */ | 459 | /* data register holds an error code */ |
466 | data = read_smic_data(smic); | 460 | data = read_smic_data(smic); |
467 | if (data != 0) { | 461 | if (data != 0) { |
468 | if (smic_debug & SMIC_DEBUG_ENABLE) { | 462 | if (smic_debug & SMIC_DEBUG_ENABLE) |
469 | printk(KERN_INFO | 463 | printk(KERN_DEBUG |
470 | "SMIC_WRITE_END: data = %02x\n", data); | 464 | "SMIC_WRITE_END: data = %02x\n", data); |
471 | } | ||
472 | start_error_recovery(smic, | 465 | start_error_recovery(smic, |
473 | "state = SMIC_WRITE_END, " | 466 | "state = SMIC_WRITE_END, " |
474 | "data != SUCCESS"); | 467 | "data != SUCCESS"); |
475 | return SI_SM_CALL_WITH_DELAY; | 468 | return SI_SM_CALL_WITH_DELAY; |
476 | } else { | 469 | } else |
477 | smic->state = SMIC_WRITE2READ; | 470 | smic->state = SMIC_WRITE2READ; |
478 | } | ||
479 | break; | 471 | break; |
480 | 472 | ||
481 | case SMIC_WRITE2READ: | 473 | case SMIC_WRITE2READ: |
482 | /* we must wait for RX_DATA_READY to be set before we | 474 | /* |
483 | can continue */ | 475 | * we must wait for RX_DATA_READY to be set before we |
476 | * can continue | ||
477 | */ | ||
484 | if (flags & SMIC_RX_DATA_READY) { | 478 | if (flags & SMIC_RX_DATA_READY) { |
485 | write_smic_control(smic, SMIC_CC_SMS_RD_START); | 479 | write_smic_control(smic, SMIC_CC_SMS_RD_START); |
486 | write_smic_flags(smic, flags | SMIC_FLAG_BSY); | 480 | write_smic_flags(smic, flags | SMIC_FLAG_BSY); |
487 | smic->state = SMIC_READ_START; | 481 | smic->state = SMIC_READ_START; |
488 | } else { | 482 | } else |
489 | return SI_SM_CALL_WITH_DELAY; | 483 | return SI_SM_CALL_WITH_DELAY; |
490 | } | ||
491 | break; | 484 | break; |
492 | 485 | ||
493 | case SMIC_READ_START: | 486 | case SMIC_READ_START: |
@@ -502,15 +495,16 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time) | |||
502 | write_smic_control(smic, SMIC_CC_SMS_RD_NEXT); | 495 | write_smic_control(smic, SMIC_CC_SMS_RD_NEXT); |
503 | write_smic_flags(smic, flags | SMIC_FLAG_BSY); | 496 | write_smic_flags(smic, flags | SMIC_FLAG_BSY); |
504 | smic->state = SMIC_READ_NEXT; | 497 | smic->state = SMIC_READ_NEXT; |
505 | } else { | 498 | } else |
506 | return SI_SM_CALL_WITH_DELAY; | 499 | return SI_SM_CALL_WITH_DELAY; |
507 | } | ||
508 | break; | 500 | break; |
509 | 501 | ||
510 | case SMIC_READ_NEXT: | 502 | case SMIC_READ_NEXT: |
511 | switch (status) { | 503 | switch (status) { |
512 | /* smic tells us that this is the last byte to be read | 504 | /* |
513 | --> clean up */ | 505 | * smic tells us that this is the last byte to be read |
506 | * --> clean up | ||
507 | */ | ||
514 | case SMIC_SC_SMS_RD_END: | 508 | case SMIC_SC_SMS_RD_END: |
515 | read_next_byte(smic); | 509 | read_next_byte(smic); |
516 | write_smic_control(smic, SMIC_CC_SMS_RD_END); | 510 | write_smic_control(smic, SMIC_CC_SMS_RD_END); |
@@ -523,9 +517,8 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time) | |||
523 | write_smic_control(smic, SMIC_CC_SMS_RD_NEXT); | 517 | write_smic_control(smic, SMIC_CC_SMS_RD_NEXT); |
524 | write_smic_flags(smic, flags | SMIC_FLAG_BSY); | 518 | write_smic_flags(smic, flags | SMIC_FLAG_BSY); |
525 | smic->state = SMIC_READ_NEXT; | 519 | smic->state = SMIC_READ_NEXT; |
526 | } else { | 520 | } else |
527 | return SI_SM_CALL_WITH_DELAY; | 521 | return SI_SM_CALL_WITH_DELAY; |
528 | } | ||
529 | break; | 522 | break; |
530 | default: | 523 | default: |
531 | start_error_recovery( | 524 | start_error_recovery( |
@@ -546,10 +539,9 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time) | |||
546 | data = read_smic_data(smic); | 539 | data = read_smic_data(smic); |
547 | /* data register holds an error code */ | 540 | /* data register holds an error code */ |
548 | if (data != 0) { | 541 | if (data != 0) { |
549 | if (smic_debug & SMIC_DEBUG_ENABLE) { | 542 | if (smic_debug & SMIC_DEBUG_ENABLE) |
550 | printk(KERN_INFO | 543 | printk(KERN_DEBUG |
551 | "SMIC_READ_END: data = %02x\n", data); | 544 | "SMIC_READ_END: data = %02x\n", data); |
552 | } | ||
553 | start_error_recovery(smic, | 545 | start_error_recovery(smic, |
554 | "state = SMIC_READ_END, " | 546 | "state = SMIC_READ_END, " |
555 | "data != SUCCESS"); | 547 | "data != SUCCESS"); |
@@ -565,7 +557,7 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time) | |||
565 | 557 | ||
566 | default: | 558 | default: |
567 | if (smic_debug & SMIC_DEBUG_ENABLE) { | 559 | if (smic_debug & SMIC_DEBUG_ENABLE) { |
568 | printk(KERN_WARNING "smic->state = %d\n", smic->state); | 560 | printk(KERN_DEBUG "smic->state = %d\n", smic->state); |
569 | start_error_recovery(smic, "state = UNKNOWN"); | 561 | start_error_recovery(smic, "state = UNKNOWN"); |
570 | return SI_SM_CALL_WITH_DELAY; | 562 | return SI_SM_CALL_WITH_DELAY; |
571 | } | 563 | } |
@@ -576,10 +568,12 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time) | |||
576 | 568 | ||
577 | static int smic_detect(struct si_sm_data *smic) | 569 | static int smic_detect(struct si_sm_data *smic) |
578 | { | 570 | { |
579 | /* It's impossible for the SMIC flags register to be all 1's, | 571 | /* |
580 | (assuming a properly functioning, self-initialized BMC) | 572 | * It's impossible for the SMIC flags register to be all 1's, |
581 | but that's what you get from reading a bogus address, so we | 573 | * (assuming a properly functioning, self-initialized BMC) |
582 | test that first. */ | 574 | * but that's what you get from reading a bogus address, so we |
575 | * test that first. | ||
576 | */ | ||
583 | if (read_smic_flags(smic) == 0xff) | 577 | if (read_smic_flags(smic) == 0xff) |
584 | return 1; | 578 | return 1; |
585 | 579 | ||
@@ -595,8 +589,7 @@ static int smic_size(void) | |||
595 | return sizeof(struct si_sm_data); | 589 | return sizeof(struct si_sm_data); |
596 | } | 590 | } |
597 | 591 | ||
598 | struct si_sm_handlers smic_smi_handlers = | 592 | struct si_sm_handlers smic_smi_handlers = { |
599 | { | ||
600 | .init_data = init_smic_data, | 593 | .init_data = init_smic_data, |
601 | .start_transaction = start_smic_transaction, | 594 | .start_transaction = start_smic_transaction, |
602 | .get_result = smic_get_result, | 595 | .get_result = smic_get_result, |
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c index 8f45ca9235ad..1b9a87047817 100644 --- a/drivers/char/ipmi/ipmi_watchdog.c +++ b/drivers/char/ipmi/ipmi_watchdog.c | |||
@@ -54,13 +54,15 @@ | |||
54 | #include <asm/atomic.h> | 54 | #include <asm/atomic.h> |
55 | 55 | ||
56 | #ifdef CONFIG_X86 | 56 | #ifdef CONFIG_X86 |
57 | /* This is ugly, but I've determined that x86 is the only architecture | 57 | /* |
58 | that can reasonably support the IPMI NMI watchdog timeout at this | 58 | * This is ugly, but I've determined that x86 is the only architecture |
59 | time. If another architecture adds this capability somehow, it | 59 | * that can reasonably support the IPMI NMI watchdog timeout at this |
60 | will have to be a somewhat different mechanism and I have no idea | 60 | * time. If another architecture adds this capability somehow, it |
61 | how it will work. So in the unlikely event that another | 61 | * will have to be a somewhat different mechanism and I have no idea |
62 | architecture supports this, we can figure out a good generic | 62 | * how it will work. So in the unlikely event that another |
63 | mechanism for it at that time. */ | 63 | * architecture supports this, we can figure out a good generic |
64 | * mechanism for it at that time. | ||
65 | */ | ||
64 | #include <asm/kdebug.h> | 66 | #include <asm/kdebug.h> |
65 | #define HAVE_DIE_NMI | 67 | #define HAVE_DIE_NMI |
66 | #endif | 68 | #endif |
@@ -95,9 +97,8 @@ | |||
95 | /* Operations that can be performed on a pretimeout. */ | 97 | /* Operations that can be performed on a pretimeout. */ |
96 | #define WDOG_PREOP_NONE 0 | 98 | #define WDOG_PREOP_NONE 0 |
97 | #define WDOG_PREOP_PANIC 1 | 99 | #define WDOG_PREOP_PANIC 1 |
98 | #define WDOG_PREOP_GIVE_DATA 2 /* Cause data to be available to | 100 | /* Cause data to be available to read. Doesn't work in NMI mode. */ |
99 | read. Doesn't work in NMI | 101 | #define WDOG_PREOP_GIVE_DATA 2 |
100 | mode. */ | ||
101 | 102 | ||
102 | /* Actions to perform on a full timeout. */ | 103 | /* Actions to perform on a full timeout. */ |
103 | #define WDOG_SET_TIMEOUT_ACT(byte, use) \ | 104 | #define WDOG_SET_TIMEOUT_ACT(byte, use) \ |
@@ -108,8 +109,10 @@ | |||
108 | #define WDOG_TIMEOUT_POWER_DOWN 2 | 109 | #define WDOG_TIMEOUT_POWER_DOWN 2 |
109 | #define WDOG_TIMEOUT_POWER_CYCLE 3 | 110 | #define WDOG_TIMEOUT_POWER_CYCLE 3 |
110 | 111 | ||
111 | /* Byte 3 of the get command, byte 4 of the get response is the | 112 | /* |
112 | pre-timeout in seconds. */ | 113 | * Byte 3 of the get command, byte 4 of the get response is the |
114 | * pre-timeout in seconds. | ||
115 | */ | ||
113 | 116 | ||
114 | /* Bits for setting byte 4 of the set command, byte 5 of the get response. */ | 117 | /* Bits for setting byte 4 of the set command, byte 5 of the get response. */ |
115 | #define WDOG_EXPIRE_CLEAR_BIOS_FRB2 (1 << 1) | 118 | #define WDOG_EXPIRE_CLEAR_BIOS_FRB2 (1 << 1) |
@@ -118,11 +121,13 @@ | |||
118 | #define WDOG_EXPIRE_CLEAR_SMS_OS (1 << 4) | 121 | #define WDOG_EXPIRE_CLEAR_SMS_OS (1 << 4) |
119 | #define WDOG_EXPIRE_CLEAR_OEM (1 << 5) | 122 | #define WDOG_EXPIRE_CLEAR_OEM (1 << 5) |
120 | 123 | ||
121 | /* Setting/getting the watchdog timer value. This is for bytes 5 and | 124 | /* |
122 | 6 (the timeout time) of the set command, and bytes 6 and 7 (the | 125 | * Setting/getting the watchdog timer value. This is for bytes 5 and |
123 | timeout time) and 8 and 9 (the current countdown value) of the | 126 | * 6 (the timeout time) of the set command, and bytes 6 and 7 (the |
124 | response. The timeout value is given in seconds (in the command it | 127 | * timeout time) and 8 and 9 (the current countdown value) of the |
125 | is 100ms intervals). */ | 128 | * response. The timeout value is given in seconds (in the command it |
129 | * is 100ms intervals). | ||
130 | */ | ||
126 | #define WDOG_SET_TIMEOUT(byte1, byte2, val) \ | 131 | #define WDOG_SET_TIMEOUT(byte1, byte2, val) \ |
127 | (byte1) = (((val) * 10) & 0xff), (byte2) = (((val) * 10) >> 8) | 132 | (byte1) = (((val) * 10) & 0xff), (byte2) = (((val) * 10) >> 8) |
128 | #define WDOG_GET_TIMEOUT(byte1, byte2) \ | 133 | #define WDOG_GET_TIMEOUT(byte1, byte2) \ |
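A worked example of the 100 ms encoding (the body of WDOG_GET_TIMEOUT falls outside this hunk, so the read-back step assumes it simply divides by ten again):

/* Fragment, as it would appear inside a function: */
unsigned char byte1, byte2;

WDOG_SET_TIMEOUT(byte1, byte2, 300);
/* 300 s * 10 = 3000 = 0x0bb8, so byte1 == 0xb8 and byte2 == 0x0b. */

/* Read-back: (0xb8 | (0x0b << 8)) / 10 == 3000 / 10 == 300 seconds. */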
@@ -184,8 +189,10 @@ static int ipmi_set_timeout(int do_heartbeat); | |||
184 | static void ipmi_register_watchdog(int ipmi_intf); | 189 | static void ipmi_register_watchdog(int ipmi_intf); |
185 | static void ipmi_unregister_watchdog(int ipmi_intf); | 190 | static void ipmi_unregister_watchdog(int ipmi_intf); |
186 | 191 | ||
187 | /* If true, the driver will start running as soon as it is configured | 192 | /* |
188 | and ready. */ | 193 | * If true, the driver will start running as soon as it is configured |
194 | * and ready. | ||
195 | */ | ||
189 | static int start_now; | 196 | static int start_now; |
190 | 197 | ||
191 | static int set_param_int(const char *val, struct kernel_param *kp) | 198 | static int set_param_int(const char *val, struct kernel_param *kp) |
@@ -309,10 +316,12 @@ static int ipmi_ignore_heartbeat; | |||
309 | /* Is someone using the watchdog? Only one user is allowed. */ | 316 | /* Is someone using the watchdog? Only one user is allowed. */ |
310 | static unsigned long ipmi_wdog_open; | 317 | static unsigned long ipmi_wdog_open; |
311 | 318 | ||
312 | /* If set to 1, the heartbeat command will set the state to reset and | 319 | /* |
313 | start the timer. The timer doesn't normally run when the driver is | 320 | * If set to 1, the heartbeat command will set the state to reset and |
314 | first opened until the heartbeat is set the first time, this | 321 | * start the timer. The timer doesn't normally run when the driver is |
315 | variable is used to accomplish this. */ | 322 | * first opened until the heartbeat is set the first time, this |
323 | * variable is used to accomplish this. | ||
324 | */ | ||
316 | static int ipmi_start_timer_on_heartbeat; | 325 | static int ipmi_start_timer_on_heartbeat; |
317 | 326 | ||
318 | /* IPMI version of the BMC. */ | 327 | /* IPMI version of the BMC. */ |
@@ -329,10 +338,12 @@ static int nmi_handler_registered; | |||
329 | 338 | ||
330 | static int ipmi_heartbeat(void); | 339 | static int ipmi_heartbeat(void); |
331 | 340 | ||
332 | /* We use a mutex to make sure that only one thing can send a set | 341 | /* |
333 | timeout at one time, because we only have one copy of the data. | 342 | * We use a mutex to make sure that only one thing can send a set |
334 | The mutex is claimed when the set_timeout is sent and freed | 343 | * timeout at one time, because we only have one copy of the data. |
335 | when both messages are free. */ | 344 | * The mutex is claimed when the set_timeout is sent and freed |
345 | * when both messages are free. | ||
346 | */ | ||
336 | static atomic_t set_timeout_tofree = ATOMIC_INIT(0); | 347 | static atomic_t set_timeout_tofree = ATOMIC_INIT(0); |
337 | static DEFINE_MUTEX(set_timeout_lock); | 348 | static DEFINE_MUTEX(set_timeout_lock); |
338 | static DECLARE_COMPLETION(set_timeout_wait); | 349 | static DECLARE_COMPLETION(set_timeout_wait); |
@@ -346,15 +357,13 @@ static void set_timeout_free_recv(struct ipmi_recv_msg *msg) | |||
346 | if (atomic_dec_and_test(&set_timeout_tofree)) | 357 | if (atomic_dec_and_test(&set_timeout_tofree)) |
347 | complete(&set_timeout_wait); | 358 | complete(&set_timeout_wait); |
348 | } | 359 | } |
349 | static struct ipmi_smi_msg set_timeout_smi_msg = | 360 | static struct ipmi_smi_msg set_timeout_smi_msg = { |
350 | { | ||
351 | .done = set_timeout_free_smi | 361 | .done = set_timeout_free_smi |
352 | }; | 362 | }; |
353 | static struct ipmi_recv_msg set_timeout_recv_msg = | 363 | static struct ipmi_recv_msg set_timeout_recv_msg = { |
354 | { | ||
355 | .done = set_timeout_free_recv | 364 | .done = set_timeout_free_recv |
356 | }; | 365 | }; |
357 | 366 | ||
358 | static int i_ipmi_set_timeout(struct ipmi_smi_msg *smi_msg, | 367 | static int i_ipmi_set_timeout(struct ipmi_smi_msg *smi_msg, |
359 | struct ipmi_recv_msg *recv_msg, | 368 | struct ipmi_recv_msg *recv_msg, |
360 | int *send_heartbeat_now) | 369 | int *send_heartbeat_now) |
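The statics above form a small hand-rolled rendezvous: the caller supplies its own SMI and receive message buffers, primes set_timeout_tofree with one reference per message, and each ->done() callback drops a reference until the completion fires. A hedged sketch of the calling pattern (the send step is a stand-in for the real request through the IPMI messaging layer):

/* Stand-in for the actual request submission. */
static int example_send_set_timeout(struct ipmi_smi_msg *smi_msg,
				    struct ipmi_recv_msg *recv_msg);

static int example_set_timeout_and_wait(void)
{
	int rv;

	mutex_lock(&set_timeout_lock);

	/* One reference per message that will come back via ->done(). */
	atomic_set(&set_timeout_tofree, 2);

	rv = example_send_set_timeout(&set_timeout_smi_msg,
				      &set_timeout_recv_msg);
	if (rv)
		/* Nothing was queued, so the callbacks will never run. */
		atomic_set(&set_timeout_tofree, 0);
	else
		/* Sleep until both ->done() handlers have released their message. */
		wait_for_completion(&set_timeout_wait);

	mutex_unlock(&set_timeout_lock);
	return rv;
}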
@@ -373,13 +382,14 @@ static int i_ipmi_set_timeout(struct ipmi_smi_msg *smi_msg, | |||
373 | WDOG_SET_TIMER_USE(data[0], WDOG_TIMER_USE_SMS_OS); | 382 | WDOG_SET_TIMER_USE(data[0], WDOG_TIMER_USE_SMS_OS); |
374 | 383 | ||
375 | if ((ipmi_version_major > 1) | 384 | if ((ipmi_version_major > 1) |
376 | || ((ipmi_version_major == 1) && (ipmi_version_minor >= 5))) | 385 | || ((ipmi_version_major == 1) && (ipmi_version_minor >= 5))) { |
377 | { | ||
378 | /* This is an IPMI 1.5-only feature. */ | 386 | /* This is an IPMI 1.5-only feature. */ |
379 | data[0] |= WDOG_DONT_STOP_ON_SET; | 387 | data[0] |= WDOG_DONT_STOP_ON_SET; |
380 | } else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) { | 388 | } else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) { |
381 | /* In ipmi 1.0, setting the timer stops the watchdog, we | 389 | /* |
382 | need to start it back up again. */ | 390 | * In ipmi 1.0, setting the timer stops the watchdog, we |
391 | * need to start it back up again. | ||
392 | */ | ||
383 | hbnow = 1; | 393 | hbnow = 1; |
384 | } | 394 | } |
385 | 395 | ||
@@ -465,12 +475,10 @@ static void panic_recv_free(struct ipmi_recv_msg *msg) | |||
465 | atomic_dec(&panic_done_count); | 475 | atomic_dec(&panic_done_count); |
466 | } | 476 | } |
467 | 477 | ||
468 | static struct ipmi_smi_msg panic_halt_heartbeat_smi_msg = | 478 | static struct ipmi_smi_msg panic_halt_heartbeat_smi_msg = { |
469 | { | ||
470 | .done = panic_smi_free | 479 | .done = panic_smi_free |
471 | }; | 480 | }; |
472 | static struct ipmi_recv_msg panic_halt_heartbeat_recv_msg = | 481 | static struct ipmi_recv_msg panic_halt_heartbeat_recv_msg = { |
473 | { | ||
474 | .done = panic_recv_free | 482 | .done = panic_recv_free |
475 | }; | 483 | }; |
476 | 484 | ||
@@ -480,8 +488,10 @@ static void panic_halt_ipmi_heartbeat(void) | |||
480 | struct ipmi_system_interface_addr addr; | 488 | struct ipmi_system_interface_addr addr; |
481 | int rv; | 489 | int rv; |
482 | 490 | ||
483 | /* Don't reset the timer if we have the timer turned off, that | 491 | /* |
484 | re-enables the watchdog. */ | 492 | * Don't reset the timer if we have the timer turned off, that |
493 | * re-enables the watchdog. | ||
494 | */ | ||
485 | if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE) | 495 | if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE) |
486 | return; | 496 | return; |
487 | 497 | ||
@@ -505,19 +515,19 @@ static void panic_halt_ipmi_heartbeat(void) | |||
505 | atomic_add(2, &panic_done_count); | 515 | atomic_add(2, &panic_done_count); |
506 | } | 516 | } |
507 | 517 | ||
508 | static struct ipmi_smi_msg panic_halt_smi_msg = | 518 | static struct ipmi_smi_msg panic_halt_smi_msg = { |
509 | { | ||
510 | .done = panic_smi_free | 519 | .done = panic_smi_free |
511 | }; | 520 | }; |
512 | static struct ipmi_recv_msg panic_halt_recv_msg = | 521 | static struct ipmi_recv_msg panic_halt_recv_msg = { |
513 | { | ||
514 | .done = panic_recv_free | 522 | .done = panic_recv_free |
515 | }; | 523 | }; |
516 | 524 | ||
517 | /* Special call, doesn't claim any locks. This is only to be called | 525 | /* |
518 | at panic or halt time, in run-to-completion mode, when the caller | 526 | * Special call, doesn't claim any locks. This is only to be called |
519 | is the only CPU and the only thing that will be going is these IPMI | 527 | * at panic or halt time, in run-to-completion mode, when the caller |
520 | calls. */ | 528 | * is the only CPU and the only thing that will be going is these IPMI |
529 | * calls. | ||
530 | */ | ||
521 | static void panic_halt_ipmi_set_timeout(void) | 531 | static void panic_halt_ipmi_set_timeout(void) |
522 | { | 532 | { |
523 | int send_heartbeat_now; | 533 | int send_heartbeat_now; |
@@ -540,10 +550,12 @@ static void panic_halt_ipmi_set_timeout(void) | |||
540 | ipmi_poll_interface(watchdog_user); | 550 | ipmi_poll_interface(watchdog_user); |
541 | } | 551 | } |
542 | 552 | ||
543 | /* We use a semaphore to make sure that only one thing can send a | 553 | /* |
544 | heartbeat at one time, because we only have one copy of the data. | 554 | * We use a mutex to make sure that only one thing can send a |
545 | The semaphore is claimed when the set_timeout is sent and freed | 555 | * heartbeat at one time, because we only have one copy of the data. |
546 | when both messages are free. */ | 556 | * The semaphore is claimed when the set_timeout is sent and freed |
557 | * when both messages are free. | ||
(The reflowed comment still says "The semaphore is claimed" although its first line now says mutex; "The mutex is claimed" is the intended reading.)
558 | */ | ||
547 | static atomic_t heartbeat_tofree = ATOMIC_INIT(0); | 559 | static atomic_t heartbeat_tofree = ATOMIC_INIT(0); |
548 | static DEFINE_MUTEX(heartbeat_lock); | 560 | static DEFINE_MUTEX(heartbeat_lock); |
549 | static DECLARE_COMPLETION(heartbeat_wait); | 561 | static DECLARE_COMPLETION(heartbeat_wait); |
@@ -557,15 +569,13 @@ static void heartbeat_free_recv(struct ipmi_recv_msg *msg) | |||
557 | if (atomic_dec_and_test(&heartbeat_tofree)) | 569 | if (atomic_dec_and_test(&heartbeat_tofree)) |
558 | complete(&heartbeat_wait); | 570 | complete(&heartbeat_wait); |
559 | } | 571 | } |
560 | static struct ipmi_smi_msg heartbeat_smi_msg = | 572 | static struct ipmi_smi_msg heartbeat_smi_msg = { |
561 | { | ||
562 | .done = heartbeat_free_smi | 573 | .done = heartbeat_free_smi |
563 | }; | 574 | }; |
564 | static struct ipmi_recv_msg heartbeat_recv_msg = | 575 | static struct ipmi_recv_msg heartbeat_recv_msg = { |
565 | { | ||
566 | .done = heartbeat_free_recv | 576 | .done = heartbeat_free_recv |
567 | }; | 577 | }; |
568 | 578 | ||
569 | static int ipmi_heartbeat(void) | 579 | static int ipmi_heartbeat(void) |
570 | { | 580 | { |
571 | struct kernel_ipmi_msg msg; | 581 | struct kernel_ipmi_msg msg; |
@@ -580,10 +590,12 @@ static int ipmi_heartbeat(void) | |||
580 | ipmi_watchdog_state = action_val; | 590 | ipmi_watchdog_state = action_val; |
581 | return ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB); | 591 | return ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB); |
582 | } else if (pretimeout_since_last_heartbeat) { | 592 | } else if (pretimeout_since_last_heartbeat) { |
583 | /* A pretimeout occurred, make sure we set the timeout. | 593 | /* |
584 | We don't want to set the action, though, we want to | 594 | * A pretimeout occurred, make sure we set the timeout. |
585 | leave that alone (thus it can't be combined with the | 595 | * We don't want to set the action, though, we want to |
586 | above operation. */ | 596 | * leave that alone (thus it can't be combined with the |
597 | * above operation. | ||
598 | */ | ||
587 | return ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY); | 599 | return ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY); |
588 | } | 600 | } |
589 | 601 | ||
@@ -591,8 +603,10 @@ static int ipmi_heartbeat(void) | |||
591 | 603 | ||
592 | atomic_set(&heartbeat_tofree, 2); | 604 | atomic_set(&heartbeat_tofree, 2); |
593 | 605 | ||
594 | /* Don't reset the timer if we have the timer turned off, that | 606 | /* |
595 | re-enables the watchdog. */ | 607 | * Don't reset the timer if we have the timer turned off, that |
608 | * re-enables the watchdog. | ||
609 | */ | ||
596 | if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE) { | 610 | if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE) { |
597 | mutex_unlock(&heartbeat_lock); | 611 | mutex_unlock(&heartbeat_lock); |
598 | return 0; | 612 | return 0; |
@@ -625,10 +639,12 @@ static int ipmi_heartbeat(void) | |||
625 | wait_for_completion(&heartbeat_wait); | 639 | wait_for_completion(&heartbeat_wait); |
626 | 640 | ||
627 | if (heartbeat_recv_msg.msg.data[0] != 0) { | 641 | if (heartbeat_recv_msg.msg.data[0] != 0) { |
628 | /* Got an error in the heartbeat response. It was already | 642 | /* |
629 | reported in ipmi_wdog_msg_handler, but we should return | 643 | * Got an error in the heartbeat response. It was already |
630 | an error here. */ | 644 | * reported in ipmi_wdog_msg_handler, but we should return |
631 | rv = -EINVAL; | 645 | * an error here. |
646 | */ | ||
647 | rv = -EINVAL; | ||
632 | } | 648 | } |
633 | 649 | ||
634 | mutex_unlock(&heartbeat_lock); | 650 | mutex_unlock(&heartbeat_lock); |
@@ -636,8 +652,7 @@ static int ipmi_heartbeat(void) | |||
636 | return rv; | 652 | return rv; |
637 | } | 653 | } |
638 | 654 | ||
639 | static struct watchdog_info ident = | 655 | static struct watchdog_info ident = { |
640 | { | ||
641 | .options = 0, /* WDIOF_SETTIMEOUT, */ | 656 | .options = 0, /* WDIOF_SETTIMEOUT, */ |
642 | .firmware_version = 1, | 657 | .firmware_version = 1, |
643 | .identity = "IPMI" | 658 | .identity = "IPMI" |
@@ -650,7 +665,7 @@ static int ipmi_ioctl(struct inode *inode, struct file *file, | |||
650 | int i; | 665 | int i; |
651 | int val; | 666 | int val; |
652 | 667 | ||
653 | switch(cmd) { | 668 | switch (cmd) { |
654 | case WDIOC_GETSUPPORT: | 669 | case WDIOC_GETSUPPORT: |
655 | i = copy_to_user(argp, &ident, sizeof(ident)); | 670 | i = copy_to_user(argp, &ident, sizeof(ident)); |
656 | return i ? -EFAULT : 0; | 671 | return i ? -EFAULT : 0; |
@@ -690,15 +705,13 @@ static int ipmi_ioctl(struct inode *inode, struct file *file, | |||
690 | i = copy_from_user(&val, argp, sizeof(int)); | 705 | i = copy_from_user(&val, argp, sizeof(int)); |
691 | if (i) | 706 | if (i) |
692 | return -EFAULT; | 707 | return -EFAULT; |
693 | if (val & WDIOS_DISABLECARD) | 708 | if (val & WDIOS_DISABLECARD) { |
694 | { | ||
695 | ipmi_watchdog_state = WDOG_TIMEOUT_NONE; | 709 | ipmi_watchdog_state = WDOG_TIMEOUT_NONE; |
696 | ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB); | 710 | ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB); |
697 | ipmi_start_timer_on_heartbeat = 0; | 711 | ipmi_start_timer_on_heartbeat = 0; |
698 | } | 712 | } |
699 | 713 | ||
700 | if (val & WDIOS_ENABLECARD) | 714 | if (val & WDIOS_ENABLECARD) { |
701 | { | ||
702 | ipmi_watchdog_state = action_val; | 715 | ipmi_watchdog_state = action_val; |
703 | ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB); | 716 | ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB); |
704 | } | 717 | } |
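A hedged userspace sketch of how the WDIOS_DISABLECARD / WDIOS_ENABLECARD branches above are normally driven, assuming the watchdog is exposed as /dev/watchdog:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
	int fd = open("/dev/watchdog", O_WRONLY);
	int flags;

	if (fd < 0)
		return 1;

	flags = WDIOS_DISABLECARD;	/* stop the IPMI timer */
	ioctl(fd, WDIOC_SETOPTIONS, &flags);

	flags = WDIOS_ENABLECARD;	/* restart with the configured action */
	ioctl(fd, WDIOC_SETOPTIONS, &flags);

	close(fd);
	return 0;
}

Whether closing the node without a magic-close write leaves the timer running depends on the nowayout handling in ipmi_write() below.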
@@ -724,13 +737,13 @@ static ssize_t ipmi_write(struct file *file, | |||
724 | int rv; | 737 | int rv; |
725 | 738 | ||
726 | if (len) { | 739 | if (len) { |
727 | if (!nowayout) { | 740 | if (!nowayout) { |
728 | size_t i; | 741 | size_t i; |
729 | 742 | ||
730 | /* In case it was set long ago */ | 743 | /* In case it was set long ago */ |
731 | expect_close = 0; | 744 | expect_close = 0; |
732 | 745 | ||
733 | for (i = 0; i != len; i++) { | 746 | for (i = 0; i != len; i++) { |
734 | char c; | 747 | char c; |
735 | 748 | ||
736 | if (get_user(c, buf + i)) | 749 | if (get_user(c, buf + i)) |
@@ -758,15 +771,17 @@ static ssize_t ipmi_read(struct file *file, | |||
758 | if (count <= 0) | 771 | if (count <= 0) |
759 | return 0; | 772 | return 0; |
760 | 773 | ||
761 | /* Reading returns if the pretimeout has gone off, and it only does | 774 | /* |
762 | it once per pretimeout. */ | 775 | * Reading returns if the pretimeout has gone off, and it only does |
776 | * it once per pretimeout. | ||
777 | */ | ||
763 | spin_lock(&ipmi_read_lock); | 778 | spin_lock(&ipmi_read_lock); |
764 | if (!data_to_read) { | 779 | if (!data_to_read) { |
765 | if (file->f_flags & O_NONBLOCK) { | 780 | if (file->f_flags & O_NONBLOCK) { |
766 | rv = -EAGAIN; | 781 | rv = -EAGAIN; |
767 | goto out; | 782 | goto out; |
768 | } | 783 | } |
769 | 784 | ||
770 | init_waitqueue_entry(&wait, current); | 785 | init_waitqueue_entry(&wait, current); |
771 | add_wait_queue(&read_q, &wait); | 786 | add_wait_queue(&read_q, &wait); |
772 | while (!data_to_read) { | 787 | while (!data_to_read) { |
@@ -776,7 +791,7 @@ static ssize_t ipmi_read(struct file *file, | |||
776 | spin_lock(&ipmi_read_lock); | 791 | spin_lock(&ipmi_read_lock); |
777 | } | 792 | } |
778 | remove_wait_queue(&read_q, &wait); | 793 | remove_wait_queue(&read_q, &wait); |
779 | 794 | ||
780 | if (signal_pending(current)) { | 795 | if (signal_pending(current)) { |
781 | rv = -ERESTARTSYS; | 796 | rv = -ERESTARTSYS; |
782 | goto out; | 797 | goto out; |
@@ -799,25 +814,27 @@ static ssize_t ipmi_read(struct file *file, | |||
799 | 814 | ||
800 | static int ipmi_open(struct inode *ino, struct file *filep) | 815 | static int ipmi_open(struct inode *ino, struct file *filep) |
801 | { | 816 | { |
802 | switch (iminor(ino)) { | 817 | switch (iminor(ino)) { |
803 | case WATCHDOG_MINOR: | 818 | case WATCHDOG_MINOR: |
804 | if (test_and_set_bit(0, &ipmi_wdog_open)) | 819 | if (test_and_set_bit(0, &ipmi_wdog_open)) |
805 | return -EBUSY; | 820 | return -EBUSY; |
806 | 821 | ||
807 | /* Don't start the timer now, let it start on the | 822 | /* |
808 | first heartbeat. */ | 823 | * Don't start the timer now, let it start on the |
824 | * first heartbeat. | ||
825 | */ | ||
809 | ipmi_start_timer_on_heartbeat = 1; | 826 | ipmi_start_timer_on_heartbeat = 1; |
810 | return nonseekable_open(ino, filep); | 827 | return nonseekable_open(ino, filep); |
811 | 828 | ||
812 | default: | 829 | default: |
813 | return (-ENODEV); | 830 | return (-ENODEV); |
814 | } | 831 | } |
815 | } | 832 | } |
816 | 833 | ||
817 | static unsigned int ipmi_poll(struct file *file, poll_table *wait) | 834 | static unsigned int ipmi_poll(struct file *file, poll_table *wait) |
818 | { | 835 | { |
819 | unsigned int mask = 0; | 836 | unsigned int mask = 0; |
820 | 837 | ||
821 | poll_wait(file, &read_q, wait); | 838 | poll_wait(file, &read_q, wait); |
822 | 839 | ||
823 | spin_lock(&ipmi_read_lock); | 840 | spin_lock(&ipmi_read_lock); |
@@ -851,7 +868,7 @@ static int ipmi_close(struct inode *ino, struct file *filep) | |||
851 | clear_bit(0, &ipmi_wdog_open); | 868 | clear_bit(0, &ipmi_wdog_open); |
852 | } | 869 | } |
853 | 870 | ||
854 | ipmi_fasync (-1, filep, 0); | 871 | ipmi_fasync(-1, filep, 0); |
855 | expect_close = 0; | 872 | expect_close = 0; |
856 | 873 | ||
857 | return 0; | 874 | return 0; |
@@ -882,7 +899,7 @@ static void ipmi_wdog_msg_handler(struct ipmi_recv_msg *msg, | |||
882 | msg->msg.data[0], | 899 | msg->msg.data[0], |
883 | msg->msg.cmd); | 900 | msg->msg.cmd); |
884 | } | 901 | } |
885 | 902 | ||
886 | ipmi_free_recv_msg(msg); | 903 | ipmi_free_recv_msg(msg); |
887 | } | 904 | } |
888 | 905 | ||
@@ -902,14 +919,14 @@ static void ipmi_wdog_pretimeout_handler(void *handler_data) | |||
902 | } | 919 | } |
903 | } | 920 | } |
904 | 921 | ||
905 | /* On some machines, the heartbeat will give | 922 | /* |
906 | an error and not work unless we re-enable | 923 | * On some machines, the heartbeat will give an error and not |
907 | the timer. So do so. */ | 924 | * work unless we re-enable the timer. So do so. |
925 | */ | ||
908 | pretimeout_since_last_heartbeat = 1; | 926 | pretimeout_since_last_heartbeat = 1; |
909 | } | 927 | } |
910 | 928 | ||
911 | static struct ipmi_user_hndl ipmi_hndlrs = | 929 | static struct ipmi_user_hndl ipmi_hndlrs = { |
912 | { | ||
913 | .ipmi_recv_hndl = ipmi_wdog_msg_handler, | 930 | .ipmi_recv_hndl = ipmi_wdog_msg_handler, |
914 | .ipmi_watchdog_pretimeout = ipmi_wdog_pretimeout_handler | 931 | .ipmi_watchdog_pretimeout = ipmi_wdog_pretimeout_handler |
915 | }; | 932 | }; |
@@ -949,8 +966,10 @@ static void ipmi_register_watchdog(int ipmi_intf) | |||
949 | int old_timeout = timeout; | 966 | int old_timeout = timeout; |
950 | int old_preop_val = preop_val; | 967 | int old_preop_val = preop_val; |
951 | 968 | ||
952 | /* Set the pretimeout to go off in a second and give | 969 | /* |
953 | ourselves plenty of time to stop the timer. */ | 970 | * Set the pretimeout to go off in a second and give |
971 | * ourselves plenty of time to stop the timer. | ||
972 | */ | ||
954 | ipmi_watchdog_state = WDOG_TIMEOUT_RESET; | 973 | ipmi_watchdog_state = WDOG_TIMEOUT_RESET; |
955 | preop_val = WDOG_PREOP_NONE; /* Make sure nothing happens */ | 974 | preop_val = WDOG_PREOP_NONE; /* Make sure nothing happens */ |
956 | pretimeout = 99; | 975 | pretimeout = 99; |
@@ -974,7 +993,7 @@ static void ipmi_register_watchdog(int ipmi_intf) | |||
974 | " occur. The NMI pretimeout will" | 993 | " occur. The NMI pretimeout will" |
975 | " likely not work\n"); | 994 | " likely not work\n"); |
976 | } | 995 | } |
977 | out_restore: | 996 | out_restore: |
978 | testing_nmi = 0; | 997 | testing_nmi = 0; |
979 | preop_val = old_preop_val; | 998 | preop_val = old_preop_val; |
980 | pretimeout = old_pretimeout; | 999 | pretimeout = old_pretimeout; |
@@ -1009,9 +1028,11 @@ static void ipmi_unregister_watchdog(int ipmi_intf) | |||
1009 | /* Make sure no one can call us any more. */ | 1028 | /* Make sure no one can call us any more. */ |
1010 | misc_deregister(&ipmi_wdog_miscdev); | 1029 | misc_deregister(&ipmi_wdog_miscdev); |
1011 | 1030 | ||
1012 | /* Wait to make sure the message makes it out. The lower layer has | 1031 | /* |
1013 | pointers to our buffers, we want to make sure they are done before | 1032 | * Wait to make sure the message makes it out. The lower layer has |
1014 | we release our memory. */ | 1033 | * pointers to our buffers, we want to make sure they are done before |
1034 | * we release our memory. | ||
1035 | */ | ||
1015 | while (atomic_read(&set_timeout_tofree)) | 1036 | while (atomic_read(&set_timeout_tofree)) |
1016 | schedule_timeout_uninterruptible(1); | 1037 | schedule_timeout_uninterruptible(1); |
1017 | 1038 | ||
@@ -1052,15 +1073,17 @@ ipmi_nmi(struct notifier_block *self, unsigned long val, void *data) | |||
1052 | return NOTIFY_STOP; | 1073 | return NOTIFY_STOP; |
1053 | } | 1074 | } |
1054 | 1075 | ||
1055 | /* If we are not expecting a timeout, ignore it. */ | 1076 | /* If we are not expecting a timeout, ignore it. */ |
1056 | if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE) | 1077 | if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE) |
1057 | return NOTIFY_OK; | 1078 | return NOTIFY_OK; |
1058 | 1079 | ||
1059 | if (preaction_val != WDOG_PRETIMEOUT_NMI) | 1080 | if (preaction_val != WDOG_PRETIMEOUT_NMI) |
1060 | return NOTIFY_OK; | 1081 | return NOTIFY_OK; |
1061 | 1082 | ||
1062 | /* If no one else handled the NMI, we assume it was the IPMI | 1083 | /* |
1063 | watchdog. */ | 1084 | * If no one else handled the NMI, we assume it was the IPMI |
1085 | * watchdog. | ||
1086 | */ | ||
1064 | if (preop_val == WDOG_PREOP_PANIC) { | 1087 | if (preop_val == WDOG_PREOP_PANIC) { |
1065 | /* On some machines, the heartbeat will give | 1088 | /* On some machines, the heartbeat will give |
1066 | an error and not work unless we re-enable | 1089 | an error and not work unless we re-enable |
@@ -1082,7 +1105,7 @@ static int wdog_reboot_handler(struct notifier_block *this, | |||
1082 | unsigned long code, | 1105 | unsigned long code, |
1083 | void *unused) | 1106 | void *unused) |
1084 | { | 1107 | { |
1085 | static int reboot_event_handled = 0; | 1108 | static int reboot_event_handled; |
1086 | 1109 | ||
1087 | if ((watchdog_user) && (!reboot_event_handled)) { | 1110 | if ((watchdog_user) && (!reboot_event_handled)) { |
1088 | /* Make sure we only do this once. */ | 1111 | /* Make sure we only do this once. */ |
@@ -1115,7 +1138,7 @@ static int wdog_panic_handler(struct notifier_block *this, | |||
1115 | unsigned long event, | 1138 | unsigned long event, |
1116 | void *unused) | 1139 | void *unused) |
1117 | { | 1140 | { |
1118 | static int panic_event_handled = 0; | 1141 | static int panic_event_handled; |
1119 | 1142 | ||
1120 | /* On a panic, if we have a panic timeout, make sure to extend | 1143 | /* On a panic, if we have a panic timeout, make sure to extend |
1121 | the watchdog timer to a reasonable value to complete the | 1144 | the watchdog timer to a reasonable value to complete the |
@@ -1125,7 +1148,7 @@ static int wdog_panic_handler(struct notifier_block *this, | |||
1125 | ipmi_watchdog_state != WDOG_TIMEOUT_NONE) { | 1148 | ipmi_watchdog_state != WDOG_TIMEOUT_NONE) { |
1126 | /* Make sure we do this only once. */ | 1149 | /* Make sure we do this only once. */ |
1127 | panic_event_handled = 1; | 1150 | panic_event_handled = 1; |
1128 | 1151 | ||
1129 | timeout = 255; | 1152 | timeout = 255; |
1130 | pretimeout = 0; | 1153 | pretimeout = 0; |
1131 | panic_halt_ipmi_set_timeout(); | 1154 | panic_halt_ipmi_set_timeout(); |
@@ -1151,8 +1174,7 @@ static void ipmi_smi_gone(int if_num) | |||
1151 | ipmi_unregister_watchdog(if_num); | 1174 | ipmi_unregister_watchdog(if_num); |
1152 | } | 1175 | } |
1153 | 1176 | ||
1154 | static struct ipmi_smi_watcher smi_watcher = | 1177 | static struct ipmi_smi_watcher smi_watcher = { |
1155 | { | ||
1156 | .owner = THIS_MODULE, | 1178 | .owner = THIS_MODULE, |
1157 | .new_smi = ipmi_new_smi, | 1179 | .new_smi = ipmi_new_smi, |
1158 | .smi_gone = ipmi_smi_gone | 1180 | .smi_gone = ipmi_smi_gone |
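
[Editor's note] The ipmi_watchdog hunks above are almost entirely CodingStyle cleanup: winged multi-line comments, opening braces on the same line as the statement or struct declaration, a space after switch, and dropping explicit zero-initializers from static variables. A minimal, self-contained userland sketch of those conventions follows; every name in it is invented for illustration and nothing below comes from the driver itself.

    #include <stdio.h>

    /*
     * Kernel-style block comment: the opening and closing markers get
     * their own lines and every continuation line starts with " * ".
     */
    struct demo_ops {
            void (*show)(int val);
    };

    static void demo_show(int val)
    {
            if (val & 0x1) {                /* opening brace stays on the "if" line */
                    printf("odd\n");
            }

            switch (val) {                  /* a space between "switch" and "(" */
            case 0:
                    printf("zero\n");
                    break;
            default:
                    break;
            }
    }

    static struct demo_ops ops = {          /* "= {" on the declaration line */
            .show = demo_show,
    };

    int main(void)
    {
            ops.show(3);
            return 0;
    }

Static storage is zero-initialized by the C runtime anyway, which is why the explicit "= 0" is dropped from static int reboot_event_handled and panic_event_handled in the hunks above.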
diff --git a/drivers/char/mem.c b/drivers/char/mem.c index e83623ead441..934ffafedaea 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c | |||
@@ -364,6 +364,7 @@ static int mmap_mem(struct file * file, struct vm_area_struct * vma) | |||
364 | return 0; | 364 | return 0; |
365 | } | 365 | } |
366 | 366 | ||
367 | #ifdef CONFIG_DEVKMEM | ||
367 | static int mmap_kmem(struct file * file, struct vm_area_struct * vma) | 368 | static int mmap_kmem(struct file * file, struct vm_area_struct * vma) |
368 | { | 369 | { |
369 | unsigned long pfn; | 370 | unsigned long pfn; |
@@ -384,6 +385,7 @@ static int mmap_kmem(struct file * file, struct vm_area_struct * vma) | |||
384 | vma->vm_pgoff = pfn; | 385 | vma->vm_pgoff = pfn; |
385 | return mmap_mem(file, vma); | 386 | return mmap_mem(file, vma); |
386 | } | 387 | } |
388 | #endif | ||
387 | 389 | ||
388 | #ifdef CONFIG_CRASH_DUMP | 390 | #ifdef CONFIG_CRASH_DUMP |
389 | /* | 391 | /* |
@@ -422,6 +424,7 @@ static ssize_t read_oldmem(struct file *file, char __user *buf, | |||
422 | extern long vread(char *buf, char *addr, unsigned long count); | 424 | extern long vread(char *buf, char *addr, unsigned long count); |
423 | extern long vwrite(char *buf, char *addr, unsigned long count); | 425 | extern long vwrite(char *buf, char *addr, unsigned long count); |
424 | 426 | ||
427 | #ifdef CONFIG_DEVKMEM | ||
425 | /* | 428 | /* |
426 | * This function reads the *virtual* memory as seen by the kernel. | 429 | * This function reads the *virtual* memory as seen by the kernel. |
427 | */ | 430 | */ |
@@ -626,6 +629,7 @@ static ssize_t write_kmem(struct file * file, const char __user * buf, | |||
626 | *ppos = p; | 629 | *ppos = p; |
627 | return virtr + wrote; | 630 | return virtr + wrote; |
628 | } | 631 | } |
632 | #endif | ||
629 | 633 | ||
630 | #ifdef CONFIG_DEVPORT | 634 | #ifdef CONFIG_DEVPORT |
631 | static ssize_t read_port(struct file * file, char __user * buf, | 635 | static ssize_t read_port(struct file * file, char __user * buf, |
@@ -803,6 +807,7 @@ static const struct file_operations mem_fops = { | |||
803 | .get_unmapped_area = get_unmapped_area_mem, | 807 | .get_unmapped_area = get_unmapped_area_mem, |
804 | }; | 808 | }; |
805 | 809 | ||
810 | #ifdef CONFIG_DEVKMEM | ||
806 | static const struct file_operations kmem_fops = { | 811 | static const struct file_operations kmem_fops = { |
807 | .llseek = memory_lseek, | 812 | .llseek = memory_lseek, |
808 | .read = read_kmem, | 813 | .read = read_kmem, |
@@ -811,6 +816,7 @@ static const struct file_operations kmem_fops = { | |||
811 | .open = open_kmem, | 816 | .open = open_kmem, |
812 | .get_unmapped_area = get_unmapped_area_mem, | 817 | .get_unmapped_area = get_unmapped_area_mem, |
813 | }; | 818 | }; |
819 | #endif | ||
814 | 820 | ||
815 | static const struct file_operations null_fops = { | 821 | static const struct file_operations null_fops = { |
816 | .llseek = null_lseek, | 822 | .llseek = null_lseek, |
@@ -889,11 +895,13 @@ static int memory_open(struct inode * inode, struct file * filp) | |||
889 | filp->f_mapping->backing_dev_info = | 895 | filp->f_mapping->backing_dev_info = |
890 | &directly_mappable_cdev_bdi; | 896 | &directly_mappable_cdev_bdi; |
891 | break; | 897 | break; |
898 | #ifdef CONFIG_DEVKMEM | ||
892 | case 2: | 899 | case 2: |
893 | filp->f_op = &kmem_fops; | 900 | filp->f_op = &kmem_fops; |
894 | filp->f_mapping->backing_dev_info = | 901 | filp->f_mapping->backing_dev_info = |
895 | &directly_mappable_cdev_bdi; | 902 | &directly_mappable_cdev_bdi; |
896 | break; | 903 | break; |
904 | #endif | ||
897 | case 3: | 905 | case 3: |
898 | filp->f_op = &null_fops; | 906 | filp->f_op = &null_fops; |
899 | break; | 907 | break; |
@@ -942,7 +950,9 @@ static const struct { | |||
942 | const struct file_operations *fops; | 950 | const struct file_operations *fops; |
943 | } devlist[] = { /* list of minor devices */ | 951 | } devlist[] = { /* list of minor devices */ |
944 | {1, "mem", S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops}, | 952 | {1, "mem", S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops}, |
953 | #ifdef CONFIG_DEVKMEM | ||
945 | {2, "kmem", S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops}, | 954 | {2, "kmem", S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops}, |
955 | #endif | ||
946 | {3, "null", S_IRUGO | S_IWUGO, &null_fops}, | 956 | {3, "null", S_IRUGO | S_IWUGO, &null_fops}, |
947 | #ifdef CONFIG_DEVPORT | 957 | #ifdef CONFIG_DEVPORT |
948 | {4, "port", S_IRUSR | S_IWUSR | S_IRGRP, &port_fops}, | 958 | {4, "port", S_IRUSR | S_IWUSR | S_IRGRP, &port_fops}, |
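
[Editor's note] The mem.c hunks wrap every /dev/kmem code path — mmap_kmem(), the kmem read/write helpers, kmem_fops, the minor-2 case in memory_open() and the devlist entry — in #ifdef CONFIG_DEVKMEM, so the whole device can be configured out. A small userland analogue of that compile-time gating is sketched below; FEATURE_KMEM stands in for the Kconfig symbol and the function names are illustrative only.

    #include <stdio.h>

    /* Normally supplied by the build system, e.g. -DFEATURE_KMEM */
    #define FEATURE_KMEM 1

    #ifdef FEATURE_KMEM
    /* Compiled only when the feature is enabled, like the kmem_fops block. */
    static void kmem_backend(void)
    {
            printf("kmem backend selected\n");
    }
    #endif

    static void open_minor(int minor)
    {
            switch (minor) {
            case 1:
                    printf("mem backend selected\n");
                    break;
    #ifdef FEATURE_KMEM
            case 2:                         /* this case simply disappears   */
                    kmem_backend();         /* when the feature is built out */
                    break;
    #endif
            default:
                    printf("no such device\n");
                    break;
            }
    }

    int main(void)
    {
            open_minor(2);
            return 0;
    }

When the symbol is not defined, the guarded case label and its backend do not exist in the object file at all, which is the point of the kernel change: a build without CONFIG_DEVKMEM carries no /dev/kmem code.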
diff --git a/drivers/char/misc.c b/drivers/char/misc.c index 4d058dadbfcc..eaace0db0ff4 100644 --- a/drivers/char/misc.c +++ b/drivers/char/misc.c | |||
@@ -263,23 +263,26 @@ EXPORT_SYMBOL(misc_deregister); | |||
263 | 263 | ||
264 | static int __init misc_init(void) | 264 | static int __init misc_init(void) |
265 | { | 265 | { |
266 | #ifdef CONFIG_PROC_FS | 266 | int err; |
267 | struct proc_dir_entry *ent; | ||
268 | 267 | ||
269 | ent = create_proc_entry("misc", 0, NULL); | 268 | #ifdef CONFIG_PROC_FS |
270 | if (ent) | 269 | proc_create("misc", 0, NULL, &misc_proc_fops); |
271 | ent->proc_fops = &misc_proc_fops; | ||
272 | #endif | 270 | #endif |
273 | misc_class = class_create(THIS_MODULE, "misc"); | 271 | misc_class = class_create(THIS_MODULE, "misc"); |
272 | err = PTR_ERR(misc_class); | ||
274 | if (IS_ERR(misc_class)) | 273 | if (IS_ERR(misc_class)) |
275 | return PTR_ERR(misc_class); | 274 | goto fail_remove; |
276 | 275 | ||
277 | if (register_chrdev(MISC_MAJOR,"misc",&misc_fops)) { | 276 | err = -EIO; |
278 | printk("unable to get major %d for misc devices\n", | 277 | if (register_chrdev(MISC_MAJOR,"misc",&misc_fops)) |
279 | MISC_MAJOR); | 278 | goto fail_printk; |
280 | class_destroy(misc_class); | ||
281 | return -EIO; | ||
282 | } | ||
283 | return 0; | 279 | return 0; |
280 | |||
281 | fail_printk: | ||
282 | printk("unable to get major %d for misc devices\n", MISC_MAJOR); | ||
283 | class_destroy(misc_class); | ||
284 | fail_remove: | ||
285 | remove_proc_entry("misc", NULL); | ||
286 | return err; | ||
284 | } | 287 | } |
285 | subsys_initcall(misc_init); | 288 | subsys_initcall(misc_init); |
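
[Editor's note] misc_init() above is reworked around two ideas: proc_create() registers the proc entry together with its file_operations in one call, and failures unwind through a goto ladder that undoes earlier setup in reverse order (class_destroy(), then remove_proc_entry()). A self-contained userland sketch of that unwind pattern follows; the "resources" are faked with malloc() and all names are invented.

    #include <stdio.h>
    #include <stdlib.h>

    /* fail_step lets main() exercise each unwind path. */
    static int init_subsystem(int fail_step)
    {
            int err = -1;
            char *proc_entry, *device_class;

            proc_entry = malloc(32);        /* stands in for proc_create()  */
            if (!proc_entry)
                    return err;

            device_class = malloc(32);      /* stands in for class_create() */
            err = -2;
            if (!device_class || fail_step == 1)
                    goto fail_remove;

            err = -3;                       /* stands in for register_chrdev() */
            if (fail_step == 2)
                    goto fail_class;

            /* A real driver keeps these; freed here only to avoid a demo leak. */
            free(device_class);
            free(proc_entry);
            return 0;

    fail_class:
            free(device_class);             /* undo class_create()  */
    fail_remove:
            free(proc_entry);               /* undo proc_create()   */
            return err;
    }

    int main(void)
    {
            printf("ok path:   %d\n", init_subsystem(0));
            printf("late fail: %d\n", init_subsystem(2));
            return 0;
    }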
diff --git a/drivers/char/pcmcia/ipwireless/hardware.c b/drivers/char/pcmcia/ipwireless/hardware.c index 1f978ff87fa8..fa9d3c945f31 100644 --- a/drivers/char/pcmcia/ipwireless/hardware.c +++ b/drivers/char/pcmcia/ipwireless/hardware.c | |||
@@ -354,32 +354,6 @@ struct ipw_rx_packet { | |||
354 | unsigned int channel_idx; | 354 | unsigned int channel_idx; |
355 | }; | 355 | }; |
356 | 356 | ||
357 | #ifdef IPWIRELESS_STATE_DEBUG | ||
358 | int ipwireless_dump_hardware_state(char *p, size_t limit, | ||
359 | struct ipw_hardware *hw) | ||
360 | { | ||
361 | return snprintf(p, limit, | ||
362 | "debug: initializing=%d\n" | ||
363 | "debug: tx_ready=%d\n" | ||
364 | "debug: tx_queued=%d\n" | ||
365 | "debug: rx_ready=%d\n" | ||
366 | "debug: rx_bytes_queued=%d\n" | ||
367 | "debug: blocking_rx=%d\n" | ||
368 | "debug: removed=%d\n" | ||
369 | "debug: hardware.shutting_down=%d\n" | ||
370 | "debug: to_setup=%d\n", | ||
371 | hw->initializing, | ||
372 | hw->tx_ready, | ||
373 | hw->tx_queued, | ||
374 | hw->rx_ready, | ||
375 | hw->rx_bytes_queued, | ||
376 | hw->blocking_rx, | ||
377 | hw->removed, | ||
378 | hw->shutting_down, | ||
379 | hw->to_setup); | ||
380 | } | ||
381 | #endif | ||
382 | |||
383 | static char *data_type(const unsigned char *buf, unsigned length) | 357 | static char *data_type(const unsigned char *buf, unsigned length) |
384 | { | 358 | { |
385 | struct nl_packet_header *hdr = (struct nl_packet_header *) buf; | 359 | struct nl_packet_header *hdr = (struct nl_packet_header *) buf; |
diff --git a/drivers/char/pcmcia/ipwireless/hardware.h b/drivers/char/pcmcia/ipwireless/hardware.h index c83190ffb0e7..19ce5eb266b1 100644 --- a/drivers/char/pcmcia/ipwireless/hardware.h +++ b/drivers/char/pcmcia/ipwireless/hardware.h | |||
@@ -58,7 +58,5 @@ void ipwireless_init_hardware_v1(struct ipw_hardware *hw, | |||
58 | void *reboot_cb_data); | 58 | void *reboot_cb_data); |
59 | void ipwireless_init_hardware_v2_v3(struct ipw_hardware *hw); | 59 | void ipwireless_init_hardware_v2_v3(struct ipw_hardware *hw); |
60 | void ipwireless_sleep(unsigned int tenths); | 60 | void ipwireless_sleep(unsigned int tenths); |
61 | int ipwireless_dump_hardware_state(char *p, size_t limit, | ||
62 | struct ipw_hardware *hw); | ||
63 | 61 | ||
64 | #endif | 62 | #endif |
diff --git a/drivers/char/pcmcia/ipwireless/network.c b/drivers/char/pcmcia/ipwireless/network.c index d793e68b3e0d..fe914d34f7f6 100644 --- a/drivers/char/pcmcia/ipwireless/network.c +++ b/drivers/char/pcmcia/ipwireless/network.c | |||
@@ -63,21 +63,6 @@ struct ipw_network { | |||
63 | struct work_struct work_go_offline; | 63 | struct work_struct work_go_offline; |
64 | }; | 64 | }; |
65 | 65 | ||
66 | |||
67 | #ifdef IPWIRELESS_STATE_DEBUG | ||
68 | int ipwireless_dump_network_state(char *p, size_t limit, | ||
69 | struct ipw_network *network) | ||
70 | { | ||
71 | return snprintf(p, limit, | ||
72 | "debug: ppp_blocked=%d\n" | ||
73 | "debug: outgoing_packets_queued=%d\n" | ||
74 | "debug: network.shutting_down=%d\n", | ||
75 | network->ppp_blocked, | ||
76 | network->outgoing_packets_queued, | ||
77 | network->shutting_down); | ||
78 | } | ||
79 | #endif | ||
80 | |||
81 | static void notify_packet_sent(void *callback_data, unsigned int packet_length) | 66 | static void notify_packet_sent(void *callback_data, unsigned int packet_length) |
82 | { | 67 | { |
83 | struct ipw_network *network = callback_data; | 68 | struct ipw_network *network = callback_data; |
diff --git a/drivers/char/pcmcia/ipwireless/network.h b/drivers/char/pcmcia/ipwireless/network.h index b0e1e952fd14..ccacd26fc7ef 100644 --- a/drivers/char/pcmcia/ipwireless/network.h +++ b/drivers/char/pcmcia/ipwireless/network.h | |||
@@ -49,7 +49,4 @@ void ipwireless_ppp_close(struct ipw_network *net); | |||
49 | int ipwireless_ppp_channel_index(struct ipw_network *net); | 49 | int ipwireless_ppp_channel_index(struct ipw_network *net); |
50 | int ipwireless_ppp_unit_number(struct ipw_network *net); | 50 | int ipwireless_ppp_unit_number(struct ipw_network *net); |
51 | 51 | ||
52 | int ipwireless_dump_network_state(char *p, size_t limit, | ||
53 | struct ipw_network *net); | ||
54 | |||
55 | #endif | 52 | #endif |
diff --git a/drivers/char/random.c b/drivers/char/random.c index f43c89f7c449..0cf98bd4f2d2 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c | |||
@@ -272,7 +272,7 @@ static int random_write_wakeup_thresh = 128; | |||
272 | 272 | ||
273 | static int trickle_thresh __read_mostly = INPUT_POOL_WORDS * 28; | 273 | static int trickle_thresh __read_mostly = INPUT_POOL_WORDS * 28; |
274 | 274 | ||
275 | static DEFINE_PER_CPU(int, trickle_count) = 0; | 275 | static DEFINE_PER_CPU(int, trickle_count); |
276 | 276 | ||
277 | /* | 277 | /* |
278 | * A pool of size .poolwords is stirred with a primitive polynomial | 278 | * A pool of size .poolwords is stirred with a primitive polynomial |
@@ -370,17 +370,19 @@ static struct poolinfo { | |||
370 | */ | 370 | */ |
371 | static DECLARE_WAIT_QUEUE_HEAD(random_read_wait); | 371 | static DECLARE_WAIT_QUEUE_HEAD(random_read_wait); |
372 | static DECLARE_WAIT_QUEUE_HEAD(random_write_wait); | 372 | static DECLARE_WAIT_QUEUE_HEAD(random_write_wait); |
373 | static struct fasync_struct *fasync; | ||
373 | 374 | ||
374 | #if 0 | 375 | #if 0 |
375 | static int debug = 0; | 376 | static int debug; |
376 | module_param(debug, bool, 0644); | 377 | module_param(debug, bool, 0644); |
377 | #define DEBUG_ENT(fmt, arg...) do { if (debug) \ | 378 | #define DEBUG_ENT(fmt, arg...) do { \ |
378 | printk(KERN_DEBUG "random %04d %04d %04d: " \ | 379 | if (debug) \ |
379 | fmt,\ | 380 | printk(KERN_DEBUG "random %04d %04d %04d: " \ |
380 | input_pool.entropy_count,\ | 381 | fmt,\ |
381 | blocking_pool.entropy_count,\ | 382 | input_pool.entropy_count,\ |
382 | nonblocking_pool.entropy_count,\ | 383 | blocking_pool.entropy_count,\ |
383 | ## arg); } while (0) | 384 | nonblocking_pool.entropy_count,\ |
385 | ## arg); } while (0) | ||
384 | #else | 386 | #else |
385 | #define DEBUG_ENT(fmt, arg...) do {} while (0) | 387 | #define DEBUG_ENT(fmt, arg...) do {} while (0) |
386 | #endif | 388 | #endif |
@@ -394,7 +396,7 @@ module_param(debug, bool, 0644); | |||
394 | 396 | ||
395 | struct entropy_store; | 397 | struct entropy_store; |
396 | struct entropy_store { | 398 | struct entropy_store { |
397 | /* mostly-read data: */ | 399 | /* read-only data: */ |
398 | struct poolinfo *poolinfo; | 400 | struct poolinfo *poolinfo; |
399 | __u32 *pool; | 401 | __u32 *pool; |
400 | const char *name; | 402 | const char *name; |
@@ -402,7 +404,7 @@ struct entropy_store { | |||
402 | struct entropy_store *pull; | 404 | struct entropy_store *pull; |
403 | 405 | ||
404 | /* read-write data: */ | 406 | /* read-write data: */ |
405 | spinlock_t lock ____cacheline_aligned_in_smp; | 407 | spinlock_t lock; |
406 | unsigned add_ptr; | 408 | unsigned add_ptr; |
407 | int entropy_count; | 409 | int entropy_count; |
408 | int input_rotate; | 410 | int input_rotate; |
@@ -438,25 +440,26 @@ static struct entropy_store nonblocking_pool = { | |||
438 | }; | 440 | }; |
439 | 441 | ||
440 | /* | 442 | /* |
441 | * This function adds a byte into the entropy "pool". It does not | 443 | * This function adds bytes into the entropy "pool". It does not |
442 | * update the entropy estimate. The caller should call | 444 | * update the entropy estimate. The caller should call |
443 | * credit_entropy_store if this is appropriate. | 445 | * credit_entropy_bits if this is appropriate. |
444 | * | 446 | * |
445 | * The pool is stirred with a primitive polynomial of the appropriate | 447 | * The pool is stirred with a primitive polynomial of the appropriate |
446 | * degree, and then twisted. We twist by three bits at a time because | 448 | * degree, and then twisted. We twist by three bits at a time because |
447 | * it's cheap to do so and helps slightly in the expected case where | 449 | * it's cheap to do so and helps slightly in the expected case where |
448 | * the entropy is concentrated in the low-order bits. | 450 | * the entropy is concentrated in the low-order bits. |
449 | */ | 451 | */ |
450 | static void __add_entropy_words(struct entropy_store *r, const __u32 *in, | 452 | static void mix_pool_bytes_extract(struct entropy_store *r, const void *in, |
451 | int nwords, __u32 out[16]) | 453 | int nbytes, __u8 out[64]) |
452 | { | 454 | { |
453 | static __u32 const twist_table[8] = { | 455 | static __u32 const twist_table[8] = { |
454 | 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158, | 456 | 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158, |
455 | 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 }; | 457 | 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 }; |
456 | unsigned long i, add_ptr, tap1, tap2, tap3, tap4, tap5; | 458 | unsigned long i, j, tap1, tap2, tap3, tap4, tap5; |
457 | int new_rotate, input_rotate; | 459 | int input_rotate; |
458 | int wordmask = r->poolinfo->poolwords - 1; | 460 | int wordmask = r->poolinfo->poolwords - 1; |
459 | __u32 w, next_w; | 461 | const char *bytes = in; |
462 | __u32 w; | ||
460 | unsigned long flags; | 463 | unsigned long flags; |
461 | 464 | ||
462 | /* Taps are constant, so we can load them without holding r->lock. */ | 465 | /* Taps are constant, so we can load them without holding r->lock. */ |
@@ -465,78 +468,76 @@ static void __add_entropy_words(struct entropy_store *r, const __u32 *in, | |||
465 | tap3 = r->poolinfo->tap3; | 468 | tap3 = r->poolinfo->tap3; |
466 | tap4 = r->poolinfo->tap4; | 469 | tap4 = r->poolinfo->tap4; |
467 | tap5 = r->poolinfo->tap5; | 470 | tap5 = r->poolinfo->tap5; |
468 | next_w = *in++; | ||
469 | 471 | ||
470 | spin_lock_irqsave(&r->lock, flags); | 472 | spin_lock_irqsave(&r->lock, flags); |
471 | prefetch_range(r->pool, wordmask); | ||
472 | input_rotate = r->input_rotate; | 473 | input_rotate = r->input_rotate; |
473 | add_ptr = r->add_ptr; | 474 | i = r->add_ptr; |
474 | 475 | ||
475 | while (nwords--) { | 476 | /* mix one byte at a time to simplify size handling and churn faster */ |
476 | w = rol32(next_w, input_rotate); | 477 | while (nbytes--) { |
477 | if (nwords > 0) | 478 | w = rol32(*bytes++, input_rotate & 31); |
478 | next_w = *in++; | 479 | i = (i - 1) & wordmask; |
479 | i = add_ptr = (add_ptr - 1) & wordmask; | ||
480 | /* | ||
481 | * Normally, we add 7 bits of rotation to the pool. | ||
482 | * At the beginning of the pool, add an extra 7 bits | ||
483 | * rotation, so that successive passes spread the | ||
484 | * input bits across the pool evenly. | ||
485 | */ | ||
486 | new_rotate = input_rotate + 14; | ||
487 | if (i) | ||
488 | new_rotate = input_rotate + 7; | ||
489 | input_rotate = new_rotate & 31; | ||
490 | 480 | ||
491 | /* XOR in the various taps */ | 481 | /* XOR in the various taps */ |
482 | w ^= r->pool[i]; | ||
492 | w ^= r->pool[(i + tap1) & wordmask]; | 483 | w ^= r->pool[(i + tap1) & wordmask]; |
493 | w ^= r->pool[(i + tap2) & wordmask]; | 484 | w ^= r->pool[(i + tap2) & wordmask]; |
494 | w ^= r->pool[(i + tap3) & wordmask]; | 485 | w ^= r->pool[(i + tap3) & wordmask]; |
495 | w ^= r->pool[(i + tap4) & wordmask]; | 486 | w ^= r->pool[(i + tap4) & wordmask]; |
496 | w ^= r->pool[(i + tap5) & wordmask]; | 487 | w ^= r->pool[(i + tap5) & wordmask]; |
497 | w ^= r->pool[i]; | 488 | |
489 | /* Mix the result back in with a twist */ | ||
498 | r->pool[i] = (w >> 3) ^ twist_table[w & 7]; | 490 | r->pool[i] = (w >> 3) ^ twist_table[w & 7]; |
491 | |||
492 | /* | ||
493 | * Normally, we add 7 bits of rotation to the pool. | ||
494 | * At the beginning of the pool, add an extra 7 bits | ||
495 | * rotation, so that successive passes spread the | ||
496 | * input bits across the pool evenly. | ||
497 | */ | ||
498 | input_rotate += i ? 7 : 14; | ||
499 | } | 499 | } |
500 | 500 | ||
501 | r->input_rotate = input_rotate; | 501 | r->input_rotate = input_rotate; |
502 | r->add_ptr = add_ptr; | 502 | r->add_ptr = i; |
503 | 503 | ||
504 | if (out) { | 504 | if (out) |
505 | for (i = 0; i < 16; i++) { | 505 | for (j = 0; j < 16; j++) |
506 | out[i] = r->pool[add_ptr]; | 506 | ((__u32 *)out)[j] = r->pool[(i - j) & wordmask]; |
507 | add_ptr = (add_ptr - 1) & wordmask; | ||
508 | } | ||
509 | } | ||
510 | 507 | ||
511 | spin_unlock_irqrestore(&r->lock, flags); | 508 | spin_unlock_irqrestore(&r->lock, flags); |
512 | } | 509 | } |
513 | 510 | ||
514 | static inline void add_entropy_words(struct entropy_store *r, const __u32 *in, | 511 | static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes) |
515 | int nwords) | ||
516 | { | 512 | { |
517 | __add_entropy_words(r, in, nwords, NULL); | 513 | mix_pool_bytes_extract(r, in, bytes, NULL); |
518 | } | 514 | } |
519 | 515 | ||
520 | /* | 516 | /* |
521 | * Credit (or debit) the entropy store with n bits of entropy | 517 | * Credit (or debit) the entropy store with n bits of entropy |
522 | */ | 518 | */ |
523 | static void credit_entropy_store(struct entropy_store *r, int nbits) | 519 | static void credit_entropy_bits(struct entropy_store *r, int nbits) |
524 | { | 520 | { |
525 | unsigned long flags; | 521 | unsigned long flags; |
526 | 522 | ||
523 | if (!nbits) | ||
524 | return; | ||
525 | |||
527 | spin_lock_irqsave(&r->lock, flags); | 526 | spin_lock_irqsave(&r->lock, flags); |
528 | 527 | ||
529 | if (r->entropy_count + nbits < 0) { | 528 | DEBUG_ENT("added %d entropy credits to %s\n", nbits, r->name); |
530 | DEBUG_ENT("negative entropy/overflow (%d+%d)\n", | 529 | r->entropy_count += nbits; |
531 | r->entropy_count, nbits); | 530 | if (r->entropy_count < 0) { |
531 | DEBUG_ENT("negative entropy/overflow\n"); | ||
532 | r->entropy_count = 0; | 532 | r->entropy_count = 0; |
533 | } else if (r->entropy_count + nbits > r->poolinfo->POOLBITS) { | 533 | } else if (r->entropy_count > r->poolinfo->POOLBITS) |
534 | r->entropy_count = r->poolinfo->POOLBITS; | 534 | r->entropy_count = r->poolinfo->POOLBITS; |
535 | } else { | 535 | |
536 | r->entropy_count += nbits; | 536 | /* should we wake readers? */ |
537 | if (nbits) | 537 | if (r == &input_pool && |
538 | DEBUG_ENT("added %d entropy credits to %s\n", | 538 | r->entropy_count >= random_read_wakeup_thresh) { |
539 | nbits, r->name); | 539 | wake_up_interruptible(&random_read_wait); |
540 | kill_fasync(&fasync, SIGIO, POLL_IN); | ||
540 | } | 541 | } |
541 | 542 | ||
542 | spin_unlock_irqrestore(&r->lock, flags); | 543 | spin_unlock_irqrestore(&r->lock, flags); |
@@ -551,7 +552,7 @@ static void credit_entropy_store(struct entropy_store *r, int nbits) | |||
551 | /* There is one of these per entropy source */ | 552 | /* There is one of these per entropy source */ |
552 | struct timer_rand_state { | 553 | struct timer_rand_state { |
553 | cycles_t last_time; | 554 | cycles_t last_time; |
554 | long last_delta,last_delta2; | 555 | long last_delta, last_delta2; |
555 | unsigned dont_count_entropy:1; | 556 | unsigned dont_count_entropy:1; |
556 | }; | 557 | }; |
557 | 558 | ||
@@ -586,7 +587,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num) | |||
586 | sample.jiffies = jiffies; | 587 | sample.jiffies = jiffies; |
587 | sample.cycles = get_cycles(); | 588 | sample.cycles = get_cycles(); |
588 | sample.num = num; | 589 | sample.num = num; |
589 | add_entropy_words(&input_pool, (u32 *)&sample, sizeof(sample)/4); | 590 | mix_pool_bytes(&input_pool, &sample, sizeof(sample)); |
590 | 591 | ||
591 | /* | 592 | /* |
592 | * Calculate number of bits of randomness we probably added. | 593 | * Calculate number of bits of randomness we probably added. |
@@ -620,13 +621,9 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num) | |||
620 | * Round down by 1 bit on general principles, | 621 | * Round down by 1 bit on general principles, |
621 | * and limit entropy entimate to 12 bits. | 622 | * and limit entropy entimate to 12 bits. |
622 | */ | 623 | */ |
623 | credit_entropy_store(&input_pool, | 624 | credit_entropy_bits(&input_pool, |
624 | min_t(int, fls(delta>>1), 11)); | 625 | min_t(int, fls(delta>>1), 11)); |
625 | } | 626 | } |
626 | |||
627 | if(input_pool.entropy_count >= random_read_wakeup_thresh) | ||
628 | wake_up_interruptible(&random_read_wait); | ||
629 | |||
630 | out: | 627 | out: |
631 | preempt_enable(); | 628 | preempt_enable(); |
632 | } | 629 | } |
@@ -677,7 +674,7 @@ void add_disk_randomness(struct gendisk *disk) | |||
677 | * | 674 | * |
678 | *********************************************************************/ | 675 | *********************************************************************/ |
679 | 676 | ||
680 | static ssize_t extract_entropy(struct entropy_store *r, void * buf, | 677 | static ssize_t extract_entropy(struct entropy_store *r, void *buf, |
681 | size_t nbytes, int min, int rsvd); | 678 | size_t nbytes, int min, int rsvd); |
682 | 679 | ||
683 | /* | 680 | /* |
@@ -704,10 +701,10 @@ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes) | |||
704 | "(%d of %d requested)\n", | 701 | "(%d of %d requested)\n", |
705 | r->name, bytes * 8, nbytes * 8, r->entropy_count); | 702 | r->name, bytes * 8, nbytes * 8, r->entropy_count); |
706 | 703 | ||
707 | bytes=extract_entropy(r->pull, tmp, bytes, | 704 | bytes = extract_entropy(r->pull, tmp, bytes, |
708 | random_read_wakeup_thresh / 8, rsvd); | 705 | random_read_wakeup_thresh / 8, rsvd); |
709 | add_entropy_words(r, tmp, (bytes + 3) / 4); | 706 | mix_pool_bytes(r, tmp, bytes); |
710 | credit_entropy_store(r, bytes*8); | 707 | credit_entropy_bits(r, bytes*8); |
711 | } | 708 | } |
712 | } | 709 | } |
713 | 710 | ||
@@ -744,13 +741,15 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min, | |||
744 | if (r->limit && nbytes + reserved >= r->entropy_count / 8) | 741 | if (r->limit && nbytes + reserved >= r->entropy_count / 8) |
745 | nbytes = r->entropy_count/8 - reserved; | 742 | nbytes = r->entropy_count/8 - reserved; |
746 | 743 | ||
747 | if(r->entropy_count / 8 >= nbytes + reserved) | 744 | if (r->entropy_count / 8 >= nbytes + reserved) |
748 | r->entropy_count -= nbytes*8; | 745 | r->entropy_count -= nbytes*8; |
749 | else | 746 | else |
750 | r->entropy_count = reserved; | 747 | r->entropy_count = reserved; |
751 | 748 | ||
752 | if (r->entropy_count < random_write_wakeup_thresh) | 749 | if (r->entropy_count < random_write_wakeup_thresh) { |
753 | wake_up_interruptible(&random_write_wait); | 750 | wake_up_interruptible(&random_write_wait); |
751 | kill_fasync(&fasync, SIGIO, POLL_OUT); | ||
752 | } | ||
754 | } | 753 | } |
755 | 754 | ||
756 | DEBUG_ENT("debiting %d entropy credits from %s%s\n", | 755 | DEBUG_ENT("debiting %d entropy credits from %s%s\n", |
@@ -764,45 +763,46 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min, | |||
764 | static void extract_buf(struct entropy_store *r, __u8 *out) | 763 | static void extract_buf(struct entropy_store *r, __u8 *out) |
765 | { | 764 | { |
766 | int i; | 765 | int i; |
767 | __u32 data[16], buf[5 + SHA_WORKSPACE_WORDS]; | 766 | __u32 hash[5], workspace[SHA_WORKSPACE_WORDS]; |
767 | __u8 extract[64]; | ||
768 | |||
769 | /* Generate a hash across the pool, 16 words (512 bits) at a time */ | ||
770 | sha_init(hash); | ||
771 | for (i = 0; i < r->poolinfo->poolwords; i += 16) | ||
772 | sha_transform(hash, (__u8 *)(r->pool + i), workspace); | ||
768 | 773 | ||
769 | sha_init(buf); | ||
770 | /* | 774 | /* |
771 | * As we hash the pool, we mix intermediate values of | 775 | * We mix the hash back into the pool to prevent backtracking |
772 | * the hash back into the pool. This eliminates | 776 | * attacks (where the attacker knows the state of the pool |
773 | * backtracking attacks (where the attacker knows | 777 | * plus the current outputs, and attempts to find previous |
774 | * the state of the pool plus the current outputs, and | 778 | * ouputs), unless the hash function can be inverted. By |
775 | * attempts to find previous ouputs), unless the hash | 779 | * mixing at least a SHA1 worth of hash data back, we make |
776 | * function can be inverted. | 780 | * brute-forcing the feedback as hard as brute-forcing the |
781 | * hash. | ||
777 | */ | 782 | */ |
778 | for (i = 0; i < r->poolinfo->poolwords; i += 16) { | 783 | mix_pool_bytes_extract(r, hash, sizeof(hash), extract); |
779 | /* hash blocks of 16 words = 512 bits */ | ||
780 | sha_transform(buf, (__u8 *)(r->pool + i), buf + 5); | ||
781 | /* feed back portion of the resulting hash */ | ||
782 | add_entropy_words(r, &buf[i % 5], 1); | ||
783 | } | ||
784 | 784 | ||
785 | /* | 785 | /* |
786 | * To avoid duplicates, we atomically extract a | 786 | * To avoid duplicates, we atomically extract a portion of the |
787 | * portion of the pool while mixing, and hash one | 787 | * pool while mixing, and hash one final time. |
788 | * final time. | ||
789 | */ | 788 | */ |
790 | __add_entropy_words(r, &buf[i % 5], 1, data); | 789 | sha_transform(hash, extract, workspace); |
791 | sha_transform(buf, (__u8 *)data, buf + 5); | 790 | memset(extract, 0, sizeof(extract)); |
791 | memset(workspace, 0, sizeof(workspace)); | ||
792 | 792 | ||
793 | /* | 793 | /* |
794 | * In case the hash function has some recognizable | 794 | * In case the hash function has some recognizable output |
795 | * output pattern, we fold it in half. | 795 | * pattern, we fold it in half. Thus, we always feed back |
796 | * twice as much data as we output. | ||
796 | */ | 797 | */ |
797 | 798 | hash[0] ^= hash[3]; | |
798 | buf[0] ^= buf[3]; | 799 | hash[1] ^= hash[4]; |
799 | buf[1] ^= buf[4]; | 800 | hash[2] ^= rol32(hash[2], 16); |
800 | buf[2] ^= rol32(buf[2], 16); | 801 | memcpy(out, hash, EXTRACT_SIZE); |
801 | memcpy(out, buf, EXTRACT_SIZE); | 802 | memset(hash, 0, sizeof(hash)); |
802 | memset(buf, 0, sizeof(buf)); | ||
803 | } | 803 | } |
804 | 804 | ||
805 | static ssize_t extract_entropy(struct entropy_store *r, void * buf, | 805 | static ssize_t extract_entropy(struct entropy_store *r, void *buf, |
806 | size_t nbytes, int min, int reserved) | 806 | size_t nbytes, int min, int reserved) |
807 | { | 807 | { |
808 | ssize_t ret = 0, i; | 808 | ssize_t ret = 0, i; |
@@ -872,7 +872,6 @@ void get_random_bytes(void *buf, int nbytes) | |||
872 | { | 872 | { |
873 | extract_entropy(&nonblocking_pool, buf, nbytes, 0, 0); | 873 | extract_entropy(&nonblocking_pool, buf, nbytes, 0, 0); |
874 | } | 874 | } |
875 | |||
876 | EXPORT_SYMBOL(get_random_bytes); | 875 | EXPORT_SYMBOL(get_random_bytes); |
877 | 876 | ||
878 | /* | 877 | /* |
@@ -894,12 +893,11 @@ static void init_std_data(struct entropy_store *r) | |||
894 | spin_unlock_irqrestore(&r->lock, flags); | 893 | spin_unlock_irqrestore(&r->lock, flags); |
895 | 894 | ||
896 | now = ktime_get_real(); | 895 | now = ktime_get_real(); |
897 | add_entropy_words(r, (__u32 *)&now, sizeof(now)/4); | 896 | mix_pool_bytes(r, &now, sizeof(now)); |
898 | add_entropy_words(r, (__u32 *)utsname(), | 897 | mix_pool_bytes(r, utsname(), sizeof(*(utsname()))); |
899 | sizeof(*(utsname()))/4); | ||
900 | } | 898 | } |
901 | 899 | ||
902 | static int __init rand_initialize(void) | 900 | static int rand_initialize(void) |
903 | { | 901 | { |
904 | init_std_data(&input_pool); | 902 | init_std_data(&input_pool); |
905 | init_std_data(&blocking_pool); | 903 | init_std_data(&blocking_pool); |
@@ -940,7 +938,7 @@ void rand_initialize_disk(struct gendisk *disk) | |||
940 | #endif | 938 | #endif |
941 | 939 | ||
942 | static ssize_t | 940 | static ssize_t |
943 | random_read(struct file * file, char __user * buf, size_t nbytes, loff_t *ppos) | 941 | random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) |
944 | { | 942 | { |
945 | ssize_t n, retval = 0, count = 0; | 943 | ssize_t n, retval = 0, count = 0; |
946 | 944 | ||
@@ -1002,8 +1000,7 @@ random_read(struct file * file, char __user * buf, size_t nbytes, loff_t *ppos) | |||
1002 | } | 1000 | } |
1003 | 1001 | ||
1004 | static ssize_t | 1002 | static ssize_t |
1005 | urandom_read(struct file * file, char __user * buf, | 1003 | urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) |
1006 | size_t nbytes, loff_t *ppos) | ||
1007 | { | 1004 | { |
1008 | return extract_entropy_user(&nonblocking_pool, buf, nbytes); | 1005 | return extract_entropy_user(&nonblocking_pool, buf, nbytes); |
1009 | } | 1006 | } |
@@ -1038,16 +1035,15 @@ write_pool(struct entropy_store *r, const char __user *buffer, size_t count) | |||
1038 | count -= bytes; | 1035 | count -= bytes; |
1039 | p += bytes; | 1036 | p += bytes; |
1040 | 1037 | ||
1041 | add_entropy_words(r, buf, (bytes + 3) / 4); | 1038 | mix_pool_bytes(r, buf, bytes); |
1042 | cond_resched(); | 1039 | cond_resched(); |
1043 | } | 1040 | } |
1044 | 1041 | ||
1045 | return 0; | 1042 | return 0; |
1046 | } | 1043 | } |
1047 | 1044 | ||
1048 | static ssize_t | 1045 | static ssize_t random_write(struct file *file, const char __user *buffer, |
1049 | random_write(struct file * file, const char __user * buffer, | 1046 | size_t count, loff_t *ppos) |
1050 | size_t count, loff_t *ppos) | ||
1051 | { | 1047 | { |
1052 | size_t ret; | 1048 | size_t ret; |
1053 | struct inode *inode = file->f_path.dentry->d_inode; | 1049 | struct inode *inode = file->f_path.dentry->d_inode; |
@@ -1064,9 +1060,7 @@ random_write(struct file * file, const char __user * buffer, | |||
1064 | return (ssize_t)count; | 1060 | return (ssize_t)count; |
1065 | } | 1061 | } |
1066 | 1062 | ||
1067 | static int | 1063 | static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) |
1068 | random_ioctl(struct inode * inode, struct file * file, | ||
1069 | unsigned int cmd, unsigned long arg) | ||
1070 | { | 1064 | { |
1071 | int size, ent_count; | 1065 | int size, ent_count; |
1072 | int __user *p = (int __user *)arg; | 1066 | int __user *p = (int __user *)arg; |
@@ -1074,8 +1068,8 @@ random_ioctl(struct inode * inode, struct file * file, | |||
1074 | 1068 | ||
1075 | switch (cmd) { | 1069 | switch (cmd) { |
1076 | case RNDGETENTCNT: | 1070 | case RNDGETENTCNT: |
1077 | ent_count = input_pool.entropy_count; | 1071 | /* inherently racy, no point locking */ |
1078 | if (put_user(ent_count, p)) | 1072 | if (put_user(input_pool.entropy_count, p)) |
1079 | return -EFAULT; | 1073 | return -EFAULT; |
1080 | return 0; | 1074 | return 0; |
1081 | case RNDADDTOENTCNT: | 1075 | case RNDADDTOENTCNT: |
@@ -1083,13 +1077,7 @@ random_ioctl(struct inode * inode, struct file * file, | |||
1083 | return -EPERM; | 1077 | return -EPERM; |
1084 | if (get_user(ent_count, p)) | 1078 | if (get_user(ent_count, p)) |
1085 | return -EFAULT; | 1079 | return -EFAULT; |
1086 | credit_entropy_store(&input_pool, ent_count); | 1080 | credit_entropy_bits(&input_pool, ent_count); |
1087 | /* | ||
1088 | * Wake up waiting processes if we have enough | ||
1089 | * entropy. | ||
1090 | */ | ||
1091 | if (input_pool.entropy_count >= random_read_wakeup_thresh) | ||
1092 | wake_up_interruptible(&random_read_wait); | ||
1093 | return 0; | 1081 | return 0; |
1094 | case RNDADDENTROPY: | 1082 | case RNDADDENTROPY: |
1095 | if (!capable(CAP_SYS_ADMIN)) | 1083 | if (!capable(CAP_SYS_ADMIN)) |
@@ -1104,39 +1092,45 @@ random_ioctl(struct inode * inode, struct file * file, | |||
1104 | size); | 1092 | size); |
1105 | if (retval < 0) | 1093 | if (retval < 0) |
1106 | return retval; | 1094 | return retval; |
1107 | credit_entropy_store(&input_pool, ent_count); | 1095 | credit_entropy_bits(&input_pool, ent_count); |
1108 | /* | ||
1109 | * Wake up waiting processes if we have enough | ||
1110 | * entropy. | ||
1111 | */ | ||
1112 | if (input_pool.entropy_count >= random_read_wakeup_thresh) | ||
1113 | wake_up_interruptible(&random_read_wait); | ||
1114 | return 0; | 1096 | return 0; |
1115 | case RNDZAPENTCNT: | 1097 | case RNDZAPENTCNT: |
1116 | case RNDCLEARPOOL: | 1098 | case RNDCLEARPOOL: |
1117 | /* Clear the entropy pool counters. */ | 1099 | /* Clear the entropy pool counters. */ |
1118 | if (!capable(CAP_SYS_ADMIN)) | 1100 | if (!capable(CAP_SYS_ADMIN)) |
1119 | return -EPERM; | 1101 | return -EPERM; |
1120 | init_std_data(&input_pool); | 1102 | rand_initialize(); |
1121 | init_std_data(&blocking_pool); | ||
1122 | init_std_data(&nonblocking_pool); | ||
1123 | return 0; | 1103 | return 0; |
1124 | default: | 1104 | default: |
1125 | return -EINVAL; | 1105 | return -EINVAL; |
1126 | } | 1106 | } |
1127 | } | 1107 | } |
1128 | 1108 | ||
1109 | static int random_fasync(int fd, struct file *filp, int on) | ||
1110 | { | ||
1111 | return fasync_helper(fd, filp, on, &fasync); | ||
1112 | } | ||
1113 | |||
1114 | static int random_release(struct inode *inode, struct file *filp) | ||
1115 | { | ||
1116 | return fasync_helper(-1, filp, 0, &fasync); | ||
1117 | } | ||
1118 | |||
1129 | const struct file_operations random_fops = { | 1119 | const struct file_operations random_fops = { |
1130 | .read = random_read, | 1120 | .read = random_read, |
1131 | .write = random_write, | 1121 | .write = random_write, |
1132 | .poll = random_poll, | 1122 | .poll = random_poll, |
1133 | .ioctl = random_ioctl, | 1123 | .unlocked_ioctl = random_ioctl, |
1124 | .fasync = random_fasync, | ||
1125 | .release = random_release, | ||
1134 | }; | 1126 | }; |
1135 | 1127 | ||
1136 | const struct file_operations urandom_fops = { | 1128 | const struct file_operations urandom_fops = { |
1137 | .read = urandom_read, | 1129 | .read = urandom_read, |
1138 | .write = random_write, | 1130 | .write = random_write, |
1139 | .ioctl = random_ioctl, | 1131 | .unlocked_ioctl = random_ioctl, |
1132 | .fasync = random_fasync, | ||
1133 | .release = random_release, | ||
1140 | }; | 1134 | }; |
1141 | 1135 | ||
1142 | /*************************************************************** | 1136 | /*************************************************************** |
@@ -1157,7 +1151,6 @@ void generate_random_uuid(unsigned char uuid_out[16]) | |||
1157 | /* Set the UUID variant to DCE */ | 1151 | /* Set the UUID variant to DCE */ |
1158 | uuid_out[8] = (uuid_out[8] & 0x3F) | 0x80; | 1152 | uuid_out[8] = (uuid_out[8] & 0x3F) | 0x80; |
1159 | } | 1153 | } |
1160 | |||
1161 | EXPORT_SYMBOL(generate_random_uuid); | 1154 | EXPORT_SYMBOL(generate_random_uuid); |
1162 | 1155 | ||
1163 | /******************************************************************** | 1156 | /******************************************************************** |
@@ -1339,7 +1332,7 @@ ctl_table random_table[] = { | |||
1339 | 1332 | ||
1340 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | 1333 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) |
1341 | 1334 | ||
1342 | static __u32 twothirdsMD4Transform (__u32 const buf[4], __u32 const in[12]) | 1335 | static __u32 twothirdsMD4Transform(__u32 const buf[4], __u32 const in[12]) |
1343 | { | 1336 | { |
1344 | __u32 a = buf[0], b = buf[1], c = buf[2], d = buf[3]; | 1337 | __u32 a = buf[0], b = buf[1], c = buf[2], d = buf[3]; |
1345 | 1338 | ||
@@ -1487,8 +1480,8 @@ __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr, | |||
1487 | */ | 1480 | */ |
1488 | 1481 | ||
1489 | memcpy(hash, saddr, 16); | 1482 | memcpy(hash, saddr, 16); |
1490 | hash[4]=((__force u16)sport << 16) + (__force u16)dport; | 1483 | hash[4] = ((__force u16)sport << 16) + (__force u16)dport; |
1491 | memcpy(&hash[5],keyptr->secret,sizeof(__u32) * 7); | 1484 | memcpy(&hash[5], keyptr->secret, sizeof(__u32) * 7); |
1492 | 1485 | ||
1493 | seq = twothirdsMD4Transform((const __u32 *)daddr, hash) & HASH_MASK; | 1486 | seq = twothirdsMD4Transform((const __u32 *)daddr, hash) & HASH_MASK; |
1494 | seq += keyptr->count; | 1487 | seq += keyptr->count; |
@@ -1538,10 +1531,10 @@ __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr, | |||
1538 | * Note that the words are placed into the starting vector, which is | 1531 | * Note that the words are placed into the starting vector, which is |
1539 | * then mixed with a partial MD4 over random data. | 1532 | * then mixed with a partial MD4 over random data. |
1540 | */ | 1533 | */ |
1541 | hash[0]=(__force u32)saddr; | 1534 | hash[0] = (__force u32)saddr; |
1542 | hash[1]=(__force u32)daddr; | 1535 | hash[1] = (__force u32)daddr; |
1543 | hash[2]=((__force u16)sport << 16) + (__force u16)dport; | 1536 | hash[2] = ((__force u16)sport << 16) + (__force u16)dport; |
1544 | hash[3]=keyptr->secret[11]; | 1537 | hash[3] = keyptr->secret[11]; |
1545 | 1538 | ||
1546 | seq = half_md4_transform(hash, keyptr->secret) & HASH_MASK; | 1539 | seq = half_md4_transform(hash, keyptr->secret) & HASH_MASK; |
1547 | seq += keyptr->count; | 1540 | seq += keyptr->count; |
@@ -1556,10 +1549,7 @@ __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr, | |||
1556 | * Choosing a clock of 64 ns period is OK. (period of 274 s) | 1549 | * Choosing a clock of 64 ns period is OK. (period of 274 s) |
1557 | */ | 1550 | */ |
1558 | seq += ktime_to_ns(ktime_get_real()) >> 6; | 1551 | seq += ktime_to_ns(ktime_get_real()) >> 6; |
1559 | #if 0 | 1552 | |
1560 | printk("init_seq(%lx, %lx, %d, %d) = %d\n", | ||
1561 | saddr, daddr, sport, dport, seq); | ||
1562 | #endif | ||
1563 | return seq; | 1553 | return seq; |
1564 | } | 1554 | } |
1565 | 1555 | ||
@@ -1582,14 +1572,15 @@ u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport) | |||
1582 | } | 1572 | } |
1583 | 1573 | ||
1584 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | 1574 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) |
1585 | u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, __be16 dport) | 1575 | u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, |
1576 | __be16 dport) | ||
1586 | { | 1577 | { |
1587 | struct keydata *keyptr = get_keyptr(); | 1578 | struct keydata *keyptr = get_keyptr(); |
1588 | u32 hash[12]; | 1579 | u32 hash[12]; |
1589 | 1580 | ||
1590 | memcpy(hash, saddr, 16); | 1581 | memcpy(hash, saddr, 16); |
1591 | hash[4] = (__force u32)dport; | 1582 | hash[4] = (__force u32)dport; |
1592 | memcpy(&hash[5],keyptr->secret,sizeof(__u32) * 7); | 1583 | memcpy(&hash[5], keyptr->secret, sizeof(__u32) * 7); |
1593 | 1584 | ||
1594 | return twothirdsMD4Transform((const __u32 *)daddr, hash); | 1585 | return twothirdsMD4Transform((const __u32 *)daddr, hash); |
1595 | } | 1586 | } |
@@ -1617,13 +1608,9 @@ u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr, | |||
1617 | 1608 | ||
1618 | seq += ktime_to_ns(ktime_get_real()); | 1609 | seq += ktime_to_ns(ktime_get_real()); |
1619 | seq &= (1ull << 48) - 1; | 1610 | seq &= (1ull << 48) - 1; |
1620 | #if 0 | 1611 | |
1621 | printk("dccp init_seq(%lx, %lx, %d, %d) = %d\n", | ||
1622 | saddr, daddr, sport, dport, seq); | ||
1623 | #endif | ||
1624 | return seq; | 1612 | return seq; |
1625 | } | 1613 | } |
1626 | |||
1627 | EXPORT_SYMBOL(secure_dccp_sequence_number); | 1614 | EXPORT_SYMBOL(secure_dccp_sequence_number); |
1628 | #endif | 1615 | #endif |
1629 | 1616 | ||
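
[Editor's note] The random.c rework renames the mixing primitives and makes them byte-oriented — add_entropy_words()/__add_entropy_words() become mix_pool_bytes()/mix_pool_bytes_extract(), credit_entropy_store() becomes credit_entropy_bits() — and moves the reader wakeup plus the new kill_fasync(SIGIO) delivery into the credit path, so callers such as add_timer_randomness() no longer wake readers themselves. Below is a toy, self-contained model of that calling convention; the mixing is a placeholder XOR, not the driver's twisted-GFSR, and all sizes are invented.

    #include <stdio.h>
    #include <string.h>

    #define POOL_BYTES      64
    #define WAKEUP_THRESH   8               /* like random_read_wakeup_thresh */

    static unsigned char pool[POOL_BYTES];
    static int entropy_count;               /* in bits, like r->entropy_count */

    /* Byte-oriented mixing: callers no longer pad input to 32-bit words. */
    static void mix_pool_bytes(const void *in, int nbytes)
    {
            const unsigned char *bytes = in;
            int i;

            for (i = 0; i < nbytes; i++)
                    pool[i % POOL_BYTES] ^= bytes[i];   /* toy stand-in only */
    }

    /* Crediting entropy is now also the single place readers get woken. */
    static void credit_entropy_bits(int nbits)
    {
            entropy_count += nbits;
            if (entropy_count > POOL_BYTES * 8)
                    entropy_count = POOL_BYTES * 8;

            if (entropy_count >= WAKEUP_THRESH)
                    printf("wake readers / kill_fasync(SIGIO)\n");
    }

    int main(void)
    {
            struct { long jiffies; unsigned cycles; unsigned num; } sample = { 1000, 42, 7 };

            /* The add_timer_randomness() pattern after the change: */
            mix_pool_bytes(&sample, sizeof(sample));
            credit_entropy_bits(11);

            printf("entropy_count = %d bits\n", entropy_count);
            return 0;
    }

As the new comments in the patch explain, extract_buf() now hashes the whole pool first and feeds a full SHA-1's worth of state back through mix_pool_bytes_extract(), so brute-forcing the feedback is as hard as brute-forcing the hash.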
diff --git a/drivers/char/rocket_int.h b/drivers/char/rocket_int.h index b01d38125a8f..143cc432fdb2 100644 --- a/drivers/char/rocket_int.h +++ b/drivers/char/rocket_int.h | |||
@@ -55,7 +55,7 @@ static inline void sOutW(unsigned short port, unsigned short value) | |||
55 | 55 | ||
56 | static inline void out32(unsigned short port, Byte_t *p) | 56 | static inline void out32(unsigned short port, Byte_t *p) |
57 | { | 57 | { |
58 | u32 value = le32_to_cpu(get_unaligned((__le32 *)p)); | 58 | u32 value = get_unaligned_le32(p); |
59 | #ifdef ROCKET_DEBUG_IO | 59 | #ifdef ROCKET_DEBUG_IO |
60 | printk(KERN_DEBUG "out32(%x, %lx)...\n", port, value); | 60 | printk(KERN_DEBUG "out32(%x, %lx)...\n", port, value); |
61 | #endif | 61 | #endif |
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c index e2ec2ee4cf79..5f80a9dff573 100644 --- a/drivers/char/rtc.c +++ b/drivers/char/rtc.c | |||
@@ -1069,10 +1069,8 @@ no_irq: | |||
1069 | } | 1069 | } |
1070 | 1070 | ||
1071 | #ifdef CONFIG_PROC_FS | 1071 | #ifdef CONFIG_PROC_FS |
1072 | ent = create_proc_entry("driver/rtc", 0, NULL); | 1072 | ent = proc_create("driver/rtc", 0, NULL, &rtc_proc_fops); |
1073 | if (ent) | 1073 | if (!ent) |
1074 | ent->proc_fops = &rtc_proc_fops; | ||
1075 | else | ||
1076 | printk(KERN_WARNING "rtc: Failed to register with procfs.\n"); | 1074 | printk(KERN_WARNING "rtc: Failed to register with procfs.\n"); |
1077 | #endif | 1075 | #endif |
1078 | 1076 | ||
diff --git a/drivers/char/snsc_event.c b/drivers/char/snsc_event.c index 1b75b0b7d542..31a7765eaf73 100644 --- a/drivers/char/snsc_event.c +++ b/drivers/char/snsc_event.c | |||
@@ -63,16 +63,13 @@ static int | |||
63 | scdrv_parse_event(char *event, int *src, int *code, int *esp_code, char *desc) | 63 | scdrv_parse_event(char *event, int *src, int *code, int *esp_code, char *desc) |
64 | { | 64 | { |
65 | char *desc_end; | 65 | char *desc_end; |
66 | __be32 from_buf; | ||
67 | 66 | ||
68 | /* record event source address */ | 67 | /* record event source address */ |
69 | from_buf = get_unaligned((__be32 *)event); | 68 | *src = get_unaligned_be32(event); |
70 | *src = be32_to_cpup(&from_buf); | ||
71 | event += 4; /* move on to event code */ | 69 | event += 4; /* move on to event code */ |
72 | 70 | ||
73 | /* record the system controller's event code */ | 71 | /* record the system controller's event code */ |
74 | from_buf = get_unaligned((__be32 *)event); | 72 | *code = get_unaligned_be32(event); |
75 | *code = be32_to_cpup(&from_buf); | ||
76 | event += 4; /* move on to event arguments */ | 73 | event += 4; /* move on to event arguments */ |
77 | 74 | ||
78 | /* how many arguments are in the packet? */ | 75 | /* how many arguments are in the packet? */ |
@@ -86,8 +83,7 @@ scdrv_parse_event(char *event, int *src, int *code, int *esp_code, char *desc) | |||
86 | /* not an integer argument, so give up */ | 83 | /* not an integer argument, so give up */ |
87 | return -1; | 84 | return -1; |
88 | } | 85 | } |
89 | from_buf = get_unaligned((__be32 *)event); | 86 | *esp_code = get_unaligned_be32(event); |
90 | *esp_code = be32_to_cpup(&from_buf); | ||
91 | event += 4; | 87 | event += 4; |
92 | 88 | ||
93 | /* parse out the event description */ | 89 | /* parse out the event description */ |
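
[Editor's note] rocket_int.h and snsc_event.c both replace the two-step get_unaligned() plus le32_to_cpu()/be32_to_cpup() sequence with the combined get_unaligned_le32()/get_unaligned_be32() helpers. A portable userland model of what the big-endian variant computes is shown below; it illustrates the semantics only and is not the kernel implementation.

    #include <stdio.h>
    #include <stdint.h>

    /* Read a 32-bit big-endian value from a possibly unaligned buffer,
     * byte by byte, so no alignment trap can occur. */
    static uint32_t model_get_unaligned_be32(const void *p)
    {
            const uint8_t *b = p;

            return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
                   ((uint32_t)b[2] << 8)  |  (uint32_t)b[3];
    }

    int main(void)
    {
            /* Five bytes of "event" payload starting at an odd offset. */
            uint8_t event[5] = { 0x00, 0x12, 0x34, 0x56, 0x78 };

            /* Equivalent of: *src = get_unaligned_be32(event + 1); */
            printf("src = 0x%08x\n", model_get_unaligned_be32(event + 1));
            return 0;
    }

The point of the helpers is to combine the unaligned load and the endian conversion into a single call; the byte-by-byte loop above is just one portable way of getting the same result.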
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c index 1ade193c9128..9e9bad8bdcf4 100644 --- a/drivers/char/sysrq.c +++ b/drivers/char/sysrq.c | |||
@@ -196,6 +196,48 @@ static struct sysrq_key_op sysrq_showlocks_op = { | |||
196 | #define sysrq_showlocks_op (*(struct sysrq_key_op *)0) | 196 | #define sysrq_showlocks_op (*(struct sysrq_key_op *)0) |
197 | #endif | 197 | #endif |
198 | 198 | ||
199 | #ifdef CONFIG_SMP | ||
200 | static DEFINE_SPINLOCK(show_lock); | ||
201 | |||
202 | static void showacpu(void *dummy) | ||
203 | { | ||
204 | unsigned long flags; | ||
205 | |||
206 | /* Idle CPUs have no interesting backtrace. */ | ||
207 | if (idle_cpu(smp_processor_id())) | ||
208 | return; | ||
209 | |||
210 | spin_lock_irqsave(&show_lock, flags); | ||
211 | printk(KERN_INFO "CPU%d:\n", smp_processor_id()); | ||
212 | show_stack(NULL, NULL); | ||
213 | spin_unlock_irqrestore(&show_lock, flags); | ||
214 | } | ||
215 | |||
216 | static void sysrq_showregs_othercpus(struct work_struct *dummy) | ||
217 | { | ||
218 | smp_call_function(showacpu, NULL, 0, 0); | ||
219 | } | ||
220 | |||
221 | static DECLARE_WORK(sysrq_showallcpus, sysrq_showregs_othercpus); | ||
222 | |||
223 | static void sysrq_handle_showallcpus(int key, struct tty_struct *tty) | ||
224 | { | ||
225 | struct pt_regs *regs = get_irq_regs(); | ||
226 | if (regs) { | ||
227 | printk(KERN_INFO "CPU%d:\n", smp_processor_id()); | ||
228 | show_regs(regs); | ||
229 | } | ||
230 | schedule_work(&sysrq_showallcpus); | ||
231 | } | ||
232 | |||
233 | static struct sysrq_key_op sysrq_showallcpus_op = { | ||
234 | .handler = sysrq_handle_showallcpus, | ||
235 | .help_msg = "aLlcpus", | ||
236 | .action_msg = "Show backtrace of all active CPUs", | ||
237 | .enable_mask = SYSRQ_ENABLE_DUMP, | ||
238 | }; | ||
239 | #endif | ||
240 | |||
199 | static void sysrq_handle_showregs(int key, struct tty_struct *tty) | 241 | static void sysrq_handle_showregs(int key, struct tty_struct *tty) |
200 | { | 242 | { |
201 | struct pt_regs *regs = get_irq_regs(); | 243 | struct pt_regs *regs = get_irq_regs(); |
@@ -340,7 +382,11 @@ static struct sysrq_key_op *sysrq_key_table[36] = { | |||
340 | &sysrq_kill_op, /* i */ | 382 | &sysrq_kill_op, /* i */ |
341 | NULL, /* j */ | 383 | NULL, /* j */ |
342 | &sysrq_SAK_op, /* k */ | 384 | &sysrq_SAK_op, /* k */ |
385 | #ifdef CONFIG_SMP | ||
386 | &sysrq_showallcpus_op, /* l */ | ||
387 | #else | ||
343 | NULL, /* l */ | 388 | NULL, /* l */ |
389 | #endif | ||
344 | &sysrq_showmem_op, /* m */ | 390 | &sysrq_showmem_op, /* m */ |
345 | &sysrq_unrt_op, /* n */ | 391 | &sysrq_unrt_op, /* n */ |
346 | /* o: This will often be registered as 'Off' at init time */ | 392 | /* o: This will often be registered as 'Off' at init time */ |
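
[Editor's note] The new sysrq-l handler added above prints the current CPU's registers directly and defers the cross-CPU backtraces to a workqueue item (presumably because smp_call_function() is not safe from the sysrq/interrupt context), with the per-CPU output serialized by show_lock so backtraces from different CPUs do not interleave. A userland model of that serialization follows, with pthreads standing in for CPUs; everything in it is invented for illustration. Build with: cc -pthread demo.c

    #include <pthread.h>
    #include <stdio.h>

    /* Models the show_lock added above: each "CPU" prints its whole
     * backtrace while holding the lock, so lines never interleave. */
    static pthread_mutex_t show_lock = PTHREAD_MUTEX_INITIALIZER;

    static void *showacpu(void *arg)
    {
            int cpu = *(int *)arg;
            int i;

            pthread_mutex_lock(&show_lock);
            printf("CPU%d:\n", cpu);
            for (i = 0; i < 3; i++)
                    printf("  frame %d of CPU%d\n", i, cpu);
            pthread_mutex_unlock(&show_lock);
            return NULL;
    }

    int main(void)
    {
            pthread_t thread[2];
            int id[2] = { 0, 1 };
            int i;

            for (i = 0; i < 2; i++)
                    pthread_create(&thread[i], NULL, showacpu, &id[i]);
            for (i = 0; i < 2; i++)
                    pthread_join(thread[i], NULL);
            return 0;
    }

Without the lock the per-thread "frame" lines could interleave arbitrarily, which is exactly what show_lock prevents for the show_stack() output in the patch.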
diff --git a/drivers/char/toshiba.c b/drivers/char/toshiba.c index ce5ebe3b168f..64f1ceed0b2c 100644 --- a/drivers/char/toshiba.c +++ b/drivers/char/toshiba.c | |||
@@ -520,12 +520,11 @@ static int __init toshiba_init(void) | |||
520 | { | 520 | { |
521 | struct proc_dir_entry *pde; | 521 | struct proc_dir_entry *pde; |
522 | 522 | ||
523 | pde = create_proc_entry("toshiba", 0, NULL); | 523 | pde = proc_create("toshiba", 0, NULL, &proc_toshiba_fops); |
524 | if (!pde) { | 524 | if (!pde) { |
525 | misc_deregister(&tosh_device); | 525 | misc_deregister(&tosh_device); |
526 | return -ENOMEM; | 526 | return -ENOMEM; |
527 | } | 527 | } |
528 | pde->proc_fops = &proc_toshiba_fops; | ||
529 | } | 528 | } |
530 | #endif | 529 | #endif |
531 | 530 | ||
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig index 8f3f7620f95a..3738cfa209ff 100644 --- a/drivers/char/tpm/Kconfig +++ b/drivers/char/tpm/Kconfig | |||
@@ -23,7 +23,7 @@ if TCG_TPM | |||
23 | 23 | ||
24 | config TCG_TIS | 24 | config TCG_TIS |
25 | tristate "TPM Interface Specification 1.2 Interface" | 25 | tristate "TPM Interface Specification 1.2 Interface" |
26 | depends on PNPACPI | 26 | depends on PNP |
27 | ---help--- | 27 | ---help--- |
28 | If you have a TPM security chip that is compliant with the | 28 | If you have a TPM security chip that is compliant with the |
29 | TCG TIS 1.2 TPM specification say Yes and it will be accessible | 29 | TCG TIS 1.2 TPM specification say Yes and it will be accessible |
@@ -32,7 +32,6 @@ config TCG_TIS | |||
32 | 32 | ||
33 | config TCG_NSC | 33 | config TCG_NSC |
34 | tristate "National Semiconductor TPM Interface" | 34 | tristate "National Semiconductor TPM Interface" |
35 | depends on PNPACPI | ||
36 | ---help--- | 35 | ---help--- |
37 | If you have a TPM security chip from National Semiconductor | 36 | If you have a TPM security chip from National Semiconductor |
38 | say Yes and it will be accessible from within Linux. To | 37 | say Yes and it will be accessible from within Linux. To |
@@ -48,7 +47,7 @@ config TCG_ATMEL | |||
48 | 47 | ||
49 | config TCG_INFINEON | 48 | config TCG_INFINEON |
50 | tristate "Infineon Technologies TPM Interface" | 49 | tristate "Infineon Technologies TPM Interface" |
51 | depends on PNPACPI | 50 | depends on PNP |
52 | ---help--- | 51 | ---help--- |
53 | If you have a TPM security chip from Infineon Technologies | 52 | If you have a TPM security chip from Infineon Technologies |
54 | (either SLD 9630 TT 1.1 or SLB 9635 TT 1.2) say Yes and it | 53 | (either SLD 9630 TT 1.1 or SLB 9635 TT 1.2) say Yes and it |
diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c index 6313326bc41f..ab18c1e7b115 100644 --- a/drivers/char/tpm/tpm_nsc.c +++ b/drivers/char/tpm/tpm_nsc.c | |||
@@ -264,7 +264,7 @@ static const struct tpm_vendor_specific tpm_nsc = { | |||
264 | 264 | ||
265 | static struct platform_device *pdev = NULL; | 265 | static struct platform_device *pdev = NULL; |
266 | 266 | ||
267 | static void __devexit tpm_nsc_remove(struct device *dev) | 267 | static void tpm_nsc_remove(struct device *dev) |
268 | { | 268 | { |
269 | struct tpm_chip *chip = dev_get_drvdata(dev); | 269 | struct tpm_chip *chip = dev_get_drvdata(dev); |
270 | if ( chip ) { | 270 | if ( chip ) { |
diff --git a/drivers/char/viotape.c b/drivers/char/viotape.c index db7a731e2362..58aad63831f4 100644 --- a/drivers/char/viotape.c +++ b/drivers/char/viotape.c | |||
@@ -249,6 +249,7 @@ static int proc_viotape_open(struct inode *inode, struct file *file) | |||
249 | } | 249 | } |
250 | 250 | ||
251 | static const struct file_operations proc_viotape_operations = { | 251 | static const struct file_operations proc_viotape_operations = { |
252 | .owner = THIS_MODULE, | ||
252 | .open = proc_viotape_open, | 253 | .open = proc_viotape_open, |
253 | .read = seq_read, | 254 | .read = seq_read, |
254 | .llseek = seq_lseek, | 255 | .llseek = seq_lseek, |
@@ -915,7 +916,6 @@ static struct vio_driver viotape_driver = { | |||
915 | int __init viotap_init(void) | 916 | int __init viotap_init(void) |
916 | { | 917 | { |
917 | int ret; | 918 | int ret; |
918 | struct proc_dir_entry *e; | ||
919 | 919 | ||
920 | if (!firmware_has_feature(FW_FEATURE_ISERIES)) | 920 | if (!firmware_has_feature(FW_FEATURE_ISERIES)) |
921 | return -ENODEV; | 921 | return -ENODEV; |
@@ -968,11 +968,8 @@ int __init viotap_init(void) | |||
968 | if (ret) | 968 | if (ret) |
969 | goto unreg_class; | 969 | goto unreg_class; |
970 | 970 | ||
971 | e = create_proc_entry("iSeries/viotape", S_IFREG|S_IRUGO, NULL); | 971 | proc_create("iSeries/viotape", S_IFREG|S_IRUGO, NULL, |
972 | if (e) { | 972 | &proc_viotape_operations); |
973 | e->owner = THIS_MODULE; | ||
974 | e->proc_fops = &proc_viotape_operations; | ||
975 | } | ||
976 | 973 | ||
977 | return 0; | 974 | return 0; |
978 | 975 | ||
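The viotape hunk makes the same conversion and also moves the module pinning: rather than setting e->owner on the proc_dir_entry after the fact, .owner = THIS_MODULE now lives in the file_operations, so an open /proc file holds its reference on the module through the fops. A sketch of the resulting shape (the seq_file show routine and all names are placeholders):

    #include <linux/module.h>
    #include <linux/proc_fs.h>
    #include <linux/seq_file.h>

    static int example_show(struct seq_file *m, void *v)
    {
            seq_puts(m, "example\n");
            return 0;
    }

    static int example_open(struct inode *inode, struct file *file)
    {
            return single_open(file, example_show, NULL);
    }

    static const struct file_operations example_proc_fops = {
            .owner   = THIS_MODULE,   /* pins the module while the file is open */
            .open    = example_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = single_release,
    };

    static int __init example_init(void)
    {
            if (!proc_create("example", S_IFREG | S_IRUGO, NULL,
                             &example_proc_fops))
                    return -ENOMEM;
            return 0;
    }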
diff --git a/drivers/char/vt.c b/drivers/char/vt.c index df4c3ead9e2b..1c2660477135 100644 --- a/drivers/char/vt.c +++ b/drivers/char/vt.c | |||
@@ -301,7 +301,7 @@ static void scrup(struct vc_data *vc, unsigned int t, unsigned int b, int nr) | |||
301 | d = (unsigned short *)(vc->vc_origin + vc->vc_size_row * t); | 301 | d = (unsigned short *)(vc->vc_origin + vc->vc_size_row * t); |
302 | s = (unsigned short *)(vc->vc_origin + vc->vc_size_row * (t + nr)); | 302 | s = (unsigned short *)(vc->vc_origin + vc->vc_size_row * (t + nr)); |
303 | scr_memmovew(d, s, (b - t - nr) * vc->vc_size_row); | 303 | scr_memmovew(d, s, (b - t - nr) * vc->vc_size_row); |
304 | scr_memsetw(d + (b - t - nr) * vc->vc_cols, vc->vc_video_erase_char, | 304 | scr_memsetw(d + (b - t - nr) * vc->vc_cols, vc->vc_scrl_erase_char, |
305 | vc->vc_size_row * nr); | 305 | vc->vc_size_row * nr); |
306 | } | 306 | } |
307 | 307 | ||
@@ -319,7 +319,7 @@ static void scrdown(struct vc_data *vc, unsigned int t, unsigned int b, int nr) | |||
319 | s = (unsigned short *)(vc->vc_origin + vc->vc_size_row * t); | 319 | s = (unsigned short *)(vc->vc_origin + vc->vc_size_row * t); |
320 | step = vc->vc_cols * nr; | 320 | step = vc->vc_cols * nr; |
321 | scr_memmovew(s + step, s, (b - t - nr) * vc->vc_size_row); | 321 | scr_memmovew(s + step, s, (b - t - nr) * vc->vc_size_row); |
322 | scr_memsetw(s, vc->vc_video_erase_char, 2 * step); | 322 | scr_memsetw(s, vc->vc_scrl_erase_char, 2 * step); |
323 | } | 323 | } |
324 | 324 | ||
325 | static void do_update_region(struct vc_data *vc, unsigned long start, int count) | 325 | static void do_update_region(struct vc_data *vc, unsigned long start, int count) |
@@ -400,7 +400,7 @@ static u8 build_attr(struct vc_data *vc, u8 _color, u8 _intensity, u8 _blink, | |||
400 | * Bit 7 : blink | 400 | * Bit 7 : blink |
401 | */ | 401 | */ |
402 | { | 402 | { |
403 | u8 a = vc->vc_color; | 403 | u8 a = _color; |
404 | if (!vc->vc_can_do_color) | 404 | if (!vc->vc_can_do_color) |
405 | return _intensity | | 405 | return _intensity | |
406 | (_italic ? 2 : 0) | | 406 | (_italic ? 2 : 0) | |
@@ -434,6 +434,7 @@ static void update_attr(struct vc_data *vc) | |||
434 | vc->vc_blink, vc->vc_underline, | 434 | vc->vc_blink, vc->vc_underline, |
435 | vc->vc_reverse ^ vc->vc_decscnm, vc->vc_italic); | 435 | vc->vc_reverse ^ vc->vc_decscnm, vc->vc_italic); |
436 | vc->vc_video_erase_char = (build_attr(vc, vc->vc_color, 1, vc->vc_blink, 0, vc->vc_decscnm, 0) << 8) | ' '; | 436 | vc->vc_video_erase_char = (build_attr(vc, vc->vc_color, 1, vc->vc_blink, 0, vc->vc_decscnm, 0) << 8) | ' '; |
437 | vc->vc_scrl_erase_char = (build_attr(vc, vc->vc_def_color, 1, false, false, false, false) << 8) | ' '; | ||
437 | } | 438 | } |
438 | 439 | ||
439 | /* Note: inverting the screen twice should revert to the original state */ | 440 | /* Note: inverting the screen twice should revert to the original state */ |
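The vt.c change introduces a second erase character: scrup()/scrdown() now clear the newly exposed rows with vc_scrl_erase_char, which update_attr() builds from the console's default color with blink/underline/reverse/italic all off, instead of vc_video_erase_char, which tracks the currently selected attribute. The hunk also fixes build_attr() to honor its _color argument rather than always reading vc->vc_color, without which the new vc_def_color-based call would have had no effect. As far as the hunks show, the visible result is that scrolled-in blank lines come up in the default colors instead of inheriting whatever attribute happens to be active. An illustrative comparison, assuming the usual VGA attribute layout (foreground in bits 0-3, background in bits 4-6):

    #include <linux/types.h>

    /* Hypothetical console: current attribute 0x17 (white on blue),
     * default attribute 0x07 (grey on black). Each screen cell is
     * (attribute << 8) | character, as in update_attr() above. */
    static const u16 video_erase_char = (0x17 << 8) | ' '; /* current colors */
    static const u16 scrl_erase_char  = (0x07 << 8) | ' '; /* default colors */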
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index 2b382990fe58..6e6c3c4aea6b 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig | |||
@@ -67,7 +67,7 @@ config EDAC_E7XXX | |||
67 | E7205, E7500, E7501 and E7505 server chipsets. | 67 | E7205, E7500, E7501 and E7505 server chipsets. |
68 | 68 | ||
69 | config EDAC_E752X | 69 | config EDAC_E752X |
70 | tristate "Intel e752x (e7520, e7525, e7320)" | 70 | tristate "Intel e752x (e7520, e7525, e7320) and 3100" |
71 | depends on EDAC_MM_EDAC && PCI && X86 && HOTPLUG | 71 | depends on EDAC_MM_EDAC && PCI && X86 && HOTPLUG |
72 | help | 72 | help |
73 | Support for error detection and correction on the Intel | 73 | Support for error detection and correction on the Intel |
diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c index f22075410591..2b95f1a3edfc 100644 --- a/drivers/edac/amd76x_edac.c +++ b/drivers/edac/amd76x_edac.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/pci.h> | 17 | #include <linux/pci.h> |
18 | #include <linux/pci_ids.h> | 18 | #include <linux/pci_ids.h> |
19 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
20 | #include <linux/edac.h> | ||
20 | #include "edac_core.h" | 21 | #include "edac_core.h" |
21 | 22 | ||
22 | #define AMD76X_REVISION " Ver: 2.0.2 " __DATE__ | 23 | #define AMD76X_REVISION " Ver: 2.0.2 " __DATE__ |
@@ -344,6 +345,9 @@ static struct pci_driver amd76x_driver = { | |||
344 | 345 | ||
345 | static int __init amd76x_init(void) | 346 | static int __init amd76x_init(void) |
346 | { | 347 | { |
348 | /* Ensure that the OPSTATE is set correctly for POLL or NMI */ | ||
349 | opstate_init(); | ||
350 | |||
347 | return pci_register_driver(&amd76x_driver); | 351 | return pci_register_driver(&amd76x_driver); |
348 | } | 352 | } |
349 | 353 | ||
@@ -358,3 +362,6 @@ module_exit(amd76x_exit); | |||
358 | MODULE_LICENSE("GPL"); | 362 | MODULE_LICENSE("GPL"); |
359 | MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh"); | 363 | MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh"); |
360 | MODULE_DESCRIPTION("MC support for AMD 76x memory controllers"); | 364 | MODULE_DESCRIPTION("MC support for AMD 76x memory controllers"); |
365 | |||
366 | module_param(edac_op_state, int, 0444); | ||
367 | MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); | ||
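The EDAC hunks in the rest of this series all follow the shape seen here in amd76x: the per-driver edac_op_state sanity switch is deleted from the probe path and replaced by a single opstate_init() call in module init, and edac_op_state is exposed as a read-only module parameter. A condensed sketch of the resulting boilerplate, assuming the declarations provided by the EDAC headers these drivers include:

    #include <linux/module.h>
    #include <linux/pci.h>
    #include <linux/edac.h>
    #include "edac_core.h"

    static struct pci_driver example_edac_driver;   /* filled in elsewhere */

    static int __init example_edac_init(void)
    {
            /* Normalize edac_op_state to POLL or NMI before any device probes. */
            opstate_init();

            return pci_register_driver(&example_edac_driver);
    }
    module_init(example_edac_init);

    /* 0444: visible in sysfs, settable only at load time. */
    module_param(edac_op_state, int, 0444);
    MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");

Loading such a driver with edac_op_state=1 then selects NMI reporting where the platform supports it; the default remains polling.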
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c index 6eb434749cd5..c94a0eb492cb 100644 --- a/drivers/edac/e752x_edac.c +++ b/drivers/edac/e752x_edac.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #define EDAC_MOD_STR "e752x_edac" | 29 | #define EDAC_MOD_STR "e752x_edac" |
30 | 30 | ||
31 | static int force_function_unhide; | 31 | static int force_function_unhide; |
32 | static int sysbus_parity = -1; | ||
32 | 33 | ||
33 | static struct edac_pci_ctl_info *e752x_pci; | 34 | static struct edac_pci_ctl_info *e752x_pci; |
34 | 35 | ||
@@ -62,6 +63,14 @@ static struct edac_pci_ctl_info *e752x_pci; | |||
62 | #define PCI_DEVICE_ID_INTEL_7320_1_ERR 0x3593 | 63 | #define PCI_DEVICE_ID_INTEL_7320_1_ERR 0x3593 |
63 | #endif /* PCI_DEVICE_ID_INTEL_7320_1_ERR */ | 64 | #endif /* PCI_DEVICE_ID_INTEL_7320_1_ERR */ |
64 | 65 | ||
66 | #ifndef PCI_DEVICE_ID_INTEL_3100_0 | ||
67 | #define PCI_DEVICE_ID_INTEL_3100_0 0x35B0 | ||
68 | #endif /* PCI_DEVICE_ID_INTEL_3100_0 */ | ||
69 | |||
70 | #ifndef PCI_DEVICE_ID_INTEL_3100_1_ERR | ||
71 | #define PCI_DEVICE_ID_INTEL_3100_1_ERR 0x35B1 | ||
72 | #endif /* PCI_DEVICE_ID_INTEL_3100_1_ERR */ | ||
73 | |||
65 | #define E752X_NR_CSROWS 8 /* number of csrows */ | 74 | #define E752X_NR_CSROWS 8 /* number of csrows */ |
66 | 75 | ||
67 | /* E752X register addresses - device 0 function 0 */ | 76 | /* E752X register addresses - device 0 function 0 */ |
@@ -152,6 +161,12 @@ static struct edac_pci_ctl_info *e752x_pci; | |||
152 | /* error syndrome register (16b) */ | 161 | /* error syndrome register (16b) */ |
153 | #define E752X_DEVPRES1 0xF4 /* Device Present 1 register (8b) */ | 162 | #define E752X_DEVPRES1 0xF4 /* Device Present 1 register (8b) */ |
154 | 163 | ||
164 | /* 3100 IMCH specific register addresses - device 0 function 1 */ | ||
165 | #define I3100_NSI_FERR 0x48 /* NSI first error reg (32b) */ | ||
166 | #define I3100_NSI_NERR 0x4C /* NSI next error reg (32b) */ | ||
167 | #define I3100_NSI_SMICMD 0x54 /* NSI SMI command register (32b) */ | ||
168 | #define I3100_NSI_EMASK 0x90 /* NSI error mask register (32b) */ | ||
169 | |||
155 | /* ICH5R register addresses - device 30 function 0 */ | 170 | /* ICH5R register addresses - device 30 function 0 */ |
156 | #define ICH5R_PCI_STAT 0x06 /* PCI status register (16b) */ | 171 | #define ICH5R_PCI_STAT 0x06 /* PCI status register (16b) */ |
157 | #define ICH5R_PCI_2ND_STAT 0x1E /* PCI status secondary reg (16b) */ | 172 | #define ICH5R_PCI_2ND_STAT 0x1E /* PCI status secondary reg (16b) */ |
@@ -160,7 +175,8 @@ static struct edac_pci_ctl_info *e752x_pci; | |||
160 | enum e752x_chips { | 175 | enum e752x_chips { |
161 | E7520 = 0, | 176 | E7520 = 0, |
162 | E7525 = 1, | 177 | E7525 = 1, |
163 | E7320 = 2 | 178 | E7320 = 2, |
179 | I3100 = 3 | ||
164 | }; | 180 | }; |
165 | 181 | ||
166 | struct e752x_pvt { | 182 | struct e752x_pvt { |
@@ -185,8 +201,10 @@ struct e752x_dev_info { | |||
185 | struct e752x_error_info { | 201 | struct e752x_error_info { |
186 | u32 ferr_global; | 202 | u32 ferr_global; |
187 | u32 nerr_global; | 203 | u32 nerr_global; |
188 | u8 hi_ferr; | 204 | u32 nsi_ferr; /* 3100 only */ |
189 | u8 hi_nerr; | 205 | u32 nsi_nerr; /* 3100 only */ |
206 | u8 hi_ferr; /* all but 3100 */ | ||
207 | u8 hi_nerr; /* all but 3100 */ | ||
190 | u16 sysbus_ferr; | 208 | u16 sysbus_ferr; |
191 | u16 sysbus_nerr; | 209 | u16 sysbus_nerr; |
192 | u8 buf_ferr; | 210 | u8 buf_ferr; |
@@ -215,6 +233,10 @@ static const struct e752x_dev_info e752x_devs[] = { | |||
215 | .err_dev = PCI_DEVICE_ID_INTEL_7320_1_ERR, | 233 | .err_dev = PCI_DEVICE_ID_INTEL_7320_1_ERR, |
216 | .ctl_dev = PCI_DEVICE_ID_INTEL_7320_0, | 234 | .ctl_dev = PCI_DEVICE_ID_INTEL_7320_0, |
217 | .ctl_name = "E7320"}, | 235 | .ctl_name = "E7320"}, |
236 | [I3100] = { | ||
237 | .err_dev = PCI_DEVICE_ID_INTEL_3100_1_ERR, | ||
238 | .ctl_dev = PCI_DEVICE_ID_INTEL_3100_0, | ||
239 | .ctl_name = "3100"}, | ||
218 | }; | 240 | }; |
219 | 241 | ||
220 | static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci, | 242 | static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci, |
@@ -402,7 +424,7 @@ static inline void process_threshold_ce(struct mem_ctl_info *mci, u16 error, | |||
402 | static char *global_message[11] = { | 424 | static char *global_message[11] = { |
403 | "PCI Express C1", "PCI Express C", "PCI Express B1", | 425 | "PCI Express C1", "PCI Express C", "PCI Express B1", |
404 | "PCI Express B", "PCI Express A1", "PCI Express A", | 426 | "PCI Express B", "PCI Express A1", "PCI Express A", |
405 | "DMA Controler", "HUB Interface", "System Bus", | 427 | "DMA Controler", "HUB or NS Interface", "System Bus", |
406 | "DRAM Controler", "Internal Buffer" | 428 | "DRAM Controler", "Internal Buffer" |
407 | }; | 429 | }; |
408 | 430 | ||
@@ -455,6 +477,63 @@ static inline void hub_error(int fatal, u8 errors, int *error_found, | |||
455 | do_hub_error(fatal, errors); | 477 | do_hub_error(fatal, errors); |
456 | } | 478 | } |
457 | 479 | ||
480 | #define NSI_FATAL_MASK 0x0c080081 | ||
481 | #define NSI_NON_FATAL_MASK 0x23a0ba64 | ||
482 | #define NSI_ERR_MASK (NSI_FATAL_MASK | NSI_NON_FATAL_MASK) | ||
483 | |||
484 | static char *nsi_message[30] = { | ||
485 | "NSI Link Down", /* NSI_FERR/NSI_NERR bit 0, fatal error */ | ||
486 | "", /* reserved */ | ||
487 | "NSI Parity Error", /* bit 2, non-fatal */ | ||
488 | "", /* reserved */ | ||
489 | "", /* reserved */ | ||
490 | "Correctable Error Message", /* bit 5, non-fatal */ | ||
491 | "Non-Fatal Error Message", /* bit 6, non-fatal */ | ||
492 | "Fatal Error Message", /* bit 7, fatal */ | ||
493 | "", /* reserved */ | ||
494 | "Receiver Error", /* bit 9, non-fatal */ | ||
495 | "", /* reserved */ | ||
496 | "Bad TLP", /* bit 11, non-fatal */ | ||
497 | "Bad DLLP", /* bit 12, non-fatal */ | ||
498 | "REPLAY_NUM Rollover", /* bit 13, non-fatal */ | ||
499 | "", /* reserved */ | ||
500 | "Replay Timer Timeout", /* bit 15, non-fatal */ | ||
501 | "", /* reserved */ | ||
502 | "", /* reserved */ | ||
503 | "", /* reserved */ | ||
504 | "Data Link Protocol Error", /* bit 19, fatal */ | ||
505 | "", /* reserved */ | ||
506 | "Poisoned TLP", /* bit 21, non-fatal */ | ||
507 | "", /* reserved */ | ||
508 | "Completion Timeout", /* bit 23, non-fatal */ | ||
509 | "Completer Abort", /* bit 24, non-fatal */ | ||
510 | "Unexpected Completion", /* bit 25, non-fatal */ | ||
511 | "Receiver Overflow", /* bit 26, fatal */ | ||
512 | "Malformed TLP", /* bit 27, fatal */ | ||
513 | "", /* reserved */ | ||
514 | "Unsupported Request" /* bit 29, non-fatal */ | ||
515 | }; | ||
516 | |||
517 | static void do_nsi_error(int fatal, u32 errors) | ||
518 | { | ||
519 | int i; | ||
520 | |||
521 | for (i = 0; i < 30; i++) { | ||
522 | if (errors & (1 << i)) | ||
523 | printk(KERN_WARNING "%sError %s\n", | ||
524 | fatal_message[fatal], nsi_message[i]); | ||
525 | } | ||
526 | } | ||
527 | |||
528 | static inline void nsi_error(int fatal, u32 errors, int *error_found, | ||
529 | int handle_error) | ||
530 | { | ||
531 | *error_found = 1; | ||
532 | |||
533 | if (handle_error) | ||
534 | do_nsi_error(fatal, errors); | ||
535 | } | ||
536 | |||
458 | static char *membuf_message[4] = { | 537 | static char *membuf_message[4] = { |
459 | "Internal PMWB to DRAM parity", | 538 | "Internal PMWB to DRAM parity", |
460 | "Internal PMWB to System Bus Parity", | 539 | "Internal PMWB to System Bus Parity", |
@@ -546,6 +625,31 @@ static void e752x_check_hub_interface(struct e752x_error_info *info, | |||
546 | } | 625 | } |
547 | } | 626 | } |
548 | 627 | ||
628 | static void e752x_check_ns_interface(struct e752x_error_info *info, | ||
629 | int *error_found, int handle_error) | ||
630 | { | ||
631 | u32 stat32; | ||
632 | |||
633 | stat32 = info->nsi_ferr; | ||
634 | if (stat32 & NSI_ERR_MASK) { /* Error, so process */ | ||
635 | if (stat32 & NSI_FATAL_MASK) /* check for fatal errors */ | ||
636 | nsi_error(1, stat32 & NSI_FATAL_MASK, error_found, | ||
637 | handle_error); | ||
638 | if (stat32 & NSI_NON_FATAL_MASK) /* check for non-fatal ones */ | ||
639 | nsi_error(0, stat32 & NSI_NON_FATAL_MASK, error_found, | ||
640 | handle_error); | ||
641 | } | ||
642 | stat32 = info->nsi_nerr; | ||
643 | if (stat32 & NSI_ERR_MASK) { | ||
644 | if (stat32 & NSI_FATAL_MASK) | ||
645 | nsi_error(1, stat32 & NSI_FATAL_MASK, error_found, | ||
646 | handle_error); | ||
647 | if (stat32 & NSI_NON_FATAL_MASK) | ||
648 | nsi_error(0, stat32 & NSI_NON_FATAL_MASK, error_found, | ||
649 | handle_error); | ||
650 | } | ||
651 | } | ||
652 | |||
549 | static void e752x_check_sysbus(struct e752x_error_info *info, | 653 | static void e752x_check_sysbus(struct e752x_error_info *info, |
550 | int *error_found, int handle_error) | 654 | int *error_found, int handle_error) |
551 | { | 655 | { |
@@ -653,7 +757,15 @@ static void e752x_get_error_info(struct mem_ctl_info *mci, | |||
653 | pci_read_config_dword(dev, E752X_FERR_GLOBAL, &info->ferr_global); | 757 | pci_read_config_dword(dev, E752X_FERR_GLOBAL, &info->ferr_global); |
654 | 758 | ||
655 | if (info->ferr_global) { | 759 | if (info->ferr_global) { |
656 | pci_read_config_byte(dev, E752X_HI_FERR, &info->hi_ferr); | 760 | if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) { |
761 | pci_read_config_dword(dev, I3100_NSI_FERR, | ||
762 | &info->nsi_ferr); | ||
763 | info->hi_ferr = 0; | ||
764 | } else { | ||
765 | pci_read_config_byte(dev, E752X_HI_FERR, | ||
766 | &info->hi_ferr); | ||
767 | info->nsi_ferr = 0; | ||
768 | } | ||
657 | pci_read_config_word(dev, E752X_SYSBUS_FERR, | 769 | pci_read_config_word(dev, E752X_SYSBUS_FERR, |
658 | &info->sysbus_ferr); | 770 | &info->sysbus_ferr); |
659 | pci_read_config_byte(dev, E752X_BUF_FERR, &info->buf_ferr); | 771 | pci_read_config_byte(dev, E752X_BUF_FERR, &info->buf_ferr); |
@@ -669,10 +781,15 @@ static void e752x_get_error_info(struct mem_ctl_info *mci, | |||
669 | pci_read_config_dword(dev, E752X_DRAM_RETR_ADD, | 781 | pci_read_config_dword(dev, E752X_DRAM_RETR_ADD, |
670 | &info->dram_retr_add); | 782 | &info->dram_retr_add); |
671 | 783 | ||
784 | /* ignore the reserved bits just in case */ | ||
672 | if (info->hi_ferr & 0x7f) | 785 | if (info->hi_ferr & 0x7f) |
673 | pci_write_config_byte(dev, E752X_HI_FERR, | 786 | pci_write_config_byte(dev, E752X_HI_FERR, |
674 | info->hi_ferr); | 787 | info->hi_ferr); |
675 | 788 | ||
789 | if (info->nsi_ferr & NSI_ERR_MASK) | ||
790 | pci_write_config_dword(dev, I3100_NSI_FERR, | ||
791 | info->nsi_ferr); | ||
792 | |||
676 | if (info->sysbus_ferr) | 793 | if (info->sysbus_ferr) |
677 | pci_write_config_word(dev, E752X_SYSBUS_FERR, | 794 | pci_write_config_word(dev, E752X_SYSBUS_FERR, |
678 | info->sysbus_ferr); | 795 | info->sysbus_ferr); |
@@ -692,7 +809,15 @@ static void e752x_get_error_info(struct mem_ctl_info *mci, | |||
692 | pci_read_config_dword(dev, E752X_NERR_GLOBAL, &info->nerr_global); | 809 | pci_read_config_dword(dev, E752X_NERR_GLOBAL, &info->nerr_global); |
693 | 810 | ||
694 | if (info->nerr_global) { | 811 | if (info->nerr_global) { |
695 | pci_read_config_byte(dev, E752X_HI_NERR, &info->hi_nerr); | 812 | if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) { |
813 | pci_read_config_dword(dev, I3100_NSI_NERR, | ||
814 | &info->nsi_nerr); | ||
815 | info->hi_nerr = 0; | ||
816 | } else { | ||
817 | pci_read_config_byte(dev, E752X_HI_NERR, | ||
818 | &info->hi_nerr); | ||
819 | info->nsi_nerr = 0; | ||
820 | } | ||
696 | pci_read_config_word(dev, E752X_SYSBUS_NERR, | 821 | pci_read_config_word(dev, E752X_SYSBUS_NERR, |
697 | &info->sysbus_nerr); | 822 | &info->sysbus_nerr); |
698 | pci_read_config_byte(dev, E752X_BUF_NERR, &info->buf_nerr); | 823 | pci_read_config_byte(dev, E752X_BUF_NERR, &info->buf_nerr); |
@@ -706,6 +831,10 @@ static void e752x_get_error_info(struct mem_ctl_info *mci, | |||
706 | pci_write_config_byte(dev, E752X_HI_NERR, | 831 | pci_write_config_byte(dev, E752X_HI_NERR, |
707 | info->hi_nerr); | 832 | info->hi_nerr); |
708 | 833 | ||
834 | if (info->nsi_nerr & NSI_ERR_MASK) | ||
835 | pci_write_config_dword(dev, I3100_NSI_NERR, | ||
836 | info->nsi_nerr); | ||
837 | |||
709 | if (info->sysbus_nerr) | 838 | if (info->sysbus_nerr) |
710 | pci_write_config_word(dev, E752X_SYSBUS_NERR, | 839 | pci_write_config_word(dev, E752X_SYSBUS_NERR, |
711 | info->sysbus_nerr); | 840 | info->sysbus_nerr); |
@@ -750,6 +879,7 @@ static int e752x_process_error_info(struct mem_ctl_info *mci, | |||
750 | global_error(0, stat32, &error_found, handle_errors); | 879 | global_error(0, stat32, &error_found, handle_errors); |
751 | 880 | ||
752 | e752x_check_hub_interface(info, &error_found, handle_errors); | 881 | e752x_check_hub_interface(info, &error_found, handle_errors); |
882 | e752x_check_ns_interface(info, &error_found, handle_errors); | ||
753 | e752x_check_sysbus(info, &error_found, handle_errors); | 883 | e752x_check_sysbus(info, &error_found, handle_errors); |
754 | e752x_check_membuf(info, &error_found, handle_errors); | 884 | e752x_check_membuf(info, &error_found, handle_errors); |
755 | e752x_check_dram(mci, info, &error_found, handle_errors); | 885 | e752x_check_dram(mci, info, &error_found, handle_errors); |
@@ -920,15 +1050,53 @@ fail: | |||
920 | return 1; | 1050 | return 1; |
921 | } | 1051 | } |
922 | 1052 | ||
1053 | /* Setup system bus parity mask register. | ||
1054 | * Sysbus parity supported on: | ||
1055 | * e7320/e7520/e7525 + Xeon | ||
1056 | * i3100 + Xeon/Celeron | ||
1057 | * Sysbus parity not supported on: | ||
1058 | * i3100 + Pentium M/Celeron M/Core Duo/Core2 Duo | ||
1059 | */ | ||
1060 | static void e752x_init_sysbus_parity_mask(struct e752x_pvt *pvt) | ||
1061 | { | ||
1062 | char *cpu_id = cpu_data(0).x86_model_id; | ||
1063 | struct pci_dev *dev = pvt->dev_d0f1; | ||
1064 | int enable = 1; | ||
1065 | |||
1066 | /* Allow module parameter override, else see if CPU supports parity */ | ||

1067 | if (sysbus_parity != -1) { | ||
1068 | enable = sysbus_parity; | ||
1069 | } else if (cpu_id[0] && | ||
1070 | ((strstr(cpu_id, "Pentium") && strstr(cpu_id, " M ")) || | ||
1071 | (strstr(cpu_id, "Celeron") && strstr(cpu_id, " M ")) || | ||
1072 | (strstr(cpu_id, "Core") && strstr(cpu_id, "Duo")))) { | ||
1073 | e752x_printk(KERN_INFO, "System Bus Parity not " | ||
1074 | "supported by CPU, disabling\n"); | ||
1075 | enable = 0; | ||
1076 | } | ||
1077 | |||
1078 | if (enable) | ||
1079 | pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x0000); | ||
1080 | else | ||
1081 | pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x0309); | ||
1082 | } | ||
1083 | |||
923 | static void e752x_init_error_reporting_regs(struct e752x_pvt *pvt) | 1084 | static void e752x_init_error_reporting_regs(struct e752x_pvt *pvt) |
924 | { | 1085 | { |
925 | struct pci_dev *dev; | 1086 | struct pci_dev *dev; |
926 | 1087 | ||
927 | dev = pvt->dev_d0f1; | 1088 | dev = pvt->dev_d0f1; |
928 | /* Turn off error disable & SMI in case the BIOS turned it on */ | 1089 | /* Turn off error disable & SMI in case the BIOS turned it on */ |
929 | pci_write_config_byte(dev, E752X_HI_ERRMASK, 0x00); | 1090 | if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) { |
930 | pci_write_config_byte(dev, E752X_HI_SMICMD, 0x00); | 1091 | pci_write_config_dword(dev, I3100_NSI_EMASK, 0); |
931 | pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x00); | 1092 | pci_write_config_dword(dev, I3100_NSI_SMICMD, 0); |
1093 | } else { | ||
1094 | pci_write_config_byte(dev, E752X_HI_ERRMASK, 0x00); | ||
1095 | pci_write_config_byte(dev, E752X_HI_SMICMD, 0x00); | ||
1096 | } | ||
1097 | |||
1098 | e752x_init_sysbus_parity_mask(pvt); | ||
1099 | |||
932 | pci_write_config_word(dev, E752X_SYSBUS_SMICMD, 0x00); | 1100 | pci_write_config_word(dev, E752X_SYSBUS_SMICMD, 0x00); |
933 | pci_write_config_byte(dev, E752X_BUF_ERRMASK, 0x00); | 1101 | pci_write_config_byte(dev, E752X_BUF_ERRMASK, 0x00); |
934 | pci_write_config_byte(dev, E752X_BUF_SMICMD, 0x00); | 1102 | pci_write_config_byte(dev, E752X_BUF_SMICMD, 0x00); |
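The sysbus-parity logic above is a policy decision encoded as one register write: the new sysbus_parity parameter wins if set, otherwise the driver sniffs cpu_data(0).x86_model_id and disables FSB parity checking for the mobile parts (Pentium M, Celeron M, Core Duo/Core2 Duo) that the comment says do not support it when paired with the i3100. Condensed into a predicate (the helper name is illustrative; the substrings are copied from the hunk):

    #include <linux/string.h>

    static int cpu_lacks_sysbus_parity(const char *cpu_id)
    {
            return (strstr(cpu_id, "Pentium") && strstr(cpu_id, " M ")) ||
                   (strstr(cpu_id, "Celeron") && strstr(cpu_id, " M ")) ||
                   (strstr(cpu_id, "Core") && strstr(cpu_id, "Duo"));
    }

When parity checking stays enabled, E752X_SYSBUS_ERRMASK is cleared to 0x0000; otherwise it is written with 0x0309 to mask the parity error sources. Loading the module with sysbus_parity=0 or sysbus_parity=1 overrides the detection either way.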
@@ -949,16 +1117,6 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx) | |||
949 | debugf0("%s(): mci\n", __func__); | 1117 | debugf0("%s(): mci\n", __func__); |
950 | debugf0("Starting Probe1\n"); | 1118 | debugf0("Starting Probe1\n"); |
951 | 1119 | ||
952 | /* make sure error reporting method is sane */ | ||
953 | switch (edac_op_state) { | ||
954 | case EDAC_OPSTATE_POLL: | ||
955 | case EDAC_OPSTATE_NMI: | ||
956 | break; | ||
957 | default: | ||
958 | edac_op_state = EDAC_OPSTATE_POLL; | ||
959 | break; | ||
960 | } | ||
961 | |||
962 | /* check to see if device 0 function 1 is enabled; if it isn't, we | 1120 | /* check to see if device 0 function 1 is enabled; if it isn't, we |
963 | * assume the BIOS has reserved it for a reason and is expecting | 1121 | * assume the BIOS has reserved it for a reason and is expecting |
964 | * exclusive access, we take care not to violate that assumption and | 1122 | * exclusive access, we take care not to violate that assumption and |
@@ -985,8 +1143,9 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx) | |||
985 | 1143 | ||
986 | debugf3("%s(): init mci\n", __func__); | 1144 | debugf3("%s(): init mci\n", __func__); |
987 | mci->mtype_cap = MEM_FLAG_RDDR; | 1145 | mci->mtype_cap = MEM_FLAG_RDDR; |
988 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED | | 1146 | /* 3100 IMCH supports SECDED only */
989 | EDAC_FLAG_S4ECD4ED; | 1147 | mci->edac_ctl_cap = (dev_idx == I3100) ? EDAC_FLAG_SECDED : |
1148 | (EDAC_FLAG_NONE | EDAC_FLAG_SECDED | EDAC_FLAG_S4ECD4ED); | ||
990 | /* FIXME - what if different memory types are in different csrows? */ | 1149 | /* FIXME - what if different memory types are in different csrows? */ |
991 | mci->mod_name = EDAC_MOD_STR; | 1150 | mci->mod_name = EDAC_MOD_STR; |
992 | mci->mod_ver = E752X_REVISION; | 1151 | mci->mod_ver = E752X_REVISION; |
@@ -1018,7 +1177,10 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx) | |||
1018 | e752x_init_csrows(mci, pdev, ddrcsr); | 1177 | e752x_init_csrows(mci, pdev, ddrcsr); |
1019 | e752x_init_mem_map_table(pdev, pvt); | 1178 | e752x_init_mem_map_table(pdev, pvt); |
1020 | 1179 | ||
1021 | mci->edac_cap |= EDAC_FLAG_NONE; | 1180 | if (dev_idx == I3100) |
1181 | mci->edac_cap = EDAC_FLAG_SECDED; /* the only mode supported */ | ||
1182 | else | ||
1183 | mci->edac_cap |= EDAC_FLAG_NONE; | ||
1022 | debugf3("%s(): tolm, remapbase, remaplimit\n", __func__); | 1184 | debugf3("%s(): tolm, remapbase, remaplimit\n", __func__); |
1023 | 1185 | ||
1024 | /* load the top of low memory, remap base, and remap limit vars */ | 1186 | /* load the top of low memory, remap base, and remap limit vars */ |
@@ -1110,6 +1272,9 @@ static const struct pci_device_id e752x_pci_tbl[] __devinitdata = { | |||
1110 | PCI_VEND_DEV(INTEL, 7320_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, | 1272 | PCI_VEND_DEV(INTEL, 7320_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, |
1111 | E7320}, | 1273 | E7320}, |
1112 | { | 1274 | { |
1275 | PCI_VEND_DEV(INTEL, 3100_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
1276 | I3100}, | ||
1277 | { | ||
1113 | 0, | 1278 | 0, |
1114 | } /* 0 terminated list. */ | 1279 | } /* 0 terminated list. */ |
1115 | }; | 1280 | }; |
@@ -1128,6 +1293,10 @@ static int __init e752x_init(void) | |||
1128 | int pci_rc; | 1293 | int pci_rc; |
1129 | 1294 | ||
1130 | debugf3("%s()\n", __func__); | 1295 | debugf3("%s()\n", __func__); |
1296 | |||
1297 | /* Ensure that the OPSTATE is set correctly for POLL or NMI */ | ||
1298 | opstate_init(); | ||
1299 | |||
1131 | pci_rc = pci_register_driver(&e752x_driver); | 1300 | pci_rc = pci_register_driver(&e752x_driver); |
1132 | return (pci_rc < 0) ? pci_rc : 0; | 1301 | return (pci_rc < 0) ? pci_rc : 0; |
1133 | } | 1302 | } |
@@ -1143,10 +1312,15 @@ module_exit(e752x_exit); | |||
1143 | 1312 | ||
1144 | MODULE_LICENSE("GPL"); | 1313 | MODULE_LICENSE("GPL"); |
1145 | MODULE_AUTHOR("Linux Networx (http://lnxi.com) Tom Zimmerman\n"); | 1314 | MODULE_AUTHOR("Linux Networx (http://lnxi.com) Tom Zimmerman\n"); |
1146 | MODULE_DESCRIPTION("MC support for Intel e752x memory controllers"); | 1315 | MODULE_DESCRIPTION("MC support for Intel e752x/3100 memory controllers"); |
1147 | 1316 | ||
1148 | module_param(force_function_unhide, int, 0444); | 1317 | module_param(force_function_unhide, int, 0444); |
1149 | MODULE_PARM_DESC(force_function_unhide, "if BIOS sets Dev0:Fun1 up as hidden:" | 1318 | MODULE_PARM_DESC(force_function_unhide, "if BIOS sets Dev0:Fun1 up as hidden:" |
1150 | " 1=force unhide and hope BIOS doesn't fight driver for Dev0:Fun1 access"); | 1319 | " 1=force unhide and hope BIOS doesn't fight driver for Dev0:Fun1 access"); |
1320 | |||
1151 | module_param(edac_op_state, int, 0444); | 1321 | module_param(edac_op_state, int, 0444); |
1152 | MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); | 1322 | MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); |
1323 | |||
1324 | module_param(sysbus_parity, int, 0444); | ||
1325 | MODULE_PARM_DESC(sysbus_parity, "0=disable system bus parity checking," | ||
1326 | " 1=enable system bus parity checking, default=auto-detect"); | ||
diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c index 96ecc4926641..c7d11cc4e21a 100644 --- a/drivers/edac/e7xxx_edac.c +++ b/drivers/edac/e7xxx_edac.c | |||
@@ -414,16 +414,6 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx) | |||
414 | 414 | ||
415 | debugf0("%s(): mci\n", __func__); | 415 | debugf0("%s(): mci\n", __func__); |
416 | 416 | ||
417 | /* make sure error reporting method is sane */ | ||
418 | switch (edac_op_state) { | ||
419 | case EDAC_OPSTATE_POLL: | ||
420 | case EDAC_OPSTATE_NMI: | ||
421 | break; | ||
422 | default: | ||
423 | edac_op_state = EDAC_OPSTATE_POLL; | ||
424 | break; | ||
425 | } | ||
426 | |||
427 | pci_read_config_dword(pdev, E7XXX_DRC, &drc); | 417 | pci_read_config_dword(pdev, E7XXX_DRC, &drc); |
428 | 418 | ||
429 | drc_chan = dual_channel_active(drc, dev_idx); | 419 | drc_chan = dual_channel_active(drc, dev_idx); |
@@ -565,6 +555,9 @@ static struct pci_driver e7xxx_driver = { | |||
565 | 555 | ||
566 | static int __init e7xxx_init(void) | 556 | static int __init e7xxx_init(void) |
567 | { | 557 | { |
558 | /* Ensure that the OPSTATE is set correctly for POLL or NMI */ | ||
559 | opstate_init(); | ||
560 | |||
568 | return pci_register_driver(&e7xxx_driver); | 561 | return pci_register_driver(&e7xxx_driver); |
569 | } | 562 | } |
570 | 563 | ||
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c index b9552bc03dea..63372fa7ecfe 100644 --- a/drivers/edac/edac_device.c +++ b/drivers/edac/edac_device.c | |||
@@ -36,7 +36,7 @@ | |||
36 | * is protected by the 'device_ctls_mutex' lock | 36 | * is protected by the 'device_ctls_mutex' lock |
37 | */ | 37 | */ |
38 | static DEFINE_MUTEX(device_ctls_mutex); | 38 | static DEFINE_MUTEX(device_ctls_mutex); |
39 | static struct list_head edac_device_list = LIST_HEAD_INIT(edac_device_list); | 39 | static LIST_HEAD(edac_device_list); |
40 | 40 | ||
41 | #ifdef CONFIG_EDAC_DEBUG | 41 | #ifdef CONFIG_EDAC_DEBUG |
42 | static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev) | 42 | static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev) |
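The LIST_HEAD_INIT conversions here (and in edac_mc.c and edac_pci.c below) change no behaviour: LIST_HEAD(name) in include/linux/list.h expands to essentially the declaration-plus-initializer that was being spelled out by hand.

    /* include/linux/list.h (essentially) */
    #define LIST_HEAD_INIT(name) { &(name), &(name) }

    #define LIST_HEAD(name) \
            struct list_head name = LIST_HEAD_INIT(name)

    /* so the two spellings are equivalent: */
    static LIST_HEAD(edac_device_list);
    /* == static struct list_head edac_device_list = LIST_HEAD_INIT(edac_device_list); */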
@@ -375,37 +375,6 @@ static void del_edac_device_from_global_list(struct edac_device_ctl_info | |||
375 | wait_for_completion(&edac_device->removal_complete); | 375 | wait_for_completion(&edac_device->removal_complete); |
376 | } | 376 | } |
377 | 377 | ||
378 | /** | ||
379 | * edac_device_find | ||
380 | * Search for a edac_device_ctl_info structure whose index is 'idx'. | ||
381 | * | ||
382 | * If found, return a pointer to the structure. | ||
383 | * Else return NULL. | ||
384 | * | ||
385 | * Caller must hold device_ctls_mutex. | ||
386 | */ | ||
387 | struct edac_device_ctl_info *edac_device_find(int idx) | ||
388 | { | ||
389 | struct list_head *item; | ||
390 | struct edac_device_ctl_info *edac_dev; | ||
391 | |||
392 | /* Iterate over list, looking for exact match of ID */ | ||
393 | list_for_each(item, &edac_device_list) { | ||
394 | edac_dev = list_entry(item, struct edac_device_ctl_info, link); | ||
395 | |||
396 | if (edac_dev->dev_idx >= idx) { | ||
397 | if (edac_dev->dev_idx == idx) | ||
398 | return edac_dev; | ||
399 | |||
400 | /* not on list, so terminate early */ | ||
401 | break; | ||
402 | } | ||
403 | } | ||
404 | |||
405 | return NULL; | ||
406 | } | ||
407 | EXPORT_SYMBOL_GPL(edac_device_find); | ||
408 | |||
409 | /* | 378 | /* |
410 | * edac_device_workq_function | 379 | * edac_device_workq_function |
411 | * performs the operation scheduled by a workq request | 380 | * performs the operation scheduled by a workq request |
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c index 063a1bffe38b..a4cf1645f588 100644 --- a/drivers/edac/edac_mc.c +++ b/drivers/edac/edac_mc.c | |||
@@ -36,7 +36,7 @@ | |||
36 | 36 | ||
37 | /* lock to memory controller's control array */ | 37 | /* lock to memory controller's control array */ |
38 | static DEFINE_MUTEX(mem_ctls_mutex); | 38 | static DEFINE_MUTEX(mem_ctls_mutex); |
39 | static struct list_head mc_devices = LIST_HEAD_INIT(mc_devices); | 39 | static LIST_HEAD(mc_devices); |
40 | 40 | ||
41 | #ifdef CONFIG_EDAC_DEBUG | 41 | #ifdef CONFIG_EDAC_DEBUG |
42 | 42 | ||
@@ -886,24 +886,3 @@ void edac_mc_handle_fbd_ce(struct mem_ctl_info *mci, | |||
886 | mci->csrows[csrow].channels[channel].ce_count++; | 886 | mci->csrows[csrow].channels[channel].ce_count++; |
887 | } | 887 | } |
888 | EXPORT_SYMBOL(edac_mc_handle_fbd_ce); | 888 | EXPORT_SYMBOL(edac_mc_handle_fbd_ce); |
889 | |||
890 | /* | ||
891 | * Iterate over all MC instances and check for ECC, et al, errors | ||
892 | */ | ||
893 | void edac_check_mc_devices(void) | ||
894 | { | ||
895 | struct list_head *item; | ||
896 | struct mem_ctl_info *mci; | ||
897 | |||
898 | debugf3("%s()\n", __func__); | ||
899 | mutex_lock(&mem_ctls_mutex); | ||
900 | |||
901 | list_for_each(item, &mc_devices) { | ||
902 | mci = list_entry(item, struct mem_ctl_info, link); | ||
903 | |||
904 | if (mci->edac_check != NULL) | ||
905 | mci->edac_check(mci); | ||
906 | } | ||
907 | |||
908 | mutex_unlock(&mem_ctls_mutex); | ||
909 | } | ||
diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h index cbc419c8ebc1..233d4798c3aa 100644 --- a/drivers/edac/edac_module.h +++ b/drivers/edac/edac_module.h | |||
@@ -27,7 +27,6 @@ extern int edac_mc_register_sysfs_main_kobj(struct mem_ctl_info *mci); | |||
27 | extern void edac_mc_unregister_sysfs_main_kobj(struct mem_ctl_info *mci); | 27 | extern void edac_mc_unregister_sysfs_main_kobj(struct mem_ctl_info *mci); |
28 | extern int edac_create_sysfs_mci_device(struct mem_ctl_info *mci); | 28 | extern int edac_create_sysfs_mci_device(struct mem_ctl_info *mci); |
29 | extern void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci); | 29 | extern void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci); |
30 | extern void edac_check_mc_devices(void); | ||
31 | extern int edac_get_log_ue(void); | 30 | extern int edac_get_log_ue(void); |
32 | extern int edac_get_log_ce(void); | 31 | extern int edac_get_log_ce(void); |
33 | extern int edac_get_panic_on_ue(void); | 32 | extern int edac_get_panic_on_ue(void); |
diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c index 32be43576a8e..9b24340b52e1 100644 --- a/drivers/edac/edac_pci.c +++ b/drivers/edac/edac_pci.c | |||
@@ -29,7 +29,7 @@ | |||
29 | #include "edac_module.h" | 29 | #include "edac_module.h" |
30 | 30 | ||
31 | static DEFINE_MUTEX(edac_pci_ctls_mutex); | 31 | static DEFINE_MUTEX(edac_pci_ctls_mutex); |
32 | static struct list_head edac_pci_list = LIST_HEAD_INIT(edac_pci_list); | 32 | static LIST_HEAD(edac_pci_list); |
33 | 33 | ||
34 | /* | 34 | /* |
35 | * edac_pci_alloc_ctl_info | 35 | * edac_pci_alloc_ctl_info |
@@ -189,6 +189,9 @@ static void del_edac_pci_from_global_list(struct edac_pci_ctl_info *pci) | |||
189 | wait_for_completion(&pci->complete); | 189 | wait_for_completion(&pci->complete); |
190 | } | 190 | } |
191 | 191 | ||
192 | #if 0 | ||
193 | /* Older code, but might use in the future */ | ||
194 | |||
192 | /* | 195 | /* |
193 | * edac_pci_find() | 196 | * edac_pci_find() |
194 | * Search for an edac_pci_ctl_info structure whose index is 'idx' | 197 | * Search for an edac_pci_ctl_info structure whose index is 'idx' |
@@ -219,6 +222,7 @@ struct edac_pci_ctl_info *edac_pci_find(int idx) | |||
219 | return NULL; | 222 | return NULL; |
220 | } | 223 | } |
221 | EXPORT_SYMBOL_GPL(edac_pci_find); | 224 | EXPORT_SYMBOL_GPL(edac_pci_find); |
225 | #endif | ||
222 | 226 | ||
223 | /* | 227 | /* |
224 | * edac_pci_workq_function() | 228 | * edac_pci_workq_function() |
@@ -422,7 +426,7 @@ EXPORT_SYMBOL_GPL(edac_pci_del_device); | |||
422 | * | 426 | * |
423 | * a Generic parity check API | 427 | * a Generic parity check API |
424 | */ | 428 | */ |
425 | void edac_pci_generic_check(struct edac_pci_ctl_info *pci) | 429 | static void edac_pci_generic_check(struct edac_pci_ctl_info *pci) |
426 | { | 430 | { |
427 | debugf4("%s()\n", __func__); | 431 | debugf4("%s()\n", __func__); |
428 | edac_pci_do_parity_check(); | 432 | edac_pci_do_parity_check(); |
diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c index 71c3195d3704..2c1fa1bb6df2 100644 --- a/drivers/edac/edac_pci_sysfs.c +++ b/drivers/edac/edac_pci_sysfs.c | |||
@@ -37,17 +37,17 @@ int edac_pci_get_check_errors(void) | |||
37 | return check_pci_errors; | 37 | return check_pci_errors; |
38 | } | 38 | } |
39 | 39 | ||
40 | int edac_pci_get_log_pe(void) | 40 | static int edac_pci_get_log_pe(void) |
41 | { | 41 | { |
42 | return edac_pci_log_pe; | 42 | return edac_pci_log_pe; |
43 | } | 43 | } |
44 | 44 | ||
45 | int edac_pci_get_log_npe(void) | 45 | static int edac_pci_get_log_npe(void) |
46 | { | 46 | { |
47 | return edac_pci_log_npe; | 47 | return edac_pci_log_npe; |
48 | } | 48 | } |
49 | 49 | ||
50 | int edac_pci_get_panic_on_pe(void) | 50 | static int edac_pci_get_panic_on_pe(void) |
51 | { | 51 | { |
52 | return edac_pci_panic_on_pe; | 52 | return edac_pci_panic_on_pe; |
53 | } | 53 | } |
@@ -197,7 +197,8 @@ error_out: | |||
197 | * | 197 | * |
198 | * unregister the kobj for the EDAC PCI instance | 198 | * unregister the kobj for the EDAC PCI instance |
199 | */ | 199 | */ |
200 | void edac_pci_unregister_sysfs_instance_kobj(struct edac_pci_ctl_info *pci) | 200 | static void edac_pci_unregister_sysfs_instance_kobj( |
201 | struct edac_pci_ctl_info *pci) | ||
201 | { | 202 | { |
202 | debugf0("%s()\n", __func__); | 203 | debugf0("%s()\n", __func__); |
203 | 204 | ||
@@ -337,7 +338,7 @@ static struct kobj_type ktype_edac_pci_main_kobj = { | |||
337 | * setup the sysfs for EDAC PCI attributes | 338 | * setup the sysfs for EDAC PCI attributes |
338 | * assumes edac_class has already been initialized | 339 | * assumes edac_class has already been initialized |
339 | */ | 340 | */ |
340 | int edac_pci_main_kobj_setup(void) | 341 | static int edac_pci_main_kobj_setup(void) |
341 | { | 342 | { |
342 | int err; | 343 | int err; |
343 | struct sysdev_class *edac_class; | 344 | struct sysdev_class *edac_class; |
diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c index 5d4292811c14..6c9a0f2a593c 100644 --- a/drivers/edac/i3000_edac.c +++ b/drivers/edac/i3000_edac.c | |||
@@ -326,15 +326,6 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx) | |||
326 | return -ENODEV; | 326 | return -ENODEV; |
327 | } | 327 | } |
328 | 328 | ||
329 | switch (edac_op_state) { | ||
330 | case EDAC_OPSTATE_POLL: | ||
331 | case EDAC_OPSTATE_NMI: | ||
332 | break; | ||
333 | default: | ||
334 | edac_op_state = EDAC_OPSTATE_POLL; | ||
335 | break; | ||
336 | } | ||
337 | |||
338 | c0dra[0] = readb(window + I3000_C0DRA + 0); /* ranks 0,1 */ | 329 | c0dra[0] = readb(window + I3000_C0DRA + 0); /* ranks 0,1 */ |
339 | c0dra[1] = readb(window + I3000_C0DRA + 1); /* ranks 2,3 */ | 330 | c0dra[1] = readb(window + I3000_C0DRA + 1); /* ranks 2,3 */ |
340 | c1dra[0] = readb(window + I3000_C1DRA + 0); /* ranks 0,1 */ | 331 | c1dra[0] = readb(window + I3000_C1DRA + 0); /* ranks 0,1 */ |
@@ -503,6 +494,10 @@ static int __init i3000_init(void) | |||
503 | int pci_rc; | 494 | int pci_rc; |
504 | 495 | ||
505 | debugf3("MC: %s()\n", __func__); | 496 | debugf3("MC: %s()\n", __func__); |
497 | |||
498 | /* Ensure that the OPSTATE is set correctly for POLL or NMI */ | ||
499 | opstate_init(); | ||
500 | |||
506 | pci_rc = pci_register_driver(&i3000_driver); | 501 | pci_rc = pci_register_driver(&i3000_driver); |
507 | if (pci_rc < 0) | 502 | if (pci_rc < 0) |
508 | goto fail0; | 503 | goto fail0; |
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c index 5a852017c17a..4a16b5b61cfb 100644 --- a/drivers/edac/i5000_edac.c +++ b/drivers/edac/i5000_edac.c | |||
@@ -1286,16 +1286,6 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx) | |||
1286 | if (PCI_FUNC(pdev->devfn) != 0) | 1286 | if (PCI_FUNC(pdev->devfn) != 0) |
1287 | return -ENODEV; | 1287 | return -ENODEV; |
1288 | 1288 | ||
1289 | /* make sure error reporting method is sane */ | ||
1290 | switch (edac_op_state) { | ||
1291 | case EDAC_OPSTATE_POLL: | ||
1292 | case EDAC_OPSTATE_NMI: | ||
1293 | break; | ||
1294 | default: | ||
1295 | edac_op_state = EDAC_OPSTATE_POLL; | ||
1296 | break; | ||
1297 | } | ||
1298 | |||
1299 | /* Ask the devices for the number of CSROWS and CHANNELS so | 1289 | /* Ask the devices for the number of CSROWS and CHANNELS so |
1300 | * that we can calculate the memory resources, etc | 1290 | * that we can calculate the memory resources, etc |
1301 | * | 1291 | * |
@@ -1478,6 +1468,9 @@ static int __init i5000_init(void) | |||
1478 | 1468 | ||
1479 | debugf2("MC: " __FILE__ ": %s()\n", __func__); | 1469 | debugf2("MC: " __FILE__ ": %s()\n", __func__); |
1480 | 1470 | ||
1471 | /* Ensure that the OPSTATE is set correctly for POLL or NMI */ | ||
1472 | opstate_init(); | ||
1473 | |||
1481 | pci_rc = pci_register_driver(&i5000_driver); | 1474 | pci_rc = pci_register_driver(&i5000_driver); |
1482 | 1475 | ||
1483 | return (pci_rc < 0) ? pci_rc : 0; | 1476 | return (pci_rc < 0) ? pci_rc : 0; |
@@ -1501,5 +1494,6 @@ MODULE_AUTHOR | |||
1501 | ("Linux Networx (http://lnxi.com) Doug Thompson <norsk5@xmission.com>"); | 1494 | ("Linux Networx (http://lnxi.com) Doug Thompson <norsk5@xmission.com>"); |
1502 | MODULE_DESCRIPTION("MC Driver for Intel I5000 memory controllers - " | 1495 | MODULE_DESCRIPTION("MC Driver for Intel I5000 memory controllers - " |
1503 | I5000_REVISION); | 1496 | I5000_REVISION); |
1497 | |||
1504 | module_param(edac_op_state, int, 0444); | 1498 | module_param(edac_op_state, int, 0444); |
1505 | MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); | 1499 | MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); |
diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c index 83bfe37c4bbb..c5305e3ee434 100644 --- a/drivers/edac/i82443bxgx_edac.c +++ b/drivers/edac/i82443bxgx_edac.c | |||
@@ -29,6 +29,7 @@ | |||
29 | 29 | ||
30 | #include <linux/slab.h> | 30 | #include <linux/slab.h> |
31 | 31 | ||
32 | #include <linux/edac.h> | ||
32 | #include "edac_core.h" | 33 | #include "edac_core.h" |
33 | 34 | ||
34 | #define I82443_REVISION "0.1" | 35 | #define I82443_REVISION "0.1" |
@@ -386,6 +387,9 @@ static struct pci_driver i82443bxgx_edacmc_driver = { | |||
386 | 387 | ||
387 | static int __init i82443bxgx_edacmc_init(void) | 388 | static int __init i82443bxgx_edacmc_init(void) |
388 | { | 389 | { |
390 | /* Ensure that the OPSTATE is set correctly for POLL or NMI */ | ||
391 | opstate_init(); | ||
392 | |||
389 | return pci_register_driver(&i82443bxgx_edacmc_driver); | 393 | return pci_register_driver(&i82443bxgx_edacmc_driver); |
390 | } | 394 | } |
391 | 395 | ||
@@ -400,3 +404,6 @@ module_exit(i82443bxgx_edacmc_exit); | |||
400 | MODULE_LICENSE("GPL"); | 404 | MODULE_LICENSE("GPL"); |
401 | MODULE_AUTHOR("Tim Small <tim@buttersideup.com> - WPAD"); | 405 | MODULE_AUTHOR("Tim Small <tim@buttersideup.com> - WPAD"); |
402 | MODULE_DESCRIPTION("EDAC MC support for Intel 82443BX/GX memory controllers"); | 406 | MODULE_DESCRIPTION("EDAC MC support for Intel 82443BX/GX memory controllers"); |
407 | |||
408 | module_param(edac_op_state, int, 0444); | ||
409 | MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); | ||
diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c index f5ecd2c4d813..c0088ba9672b 100644 --- a/drivers/edac/i82860_edac.c +++ b/drivers/edac/i82860_edac.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/pci.h> | 14 | #include <linux/pci.h> |
15 | #include <linux/pci_ids.h> | 15 | #include <linux/pci_ids.h> |
16 | #include <linux/slab.h> | 16 | #include <linux/slab.h> |
17 | #include <linux/edac.h> | ||
17 | #include "edac_core.h" | 18 | #include "edac_core.h" |
18 | 19 | ||
19 | #define I82860_REVISION " Ver: 2.0.2 " __DATE__ | 20 | #define I82860_REVISION " Ver: 2.0.2 " __DATE__ |
@@ -294,6 +295,9 @@ static int __init i82860_init(void) | |||
294 | 295 | ||
295 | debugf3("%s()\n", __func__); | 296 | debugf3("%s()\n", __func__); |
296 | 297 | ||
298 | /* Ensure that the OPSTATE is set correctly for POLL or NMI */ | ||
299 | opstate_init(); | ||
300 | |||
297 | if ((pci_rc = pci_register_driver(&i82860_driver)) < 0) | 301 | if ((pci_rc = pci_register_driver(&i82860_driver)) < 0) |
298 | goto fail0; | 302 | goto fail0; |
299 | 303 | ||
@@ -345,3 +349,6 @@ MODULE_LICENSE("GPL"); | |||
345 | MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com) " | 349 | MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com) " |
346 | "Ben Woodard <woodard@redhat.com>"); | 350 | "Ben Woodard <woodard@redhat.com>"); |
347 | MODULE_DESCRIPTION("ECC support for Intel 82860 memory hub controllers"); | 351 | MODULE_DESCRIPTION("ECC support for Intel 82860 memory hub controllers"); |
352 | |||
353 | module_param(edac_op_state, int, 0444); | ||
354 | MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); | ||
diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c index 031abadc439a..e43bdc43a1bf 100644 --- a/drivers/edac/i82875p_edac.c +++ b/drivers/edac/i82875p_edac.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/pci.h> | 18 | #include <linux/pci.h> |
19 | #include <linux/pci_ids.h> | 19 | #include <linux/pci_ids.h> |
20 | #include <linux/slab.h> | 20 | #include <linux/slab.h> |
21 | #include <linux/edac.h> | ||
21 | #include "edac_core.h" | 22 | #include "edac_core.h" |
22 | 23 | ||
23 | #define I82875P_REVISION " Ver: 2.0.2 " __DATE__ | 24 | #define I82875P_REVISION " Ver: 2.0.2 " __DATE__ |
@@ -393,6 +394,7 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx) | |||
393 | struct i82875p_error_info discard; | 394 | struct i82875p_error_info discard; |
394 | 395 | ||
395 | debugf0("%s()\n", __func__); | 396 | debugf0("%s()\n", __func__); |
397 | |||
396 | ovrfl_pdev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL); | 398 | ovrfl_pdev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL); |
397 | 399 | ||
398 | if (i82875p_setup_overfl_dev(pdev, &ovrfl_pdev, &ovrfl_window)) | 400 | if (i82875p_setup_overfl_dev(pdev, &ovrfl_pdev, &ovrfl_window)) |
@@ -532,6 +534,10 @@ static int __init i82875p_init(void) | |||
532 | int pci_rc; | 534 | int pci_rc; |
533 | 535 | ||
534 | debugf3("%s()\n", __func__); | 536 | debugf3("%s()\n", __func__); |
537 | |||
538 | /* Ensure that the OPSTATE is set correctly for POLL or NMI */ | ||
539 | opstate_init(); | ||
540 | |||
535 | pci_rc = pci_register_driver(&i82875p_driver); | 541 | pci_rc = pci_register_driver(&i82875p_driver); |
536 | 542 | ||
537 | if (pci_rc < 0) | 543 | if (pci_rc < 0) |
@@ -586,3 +592,6 @@ module_exit(i82875p_exit); | |||
586 | MODULE_LICENSE("GPL"); | 592 | MODULE_LICENSE("GPL"); |
587 | MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh"); | 593 | MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh"); |
588 | MODULE_DESCRIPTION("MC support for Intel 82875 memory hub controllers"); | 594 | MODULE_DESCRIPTION("MC support for Intel 82875 memory hub controllers"); |
595 | |||
596 | module_param(edac_op_state, int, 0444); | ||
597 | MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); | ||
diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c index 0ee888456932..2eed3ea2cf62 100644 --- a/drivers/edac/i82975x_edac.c +++ b/drivers/edac/i82975x_edac.c | |||
@@ -14,7 +14,7 @@ | |||
14 | #include <linux/pci.h> | 14 | #include <linux/pci.h> |
15 | #include <linux/pci_ids.h> | 15 | #include <linux/pci_ids.h> |
16 | #include <linux/slab.h> | 16 | #include <linux/slab.h> |
17 | 17 | #include <linux/edac.h> | |
18 | #include "edac_core.h" | 18 | #include "edac_core.h" |
19 | 19 | ||
20 | #define I82975X_REVISION " Ver: 1.0.0 " __DATE__ | 20 | #define I82975X_REVISION " Ver: 1.0.0 " __DATE__ |
@@ -611,6 +611,9 @@ static int __init i82975x_init(void) | |||
611 | 611 | ||
612 | debugf3("%s()\n", __func__); | 612 | debugf3("%s()\n", __func__); |
613 | 613 | ||
614 | /* Ensure that the OPSTATE is set correctly for POLL or NMI */ | ||
615 | opstate_init(); | ||
616 | |||
614 | pci_rc = pci_register_driver(&i82975x_driver); | 617 | pci_rc = pci_register_driver(&i82975x_driver); |
615 | if (pci_rc < 0) | 618 | if (pci_rc < 0) |
616 | goto fail0; | 619 | goto fail0; |
@@ -664,3 +667,6 @@ module_exit(i82975x_exit); | |||
664 | MODULE_LICENSE("GPL"); | 667 | MODULE_LICENSE("GPL"); |
665 | MODULE_AUTHOR("Arvind R. <arvind@acarlab.com>"); | 668 | MODULE_AUTHOR("Arvind R. <arvind@acarlab.com>"); |
666 | MODULE_DESCRIPTION("MC support for Intel 82975 memory hub controllers"); | 669 | MODULE_DESCRIPTION("MC support for Intel 82975 memory hub controllers"); |
670 | |||
671 | module_param(edac_op_state, int, 0444); | ||
672 | MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); | ||
diff --git a/drivers/edac/pasemi_edac.c b/drivers/edac/pasemi_edac.c index 90320917be28..3fd65a563848 100644 --- a/drivers/edac/pasemi_edac.c +++ b/drivers/edac/pasemi_edac.c | |||
@@ -284,6 +284,9 @@ static struct pci_driver pasemi_edac_driver = { | |||
284 | 284 | ||
285 | static int __init pasemi_edac_init(void) | 285 | static int __init pasemi_edac_init(void) |
286 | { | 286 | { |
287 | /* Ensure that the OPSTATE is set correctly for POLL or NMI */ | ||
288 | opstate_init(); | ||
289 | |||
287 | return pci_register_driver(&pasemi_edac_driver); | 290 | return pci_register_driver(&pasemi_edac_driver); |
288 | } | 291 | } |
289 | 292 | ||
@@ -298,3 +301,6 @@ module_exit(pasemi_edac_exit); | |||
298 | MODULE_LICENSE("GPL"); | 301 | MODULE_LICENSE("GPL"); |
299 | MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>"); | 302 | MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>"); |
300 | MODULE_DESCRIPTION("MC support for PA Semi PWRficient memory controller"); | 303 | MODULE_DESCRIPTION("MC support for PA Semi PWRficient memory controller"); |
304 | module_param(edac_op_state, int, 0444); | ||
305 | MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); | ||
306 | |||
diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c index e25f712f2dc3..9900675e9598 100644 --- a/drivers/edac/r82600_edac.c +++ b/drivers/edac/r82600_edac.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/pci.h> | 20 | #include <linux/pci.h> |
21 | #include <linux/pci_ids.h> | 21 | #include <linux/pci_ids.h> |
22 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
23 | #include <linux/edac.h> | ||
23 | #include "edac_core.h" | 24 | #include "edac_core.h" |
24 | 25 | ||
25 | #define R82600_REVISION " Ver: 2.0.2 " __DATE__ | 26 | #define R82600_REVISION " Ver: 2.0.2 " __DATE__ |
@@ -393,6 +394,9 @@ static struct pci_driver r82600_driver = { | |||
393 | 394 | ||
394 | static int __init r82600_init(void) | 395 | static int __init r82600_init(void) |
395 | { | 396 | { |
397 | /* Ensure that the OPSTATE is set correctly for POLL or NMI */ | ||
398 | opstate_init(); | ||
399 | |||
396 | return pci_register_driver(&r82600_driver); | 400 | return pci_register_driver(&r82600_driver); |
397 | } | 401 | } |
398 | 402 | ||
@@ -412,3 +416,6 @@ MODULE_DESCRIPTION("MC support for Radisys 82600 memory controllers"); | |||
412 | module_param(disable_hardware_scrub, bool, 0644); | 416 | module_param(disable_hardware_scrub, bool, 0644); |
413 | MODULE_PARM_DESC(disable_hardware_scrub, | 417 | MODULE_PARM_DESC(disable_hardware_scrub, |
414 | "If set, disable the chipset's automatic scrub for CEs"); | 418 | "If set, disable the chipset's automatic scrub for CEs"); |
419 | |||
420 | module_param(edac_op_state, int, 0444); | ||
421 | MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); | ||
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig index 40ffd767647d..dc2cec6127d1 100644 --- a/drivers/firmware/Kconfig +++ b/drivers/firmware/Kconfig | |||
@@ -17,6 +17,15 @@ config EDD | |||
17 | obscure configurations. Most disk controller BIOS vendors do | 17 | obscure configurations. Most disk controller BIOS vendors do |
18 | not yet implement this feature. | 18 | not yet implement this feature. |
19 | 19 | ||
20 | config EDD_OFF | ||
21 | bool "Sets default behavior for EDD detection to off" | ||
22 | depends on EDD | ||
23 | default n | ||
24 | help | ||
25 | Say Y if you want EDD disabled by default, even though it is compiled into the | ||
26 | kernel. Say N if you want EDD enabled by default. EDD can be dynamically set | ||
27 | using the kernel parameter 'edd={on|skipmbr|off}'. | ||
28 | |||
20 | config EFI_VARS | 29 | config EFI_VARS |
21 | tristate "EFI Variable Support via sysfs" | 30 | tristate "EFI Variable Support via sysfs" |
22 | depends on EFI | 31 | depends on EFI |
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c index f235940719e7..25918f7dfd0f 100644 --- a/drivers/firmware/dcdbas.c +++ b/drivers/firmware/dcdbas.c | |||
@@ -63,7 +63,7 @@ static void smi_data_buf_free(void) | |||
63 | return; | 63 | return; |
64 | 64 | ||
65 | dev_dbg(&dcdbas_pdev->dev, "%s: phys: %x size: %lu\n", | 65 | dev_dbg(&dcdbas_pdev->dev, "%s: phys: %x size: %lu\n", |
66 | __FUNCTION__, smi_data_buf_phys_addr, smi_data_buf_size); | 66 | __func__, smi_data_buf_phys_addr, smi_data_buf_size); |
67 | 67 | ||
68 | dma_free_coherent(&dcdbas_pdev->dev, smi_data_buf_size, smi_data_buf, | 68 | dma_free_coherent(&dcdbas_pdev->dev, smi_data_buf_size, smi_data_buf, |
69 | smi_data_buf_handle); | 69 | smi_data_buf_handle); |
@@ -92,7 +92,7 @@ static int smi_data_buf_realloc(unsigned long size) | |||
92 | if (!buf) { | 92 | if (!buf) { |
93 | dev_dbg(&dcdbas_pdev->dev, | 93 | dev_dbg(&dcdbas_pdev->dev, |
94 | "%s: failed to allocate memory size %lu\n", | 94 | "%s: failed to allocate memory size %lu\n", |
95 | __FUNCTION__, size); | 95 | __func__, size); |
96 | return -ENOMEM; | 96 | return -ENOMEM; |
97 | } | 97 | } |
98 | /* memory zeroed by dma_alloc_coherent */ | 98 | /* memory zeroed by dma_alloc_coherent */ |
@@ -110,7 +110,7 @@ static int smi_data_buf_realloc(unsigned long size) | |||
110 | smi_data_buf_size = size; | 110 | smi_data_buf_size = size; |
111 | 111 | ||
112 | dev_dbg(&dcdbas_pdev->dev, "%s: phys: %x size: %lu\n", | 112 | dev_dbg(&dcdbas_pdev->dev, "%s: phys: %x size: %lu\n", |
113 | __FUNCTION__, smi_data_buf_phys_addr, smi_data_buf_size); | 113 | __func__, smi_data_buf_phys_addr, smi_data_buf_size); |
114 | 114 | ||
115 | return 0; | 115 | return 0; |
116 | } | 116 | } |
@@ -258,7 +258,7 @@ static int smi_request(struct smi_cmd *smi_cmd) | |||
258 | 258 | ||
259 | if (smi_cmd->magic != SMI_CMD_MAGIC) { | 259 | if (smi_cmd->magic != SMI_CMD_MAGIC) { |
260 | dev_info(&dcdbas_pdev->dev, "%s: invalid magic value\n", | 260 | dev_info(&dcdbas_pdev->dev, "%s: invalid magic value\n", |
261 | __FUNCTION__); | 261 | __func__); |
262 | return -EBADR; | 262 | return -EBADR; |
263 | } | 263 | } |
264 | 264 | ||
@@ -267,7 +267,7 @@ static int smi_request(struct smi_cmd *smi_cmd) | |||
267 | set_cpus_allowed_ptr(current, &cpumask_of_cpu(0)); | 267 | set_cpus_allowed_ptr(current, &cpumask_of_cpu(0)); |
268 | if (smp_processor_id() != 0) { | 268 | if (smp_processor_id() != 0) { |
269 | dev_dbg(&dcdbas_pdev->dev, "%s: failed to get CPU 0\n", | 269 | dev_dbg(&dcdbas_pdev->dev, "%s: failed to get CPU 0\n", |
270 | __FUNCTION__); | 270 | __func__); |
271 | ret = -EBUSY; | 271 | ret = -EBUSY; |
272 | goto out; | 272 | goto out; |
273 | } | 273 | } |
@@ -428,7 +428,7 @@ static int host_control_smi(void) | |||
428 | 428 | ||
429 | default: | 429 | default: |
430 | dev_dbg(&dcdbas_pdev->dev, "%s: invalid SMI type %u\n", | 430 | dev_dbg(&dcdbas_pdev->dev, "%s: invalid SMI type %u\n", |
431 | __FUNCTION__, host_control_smi_type); | 431 | __func__, host_control_smi_type); |
432 | return -ENOSYS; | 432 | return -ENOSYS; |
433 | } | 433 | } |
434 | 434 | ||
@@ -456,13 +456,13 @@ static void dcdbas_host_control(void) | |||
456 | host_control_action = HC_ACTION_NONE; | 456 | host_control_action = HC_ACTION_NONE; |
457 | 457 | ||
458 | if (!smi_data_buf) { | 458 | if (!smi_data_buf) { |
459 | dev_dbg(&dcdbas_pdev->dev, "%s: no SMI buffer\n", __FUNCTION__); | 459 | dev_dbg(&dcdbas_pdev->dev, "%s: no SMI buffer\n", __func__); |
460 | return; | 460 | return; |
461 | } | 461 | } |
462 | 462 | ||
463 | if (smi_data_buf_size < sizeof(struct apm_cmd)) { | 463 | if (smi_data_buf_size < sizeof(struct apm_cmd)) { |
464 | dev_dbg(&dcdbas_pdev->dev, "%s: SMI buffer too small\n", | 464 | dev_dbg(&dcdbas_pdev->dev, "%s: SMI buffer too small\n", |
465 | __FUNCTION__); | 465 | __func__); |
466 | return; | 466 | return; |
467 | } | 467 | } |
468 | 468 | ||
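These hunks are a pure spelling change: __FUNCTION__ is a GCC extension, while __func__ is the C99 predefined identifier with the same value, so the log output is unchanged. For reference, a minimal sketch (function name illustrative):

#include <linux/device.h>

/* __func__ expands to the enclosing function's name, here "smi_example" */
static int smi_example(struct device *dev)
{
        dev_dbg(dev, "%s: entered\n", __func__);
        return 0;
}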
diff --git a/drivers/firmware/dell_rbu.c b/drivers/firmware/dell_rbu.c index 477a3d0e3caf..6a8b1e037e07 100644 --- a/drivers/firmware/dell_rbu.c +++ b/drivers/firmware/dell_rbu.c | |||
@@ -123,7 +123,7 @@ static int create_packet(void *data, size_t length) | |||
123 | if (!newpacket) { | 123 | if (!newpacket) { |
124 | printk(KERN_WARNING | 124 | printk(KERN_WARNING |
125 | "dell_rbu:%s: failed to allocate new " | 125 | "dell_rbu:%s: failed to allocate new " |
126 | "packet\n", __FUNCTION__); | 126 | "packet\n", __func__); |
127 | retval = -ENOMEM; | 127 | retval = -ENOMEM; |
128 | spin_lock(&rbu_data.lock); | 128 | spin_lock(&rbu_data.lock); |
129 | goto out_noalloc; | 129 | goto out_noalloc; |
@@ -152,7 +152,7 @@ static int create_packet(void *data, size_t length) | |||
152 | printk(KERN_WARNING | 152 | printk(KERN_WARNING |
153 | "dell_rbu:%s: failed to allocate " | 153 | "dell_rbu:%s: failed to allocate " |
154 | "invalid_addr_packet_array \n", | 154 | "invalid_addr_packet_array \n", |
155 | __FUNCTION__); | 155 | __func__); |
156 | retval = -ENOMEM; | 156 | retval = -ENOMEM; |
157 | spin_lock(&rbu_data.lock); | 157 | spin_lock(&rbu_data.lock); |
158 | goto out_alloc_packet; | 158 | goto out_alloc_packet; |
@@ -164,7 +164,7 @@ static int create_packet(void *data, size_t length) | |||
164 | if (!packet_data_temp_buf) { | 164 | if (!packet_data_temp_buf) { |
165 | printk(KERN_WARNING | 165 | printk(KERN_WARNING |
166 | "dell_rbu:%s: failed to allocate new " | 166 | "dell_rbu:%s: failed to allocate new " |
167 | "packet\n", __FUNCTION__); | 167 | "packet\n", __func__); |
168 | retval = -ENOMEM; | 168 | retval = -ENOMEM; |
169 | spin_lock(&rbu_data.lock); | 169 | spin_lock(&rbu_data.lock); |
170 | goto out_alloc_packet_array; | 170 | goto out_alloc_packet_array; |
@@ -416,7 +416,7 @@ static int img_update_realloc(unsigned long size) | |||
416 | */ | 416 | */ |
417 | if ((size != 0) && (rbu_data.image_update_buffer == NULL)) { | 417 | if ((size != 0) && (rbu_data.image_update_buffer == NULL)) { |
418 | printk(KERN_ERR "dell_rbu:%s: corruption " | 418 | printk(KERN_ERR "dell_rbu:%s: corruption " |
419 | "check failed\n", __FUNCTION__); | 419 | "check failed\n", __func__); |
420 | return -EINVAL; | 420 | return -EINVAL; |
421 | } | 421 | } |
422 | /* | 422 | /* |
@@ -642,7 +642,7 @@ static ssize_t write_rbu_image_type(struct kobject *kobj, | |||
642 | if (req_firm_rc) { | 642 | if (req_firm_rc) { |
643 | printk(KERN_ERR | 643 | printk(KERN_ERR |
644 | "dell_rbu:%s request_firmware_nowait" | 644 | "dell_rbu:%s request_firmware_nowait" |
645 | " failed %d\n", __FUNCTION__, rc); | 645 | " failed %d\n", __func__, rc); |
646 | rc = -EIO; | 646 | rc = -EIO; |
647 | } else | 647 | } else |
648 | rbu_data.entry_created = 1; | 648 | rbu_data.entry_created = 1; |
@@ -718,7 +718,7 @@ static int __init dcdrbu_init(void) | |||
718 | if (IS_ERR(rbu_device)) { | 718 | if (IS_ERR(rbu_device)) { |
719 | printk(KERN_ERR | 719 | printk(KERN_ERR |
720 | "dell_rbu:%s:platform_device_register_simple " | 720 | "dell_rbu:%s:platform_device_register_simple " |
721 | "failed\n", __FUNCTION__); | 721 | "failed\n", __func__); |
722 | return PTR_ERR(rbu_device); | 722 | return PTR_ERR(rbu_device); |
723 | } | 723 | } |
724 | 724 | ||
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index e03c67dd3e63..f43d6d3cf2fa 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c | |||
@@ -606,7 +606,7 @@ static u8 *fetch_item(__u8 *start, __u8 *end, struct hid_item *item) | |||
606 | case 2: | 606 | case 2: |
607 | if ((end - start) < 2) | 607 | if ((end - start) < 2) |
608 | return NULL; | 608 | return NULL; |
609 | item->data.u16 = le16_to_cpu(get_unaligned((__le16*)start)); | 609 | item->data.u16 = get_unaligned_le16(start); |
610 | start = (__u8 *)((__le16 *)start + 1); | 610 | start = (__u8 *)((__le16 *)start + 1); |
611 | return start; | 611 | return start; |
612 | 612 | ||
@@ -614,7 +614,7 @@ static u8 *fetch_item(__u8 *start, __u8 *end, struct hid_item *item) | |||
614 | item->size++; | 614 | item->size++; |
615 | if ((end - start) < 4) | 615 | if ((end - start) < 4) |
616 | return NULL; | 616 | return NULL; |
617 | item->data.u32 = le32_to_cpu(get_unaligned((__le32*)start)); | 617 | item->data.u32 = get_unaligned_le32(start); |
618 | start = (__u8 *)((__le32 *)start + 1); | 618 | start = (__u8 *)((__le32 *)start + 1); |
619 | return start; | 619 | return start; |
620 | } | 620 | } |
@@ -765,7 +765,7 @@ static __inline__ __u32 extract(__u8 *report, unsigned offset, unsigned n) | |||
765 | 765 | ||
766 | report += offset >> 3; /* adjust byte index */ | 766 | report += offset >> 3; /* adjust byte index */ |
767 | offset &= 7; /* now only need bit offset into one byte */ | 767 | offset &= 7; /* now only need bit offset into one byte */ |
768 | x = le64_to_cpu(get_unaligned((__le64 *) report)); | 768 | x = get_unaligned_le64(report); |
769 | x = (x >> offset) & ((1ULL << n) - 1); /* extract bit field */ | 769 | x = (x >> offset) & ((1ULL << n) - 1); /* extract bit field */ |
770 | return (u32) x; | 770 | return (u32) x; |
771 | } | 771 | } |
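get_unaligned_le16/32/64() from <asm/unaligned.h> fold the unaligned load and the le*_to_cpu() byte-swap into one call, replacing the older two-step spelling removed above. A small self-contained sketch (the field layout is made up for illustration):

#include <linux/types.h>
#include <asm/unaligned.h>

/* Read a 16-bit little-endian length followed by a 32-bit little-endian
 * value from a possibly unaligned byte buffer. */
static u32 parse_example(const u8 *buf)
{
        u16 len = get_unaligned_le16(buf);
        u32 val = get_unaligned_le32(buf + 2);

        return len ? val : 0;
}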
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c index 7b2f3815a838..8d6ad812a014 100644 --- a/drivers/ide/ide-proc.c +++ b/drivers/ide/ide-proc.c | |||
@@ -822,6 +822,7 @@ static int ide_drivers_open(struct inode *inode, struct file *file) | |||
822 | } | 822 | } |
823 | 823 | ||
824 | static const struct file_operations ide_drivers_operations = { | 824 | static const struct file_operations ide_drivers_operations = { |
825 | .owner = THIS_MODULE, | ||
825 | .open = ide_drivers_open, | 826 | .open = ide_drivers_open, |
826 | .read = seq_read, | 827 | .read = seq_read, |
827 | .llseek = seq_lseek, | 828 | .llseek = seq_lseek, |
@@ -830,16 +831,12 @@ static const struct file_operations ide_drivers_operations = { | |||
830 | 831 | ||
831 | void proc_ide_create(void) | 832 | void proc_ide_create(void) |
832 | { | 833 | { |
833 | struct proc_dir_entry *entry; | ||
834 | |||
835 | proc_ide_root = proc_mkdir("ide", NULL); | 834 | proc_ide_root = proc_mkdir("ide", NULL); |
836 | 835 | ||
837 | if (!proc_ide_root) | 836 | if (!proc_ide_root) |
838 | return; | 837 | return; |
839 | 838 | ||
840 | entry = create_proc_entry("drivers", 0, proc_ide_root); | 839 | proc_create("drivers", 0, proc_ide_root, &ide_drivers_operations); |
841 | if (entry) | ||
842 | entry->proc_fops = &ide_drivers_operations; | ||
843 | } | 840 | } |
844 | 841 | ||
845 | void proc_ide_destroy(void) | 842 | void proc_ide_destroy(void) |
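proc_create() registers the entry together with its file_operations, closing the window in which the old create_proc_entry()-then-assign sequence could be opened while ->proc_fops was still NULL; setting .owner = THIS_MODULE in the fops lets the VFS pin the module while the file is open. A minimal, self-contained sketch of the same pattern (all names illustrative):

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int example_show(struct seq_file *m, void *v)
{
        seq_printf(m, "hello from example\n");
        return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
        return single_open(file, example_show, NULL);
}

static const struct file_operations example_proc_fops = {
        .owner   = THIS_MODULE,
        .open    = example_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
};

static int __init example_init(void)
{
        /* one call registers the entry and its fops together */
        if (!proc_create("example", 0, NULL, &example_proc_fops))
                return -ENOMEM;
        return 0;
}

static void __exit example_exit(void)
{
        remove_proc_entry("example", NULL);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");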
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c index 4e3128ff73c1..fe78f7d25099 100644 --- a/drivers/infiniband/core/umem.c +++ b/drivers/infiniband/core/umem.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include <linux/dma-mapping.h> | 38 | #include <linux/dma-mapping.h> |
39 | #include <linux/sched.h> | 39 | #include <linux/sched.h> |
40 | #include <linux/hugetlb.h> | 40 | #include <linux/hugetlb.h> |
41 | #include <linux/dma-attrs.h> | ||
41 | 42 | ||
42 | #include "uverbs.h" | 43 | #include "uverbs.h" |
43 | 44 | ||
@@ -72,9 +73,10 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d | |||
72 | * @addr: userspace virtual address to start at | 73 | * @addr: userspace virtual address to start at |
73 | * @size: length of region to pin | 74 | * @size: length of region to pin |
74 | * @access: IB_ACCESS_xxx flags for memory being pinned | 75 | * @access: IB_ACCESS_xxx flags for memory being pinned |
76 | * @dmasync: flush in-flight DMA when the memory region is written | ||
75 | */ | 77 | */ |
76 | struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, | 78 | struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, |
77 | size_t size, int access) | 79 | size_t size, int access, int dmasync) |
78 | { | 80 | { |
79 | struct ib_umem *umem; | 81 | struct ib_umem *umem; |
80 | struct page **page_list; | 82 | struct page **page_list; |
@@ -87,6 +89,10 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, | |||
87 | int ret; | 89 | int ret; |
88 | int off; | 90 | int off; |
89 | int i; | 91 | int i; |
92 | DEFINE_DMA_ATTRS(attrs); | ||
93 | |||
94 | if (dmasync) | ||
95 | dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs); | ||
90 | 96 | ||
91 | if (!can_do_mlock()) | 97 | if (!can_do_mlock()) |
92 | return ERR_PTR(-EPERM); | 98 | return ERR_PTR(-EPERM); |
@@ -174,10 +180,11 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, | |||
174 | sg_set_page(&chunk->page_list[i], page_list[i + off], PAGE_SIZE, 0); | 180 | sg_set_page(&chunk->page_list[i], page_list[i + off], PAGE_SIZE, 0); |
175 | } | 181 | } |
176 | 182 | ||
177 | chunk->nmap = ib_dma_map_sg(context->device, | 183 | chunk->nmap = ib_dma_map_sg_attrs(context->device, |
178 | &chunk->page_list[0], | 184 | &chunk->page_list[0], |
179 | chunk->nents, | 185 | chunk->nents, |
180 | DMA_BIDIRECTIONAL); | 186 | DMA_BIDIRECTIONAL, |
187 | &attrs); | ||
181 | if (chunk->nmap <= 0) { | 188 | if (chunk->nmap <= 0) { |
182 | for (i = 0; i < chunk->nents; ++i) | 189 | for (i = 0; i < chunk->nents; ++i) |
183 | put_page(sg_page(&chunk->page_list[i])); | 190 | put_page(sg_page(&chunk->page_list[i])); |
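With the extra dmasync argument, callers can ask ib_umem_get() to map the region with DMA_ATTR_WRITE_BARRIER so in-flight DMA is flushed when the region is written; most callers below simply pass 0. A hedged caller-side sketch mirroring the mlx4 CQ path (error handling trimmed, names illustrative):

#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>

/* Pin a userspace CQ buffer and request the write-barrier attribute by
 * passing dmasync=1; ordinary data buffers would pass 0. */
static struct ib_umem *pin_cq_buf(struct ib_ucontext *context,
                                  unsigned long addr, size_t size)
{
        return ib_umem_get(context, addr, size, IB_ACCESS_LOCAL_WRITE, 1);
}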
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c index 6af2c0f79a67..2acf9b62cf99 100644 --- a/drivers/infiniband/hw/amso1100/c2_provider.c +++ b/drivers/infiniband/hw/amso1100/c2_provider.c | |||
@@ -452,7 +452,7 @@ static struct ib_mr *c2_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | |||
452 | return ERR_PTR(-ENOMEM); | 452 | return ERR_PTR(-ENOMEM); |
453 | c2mr->pd = c2pd; | 453 | c2mr->pd = c2pd; |
454 | 454 | ||
455 | c2mr->umem = ib_umem_get(pd->uobject->context, start, length, acc); | 455 | c2mr->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0); |
456 | if (IS_ERR(c2mr->umem)) { | 456 | if (IS_ERR(c2mr->umem)) { |
457 | err = PTR_ERR(c2mr->umem); | 457 | err = PTR_ERR(c2mr->umem); |
458 | kfree(c2mr); | 458 | kfree(c2mr); |
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c index ab4695c1dd56..e343e9e64844 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_provider.c +++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c | |||
@@ -602,7 +602,7 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | |||
602 | if (!mhp) | 602 | if (!mhp) |
603 | return ERR_PTR(-ENOMEM); | 603 | return ERR_PTR(-ENOMEM); |
604 | 604 | ||
605 | mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc); | 605 | mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0); |
606 | if (IS_ERR(mhp->umem)) { | 606 | if (IS_ERR(mhp->umem)) { |
607 | err = PTR_ERR(mhp->umem); | 607 | err = PTR_ERR(mhp->umem); |
608 | kfree(mhp); | 608 | kfree(mhp); |
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c index 46ae4eb2c4e1..f974367cad40 100644 --- a/drivers/infiniband/hw/ehca/ehca_mrmw.c +++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c | |||
@@ -323,7 +323,7 @@ struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | |||
323 | } | 323 | } |
324 | 324 | ||
325 | e_mr->umem = ib_umem_get(pd->uobject->context, start, length, | 325 | e_mr->umem = ib_umem_get(pd->uobject->context, start, length, |
326 | mr_access_flags); | 326 | mr_access_flags, 0); |
327 | if (IS_ERR(e_mr->umem)) { | 327 | if (IS_ERR(e_mr->umem)) { |
328 | ib_mr = (void *)e_mr->umem; | 328 | ib_mr = (void *)e_mr->umem; |
329 | goto reg_user_mr_exit1; | 329 | goto reg_user_mr_exit1; |
diff --git a/drivers/infiniband/hw/ipath/ipath_mr.c b/drivers/infiniband/hw/ipath/ipath_mr.c index db4ba92f79fc..9d343b7c2f3b 100644 --- a/drivers/infiniband/hw/ipath/ipath_mr.c +++ b/drivers/infiniband/hw/ipath/ipath_mr.c | |||
@@ -195,7 +195,8 @@ struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | |||
195 | goto bail; | 195 | goto bail; |
196 | } | 196 | } |
197 | 197 | ||
198 | umem = ib_umem_get(pd->uobject->context, start, length, mr_access_flags); | 198 | umem = ib_umem_get(pd->uobject->context, start, length, |
199 | mr_access_flags, 0); | ||
199 | if (IS_ERR(umem)) | 200 | if (IS_ERR(umem)) |
200 | return (void *) umem; | 201 | return (void *) umem; |
201 | 202 | ||
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c index 5e570bb0bb6f..e3dddfc687f9 100644 --- a/drivers/infiniband/hw/mlx4/cq.c +++ b/drivers/infiniband/hw/mlx4/cq.c | |||
@@ -137,7 +137,7 @@ static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *cont | |||
137 | int err; | 137 | int err; |
138 | 138 | ||
139 | *umem = ib_umem_get(context, buf_addr, cqe * sizeof (struct mlx4_cqe), | 139 | *umem = ib_umem_get(context, buf_addr, cqe * sizeof (struct mlx4_cqe), |
140 | IB_ACCESS_LOCAL_WRITE); | 140 | IB_ACCESS_LOCAL_WRITE, 1); |
141 | if (IS_ERR(*umem)) | 141 | if (IS_ERR(*umem)) |
142 | return PTR_ERR(*umem); | 142 | return PTR_ERR(*umem); |
143 | 143 | ||
diff --git a/drivers/infiniband/hw/mlx4/doorbell.c b/drivers/infiniband/hw/mlx4/doorbell.c index 8e342cc9baec..8aee4233b388 100644 --- a/drivers/infiniband/hw/mlx4/doorbell.c +++ b/drivers/infiniband/hw/mlx4/doorbell.c | |||
@@ -63,7 +63,7 @@ int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt, | |||
63 | page->user_virt = (virt & PAGE_MASK); | 63 | page->user_virt = (virt & PAGE_MASK); |
64 | page->refcnt = 0; | 64 | page->refcnt = 0; |
65 | page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK, | 65 | page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK, |
66 | PAGE_SIZE, 0); | 66 | PAGE_SIZE, 0, 0); |
67 | if (IS_ERR(page->umem)) { | 67 | if (IS_ERR(page->umem)) { |
68 | err = PTR_ERR(page->umem); | 68 | err = PTR_ERR(page->umem); |
69 | kfree(page); | 69 | kfree(page); |
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c index fe2c2e94a5f8..68e92485fc76 100644 --- a/drivers/infiniband/hw/mlx4/mr.c +++ b/drivers/infiniband/hw/mlx4/mr.c | |||
@@ -132,7 +132,8 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | |||
132 | if (!mr) | 132 | if (!mr) |
133 | return ERR_PTR(-ENOMEM); | 133 | return ERR_PTR(-ENOMEM); |
134 | 134 | ||
135 | mr->umem = ib_umem_get(pd->uobject->context, start, length, access_flags); | 135 | mr->umem = ib_umem_get(pd->uobject->context, start, length, |
136 | access_flags, 0); | ||
136 | if (IS_ERR(mr->umem)) { | 137 | if (IS_ERR(mr->umem)) { |
137 | err = PTR_ERR(mr->umem); | 138 | err = PTR_ERR(mr->umem); |
138 | goto err_free; | 139 | goto err_free; |
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 80ea8b9e7761..8e02ecfec188 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c | |||
@@ -482,7 +482,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, | |||
482 | goto err; | 482 | goto err; |
483 | 483 | ||
484 | qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, | 484 | qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, |
485 | qp->buf_size, 0); | 485 | qp->buf_size, 0, 0); |
486 | if (IS_ERR(qp->umem)) { | 486 | if (IS_ERR(qp->umem)) { |
487 | err = PTR_ERR(qp->umem); | 487 | err = PTR_ERR(qp->umem); |
488 | goto err; | 488 | goto err; |
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c index 204619702f9d..12d6bc6f8007 100644 --- a/drivers/infiniband/hw/mlx4/srq.c +++ b/drivers/infiniband/hw/mlx4/srq.c | |||
@@ -109,7 +109,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd, | |||
109 | } | 109 | } |
110 | 110 | ||
111 | srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, | 111 | srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, |
112 | buf_size, 0); | 112 | buf_size, 0, 0); |
113 | if (IS_ERR(srq->umem)) { | 113 | if (IS_ERR(srq->umem)) { |
114 | err = PTR_ERR(srq->umem); | 114 | err = PTR_ERR(srq->umem); |
115 | goto err_srq; | 115 | goto err_srq; |
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index 696e1f302332..2a9f460cf061 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c | |||
@@ -1006,17 +1006,23 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | |||
1006 | struct mthca_dev *dev = to_mdev(pd->device); | 1006 | struct mthca_dev *dev = to_mdev(pd->device); |
1007 | struct ib_umem_chunk *chunk; | 1007 | struct ib_umem_chunk *chunk; |
1008 | struct mthca_mr *mr; | 1008 | struct mthca_mr *mr; |
1009 | struct mthca_reg_mr ucmd; | ||
1009 | u64 *pages; | 1010 | u64 *pages; |
1010 | int shift, n, len; | 1011 | int shift, n, len; |
1011 | int i, j, k; | 1012 | int i, j, k; |
1012 | int err = 0; | 1013 | int err = 0; |
1013 | int write_mtt_size; | 1014 | int write_mtt_size; |
1014 | 1015 | ||
1016 | if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) | ||
1017 | return ERR_PTR(-EFAULT); | ||
1018 | |||
1015 | mr = kmalloc(sizeof *mr, GFP_KERNEL); | 1019 | mr = kmalloc(sizeof *mr, GFP_KERNEL); |
1016 | if (!mr) | 1020 | if (!mr) |
1017 | return ERR_PTR(-ENOMEM); | 1021 | return ERR_PTR(-ENOMEM); |
1018 | 1022 | ||
1019 | mr->umem = ib_umem_get(pd->uobject->context, start, length, acc); | 1023 | mr->umem = ib_umem_get(pd->uobject->context, start, length, acc, |
1024 | ucmd.mr_attrs & MTHCA_MR_DMASYNC); | ||
1025 | |||
1020 | if (IS_ERR(mr->umem)) { | 1026 | if (IS_ERR(mr->umem)) { |
1021 | err = PTR_ERR(mr->umem); | 1027 | err = PTR_ERR(mr->umem); |
1022 | goto err; | 1028 | goto err; |
diff --git a/drivers/infiniband/hw/mthca/mthca_user.h b/drivers/infiniband/hw/mthca/mthca_user.h index 02cc0a766f3a..f8cb3b664d37 100644 --- a/drivers/infiniband/hw/mthca/mthca_user.h +++ b/drivers/infiniband/hw/mthca/mthca_user.h | |||
@@ -41,7 +41,7 @@ | |||
41 | * Increment this value if any changes that break userspace ABI | 41 | * Increment this value if any changes that break userspace ABI |
42 | * compatibility are made. | 42 | * compatibility are made. |
43 | */ | 43 | */ |
44 | #define MTHCA_UVERBS_ABI_VERSION 1 | 44 | #define MTHCA_UVERBS_ABI_VERSION 2 |
45 | 45 | ||
46 | /* | 46 | /* |
47 | * Make sure that all structs defined in this file remain laid out so | 47 | * Make sure that all structs defined in this file remain laid out so |
@@ -61,6 +61,14 @@ struct mthca_alloc_pd_resp { | |||
61 | __u32 reserved; | 61 | __u32 reserved; |
62 | }; | 62 | }; |
63 | 63 | ||
64 | struct mthca_reg_mr { | ||
65 | __u32 mr_attrs; | ||
66 | #define MTHCA_MR_DMASYNC 0x1 | ||
67 | /* mark the memory region with a DMA attribute that causes | ||
68 | * in-flight DMA to be flushed when the region is written to */ | ||
69 | __u32 reserved; | ||
70 | }; | ||
71 | |||
64 | struct mthca_create_cq { | 72 | struct mthca_create_cq { |
65 | __u32 lkey; | 73 | __u32 lkey; |
66 | __u32 pdn; | 74 | __u32 pdn; |
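Because userspace must now pass a struct mthca_reg_mr in the reg_mr udata, MTHCA_UVERBS_ABI_VERSION is bumped to 2. The kernel side consumes it with ib_copy_from_udata(), as in the mthca_provider.c hunk above; a trimmed sketch of just that step (error unwinding omitted):

#include <rdma/ib_verbs.h>
/* struct mthca_reg_mr and MTHCA_MR_DMASYNC come from mthca_user.h */

static int parse_reg_mr_cmd(struct ib_udata *udata, int *dmasync)
{
        struct mthca_reg_mr ucmd;

        if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
                return -EFAULT;

        *dmasync = !!(ucmd.mr_attrs & MTHCA_MR_DMASYNC);
        return 0;
}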
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index ee74f7c7a6da..9ae397a0ff7e 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c | |||
@@ -2377,7 +2377,7 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | |||
2377 | u8 single_page = 1; | 2377 | u8 single_page = 1; |
2378 | u8 stag_key; | 2378 | u8 stag_key; |
2379 | 2379 | ||
2380 | region = ib_umem_get(pd->uobject->context, start, length, acc); | 2380 | region = ib_umem_get(pd->uobject->context, start, length, acc, 0); |
2381 | if (IS_ERR(region)) { | 2381 | if (IS_ERR(region)) { |
2382 | return (struct ib_mr *)region; | 2382 | return (struct ib_mr *)region; |
2383 | } | 2383 | } |
diff --git a/drivers/input/input.c b/drivers/input/input.c index f02c242c3114..27006fc18305 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c | |||
@@ -898,30 +898,26 @@ static int __init input_proc_init(void) | |||
898 | { | 898 | { |
899 | struct proc_dir_entry *entry; | 899 | struct proc_dir_entry *entry; |
900 | 900 | ||
901 | proc_bus_input_dir = proc_mkdir("input", proc_bus); | 901 | proc_bus_input_dir = proc_mkdir("bus/input", NULL); |
902 | if (!proc_bus_input_dir) | 902 | if (!proc_bus_input_dir) |
903 | return -ENOMEM; | 903 | return -ENOMEM; |
904 | 904 | ||
905 | proc_bus_input_dir->owner = THIS_MODULE; | 905 | proc_bus_input_dir->owner = THIS_MODULE; |
906 | 906 | ||
907 | entry = create_proc_entry("devices", 0, proc_bus_input_dir); | 907 | entry = proc_create("devices", 0, proc_bus_input_dir, |
908 | &input_devices_fileops); | ||
908 | if (!entry) | 909 | if (!entry) |
909 | goto fail1; | 910 | goto fail1; |
910 | 911 | ||
911 | entry->owner = THIS_MODULE; | 912 | entry = proc_create("handlers", 0, proc_bus_input_dir, |
912 | entry->proc_fops = &input_devices_fileops; | 913 | &input_handlers_fileops); |
913 | |||
914 | entry = create_proc_entry("handlers", 0, proc_bus_input_dir); | ||
915 | if (!entry) | 914 | if (!entry) |
916 | goto fail2; | 915 | goto fail2; |
917 | 916 | ||
918 | entry->owner = THIS_MODULE; | ||
919 | entry->proc_fops = &input_handlers_fileops; | ||
920 | |||
921 | return 0; | 917 | return 0; |
922 | 918 | ||
923 | fail2: remove_proc_entry("devices", proc_bus_input_dir); | 919 | fail2: remove_proc_entry("devices", proc_bus_input_dir); |
924 | fail1: remove_proc_entry("input", proc_bus); | 920 | fail1: remove_proc_entry("bus/input", NULL); |
925 | return -ENOMEM; | 921 | return -ENOMEM; |
926 | } | 922 | } |
927 | 923 | ||
@@ -929,7 +925,7 @@ static void input_proc_exit(void) | |||
929 | { | 925 | { |
930 | remove_proc_entry("devices", proc_bus_input_dir); | 926 | remove_proc_entry("devices", proc_bus_input_dir); |
931 | remove_proc_entry("handlers", proc_bus_input_dir); | 927 | remove_proc_entry("handlers", proc_bus_input_dir); |
932 | remove_proc_entry("input", proc_bus); | 928 | remove_proc_entry("bus/input", NULL); |
933 | } | 929 | } |
934 | 930 | ||
935 | #else /* !CONFIG_PROC_FS */ | 931 | #else /* !CONFIG_PROC_FS */ |
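proc_mkdir() and remove_proc_entry() accept a path relative to /proc when the parent argument is NULL, which is what lets this driver drop the exported proc_bus pointer and name "bus/input" directly. A small sketch of the create/cleanup pairing (directory name illustrative):

#include <linux/proc_fs.h>

static struct proc_dir_entry *example_dir;

static int example_proc_init(void)
{
        /* the path form works because the parent argument is NULL */
        example_dir = proc_mkdir("bus/example", NULL);
        return example_dir ? 0 : -ENOMEM;
}

static void example_proc_exit(void)
{
        remove_proc_entry("bus/example", NULL);
}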
diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c index 1d759f6f8076..55c1134d6137 100644 --- a/drivers/input/tablet/aiptek.c +++ b/drivers/input/tablet/aiptek.c | |||
@@ -528,9 +528,9 @@ static void aiptek_irq(struct urb *urb) | |||
528 | (aiptek->curSetting.pointerMode)) { | 528 | (aiptek->curSetting.pointerMode)) { |
529 | aiptek->diagnostic = AIPTEK_DIAGNOSTIC_TOOL_DISALLOWED; | 529 | aiptek->diagnostic = AIPTEK_DIAGNOSTIC_TOOL_DISALLOWED; |
530 | } else { | 530 | } else { |
531 | x = le16_to_cpu(get_unaligned((__le16 *) (data + 1))); | 531 | x = get_unaligned_le16(data + 1); |
532 | y = le16_to_cpu(get_unaligned((__le16 *) (data + 3))); | 532 | y = get_unaligned_le16(data + 3); |
533 | z = le16_to_cpu(get_unaligned((__le16 *) (data + 6))); | 533 | z = get_unaligned_le16(data + 6); |
534 | 534 | ||
535 | dv = (data[5] & 0x01) != 0 ? 1 : 0; | 535 | dv = (data[5] & 0x01) != 0 ? 1 : 0; |
536 | p = (data[5] & 0x02) != 0 ? 1 : 0; | 536 | p = (data[5] & 0x02) != 0 ? 1 : 0; |
@@ -613,8 +613,8 @@ static void aiptek_irq(struct urb *urb) | |||
613 | (aiptek->curSetting.pointerMode)) { | 613 | (aiptek->curSetting.pointerMode)) { |
614 | aiptek->diagnostic = AIPTEK_DIAGNOSTIC_TOOL_DISALLOWED; | 614 | aiptek->diagnostic = AIPTEK_DIAGNOSTIC_TOOL_DISALLOWED; |
615 | } else { | 615 | } else { |
616 | x = le16_to_cpu(get_unaligned((__le16 *) (data + 1))); | 616 | x = get_unaligned_le16(data + 1); |
617 | y = le16_to_cpu(get_unaligned((__le16 *) (data + 3))); | 617 | y = get_unaligned_le16(data + 3); |
618 | 618 | ||
619 | jitterable = data[5] & 0x1c; | 619 | jitterable = data[5] & 0x1c; |
620 | 620 | ||
@@ -679,7 +679,7 @@ static void aiptek_irq(struct urb *urb) | |||
679 | pck = (data[1] & aiptek->curSetting.stylusButtonUpper) != 0 ? 1 : 0; | 679 | pck = (data[1] & aiptek->curSetting.stylusButtonUpper) != 0 ? 1 : 0; |
680 | 680 | ||
681 | macro = dv && p && tip && !(data[3] & 1) ? (data[3] >> 1) : -1; | 681 | macro = dv && p && tip && !(data[3] & 1) ? (data[3] >> 1) : -1; |
682 | z = le16_to_cpu(get_unaligned((__le16 *) (data + 4))); | 682 | z = get_unaligned_le16(data + 4); |
683 | 683 | ||
684 | if (dv) { | 684 | if (dv) { |
685 | /* If the selected tool changed, reset the old | 685 | /* If the selected tool changed, reset the old |
@@ -757,7 +757,7 @@ static void aiptek_irq(struct urb *urb) | |||
757 | * hat switches (which just so happen to be the macroKeys.) | 757 | * hat switches (which just so happen to be the macroKeys.) |
758 | */ | 758 | */ |
759 | else if (data[0] == 6) { | 759 | else if (data[0] == 6) { |
760 | macro = le16_to_cpu(get_unaligned((__le16 *) (data + 1))); | 760 | macro = get_unaligned_le16(data + 1); |
761 | if (macro > 0) { | 761 | if (macro > 0) { |
762 | input_report_key(inputdev, macroKeyEvents[macro - 1], | 762 | input_report_key(inputdev, macroKeyEvents[macro - 1], |
763 | 0); | 763 | 0); |
@@ -952,7 +952,7 @@ aiptek_query(struct aiptek *aiptek, unsigned char command, unsigned char data) | |||
952 | buf[0], buf[1], buf[2]); | 952 | buf[0], buf[1], buf[2]); |
953 | ret = -EIO; | 953 | ret = -EIO; |
954 | } else { | 954 | } else { |
955 | ret = le16_to_cpu(get_unaligned((__le16 *) (buf + 1))); | 955 | ret = get_unaligned_le16(buf + 1); |
956 | } | 956 | } |
957 | kfree(buf); | 957 | kfree(buf); |
958 | return ret; | 958 | return ret; |
diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c index f66ca215cdec..c5a8661a1baa 100644 --- a/drivers/input/tablet/gtco.c +++ b/drivers/input/tablet/gtco.c | |||
@@ -245,11 +245,11 @@ static void parse_hid_report_descriptor(struct gtco *device, char * report, | |||
245 | data = report[i]; | 245 | data = report[i]; |
246 | break; | 246 | break; |
247 | case 2: | 247 | case 2: |
248 | data16 = le16_to_cpu(get_unaligned((__le16 *)&report[i])); | 248 | data16 = get_unaligned_le16(&report[i]); |
249 | break; | 249 | break; |
250 | case 3: | 250 | case 3: |
251 | size = 4; | 251 | size = 4; |
252 | data32 = le32_to_cpu(get_unaligned((__le32 *)&report[i])); | 252 | data32 = get_unaligned_le32(&report[i]); |
253 | break; | 253 | break; |
254 | } | 254 | } |
255 | 255 | ||
@@ -695,10 +695,10 @@ static void gtco_urb_callback(struct urb *urbinfo) | |||
695 | /* Fall thru */ | 695 | /* Fall thru */ |
696 | case 1: | 696 | case 1: |
697 | /* All reports have X and Y coords in the same place */ | 697 | /* All reports have X and Y coords in the same place */ |
698 | val = le16_to_cpu(get_unaligned((__le16 *)&device->buffer[1])); | 698 | val = get_unaligned_le16(&device->buffer[1]); |
699 | input_report_abs(inputdev, ABS_X, val); | 699 | input_report_abs(inputdev, ABS_X, val); |
700 | 700 | ||
701 | val = le16_to_cpu(get_unaligned((__le16 *)&device->buffer[3])); | 701 | val = get_unaligned_le16(&device->buffer[3]); |
702 | input_report_abs(inputdev, ABS_Y, val); | 702 | input_report_abs(inputdev, ABS_Y, val); |
703 | 703 | ||
704 | /* Ditto for proximity bit */ | 704 | /* Ditto for proximity bit */ |
@@ -762,7 +762,7 @@ static void gtco_urb_callback(struct urb *urbinfo) | |||
762 | le_buffer[1] = (u8)(device->buffer[4] >> 1); | 762 | le_buffer[1] = (u8)(device->buffer[4] >> 1); |
763 | le_buffer[1] |= (u8)((device->buffer[5] & 0x1) << 7); | 763 | le_buffer[1] |= (u8)((device->buffer[5] & 0x1) << 7); |
764 | 764 | ||
765 | val = le16_to_cpu(get_unaligned((__le16 *)le_buffer)); | 765 | val = get_unaligned_le16(le_buffer); |
766 | input_report_abs(inputdev, ABS_Y, val); | 766 | input_report_abs(inputdev, ABS_Y, val); |
767 | 767 | ||
768 | /* | 768 | /* |
@@ -772,10 +772,10 @@ static void gtco_urb_callback(struct urb *urbinfo) | |||
772 | buttonbyte = device->buffer[5] >> 1; | 772 | buttonbyte = device->buffer[5] >> 1; |
773 | } else { | 773 | } else { |
774 | 774 | ||
775 | val = le16_to_cpu(get_unaligned((__le16 *)&device->buffer[1])); | 775 | val = get_unaligned_le16(&device->buffer[1]); |
776 | input_report_abs(inputdev, ABS_X, val); | 776 | input_report_abs(inputdev, ABS_X, val); |
777 | 777 | ||
778 | val = le16_to_cpu(get_unaligned((__le16 *)&device->buffer[3])); | 778 | val = get_unaligned_le16(&device->buffer[3]); |
779 | input_report_abs(inputdev, ABS_Y, val); | 779 | input_report_abs(inputdev, ABS_Y, val); |
780 | 780 | ||
781 | buttonbyte = device->buffer[5]; | 781 | buttonbyte = device->buffer[5]; |
diff --git a/drivers/input/tablet/kbtab.c b/drivers/input/tablet/kbtab.c index 1182fc133167..f23f5a97fb38 100644 --- a/drivers/input/tablet/kbtab.c +++ b/drivers/input/tablet/kbtab.c | |||
@@ -63,8 +63,8 @@ static void kbtab_irq(struct urb *urb) | |||
63 | goto exit; | 63 | goto exit; |
64 | } | 64 | } |
65 | 65 | ||
66 | kbtab->x = le16_to_cpu(get_unaligned((__le16 *) &data[1])); | 66 | kbtab->x = get_unaligned_le16(&data[1]); |
67 | kbtab->y = le16_to_cpu(get_unaligned((__le16 *) &data[3])); | 67 | kbtab->y = get_unaligned_le16(&data[3]); |
68 | 68 | ||
69 | kbtab->pressure = (data[5]); | 69 | kbtab->pressure = (data[5]); |
70 | 70 | ||
diff --git a/drivers/isdn/capi/kcapi_proc.c b/drivers/isdn/capi/kcapi_proc.c index 845a797b0030..c29208bd7521 100644 --- a/drivers/isdn/capi/kcapi_proc.c +++ b/drivers/isdn/capi/kcapi_proc.c | |||
@@ -114,6 +114,7 @@ static int seq_contrstats_open(struct inode *inode, struct file *file) | |||
114 | } | 114 | } |
115 | 115 | ||
116 | static const struct file_operations proc_controller_ops = { | 116 | static const struct file_operations proc_controller_ops = { |
117 | .owner = THIS_MODULE, | ||
117 | .open = seq_controller_open, | 118 | .open = seq_controller_open, |
118 | .read = seq_read, | 119 | .read = seq_read, |
119 | .llseek = seq_lseek, | 120 | .llseek = seq_lseek, |
@@ -121,6 +122,7 @@ static const struct file_operations proc_controller_ops = { | |||
121 | }; | 122 | }; |
122 | 123 | ||
123 | static const struct file_operations proc_contrstats_ops = { | 124 | static const struct file_operations proc_contrstats_ops = { |
125 | .owner = THIS_MODULE, | ||
124 | .open = seq_contrstats_open, | 126 | .open = seq_contrstats_open, |
125 | .read = seq_read, | 127 | .read = seq_read, |
126 | .llseek = seq_lseek, | 128 | .llseek = seq_lseek, |
@@ -219,6 +221,7 @@ seq_applstats_open(struct inode *inode, struct file *file) | |||
219 | } | 221 | } |
220 | 222 | ||
221 | static const struct file_operations proc_applications_ops = { | 223 | static const struct file_operations proc_applications_ops = { |
224 | .owner = THIS_MODULE, | ||
222 | .open = seq_applications_open, | 225 | .open = seq_applications_open, |
223 | .read = seq_read, | 226 | .read = seq_read, |
224 | .llseek = seq_lseek, | 227 | .llseek = seq_lseek, |
@@ -226,21 +229,13 @@ static const struct file_operations proc_applications_ops = { | |||
226 | }; | 229 | }; |
227 | 230 | ||
228 | static const struct file_operations proc_applstats_ops = { | 231 | static const struct file_operations proc_applstats_ops = { |
232 | .owner = THIS_MODULE, | ||
229 | .open = seq_applstats_open, | 233 | .open = seq_applstats_open, |
230 | .read = seq_read, | 234 | .read = seq_read, |
231 | .llseek = seq_lseek, | 235 | .llseek = seq_lseek, |
232 | .release = seq_release, | 236 | .release = seq_release, |
233 | }; | 237 | }; |
234 | 238 | ||
235 | static void | ||
236 | create_seq_entry(char *name, mode_t mode, const struct file_operations *f) | ||
237 | { | ||
238 | struct proc_dir_entry *entry; | ||
239 | entry = create_proc_entry(name, mode, NULL); | ||
240 | if (entry) | ||
241 | entry->proc_fops = f; | ||
242 | } | ||
243 | |||
244 | // --------------------------------------------------------------------------- | 239 | // --------------------------------------------------------------------------- |
245 | 240 | ||
246 | static void *capi_driver_start(struct seq_file *seq, loff_t *pos) | 241 | static void *capi_driver_start(struct seq_file *seq, loff_t *pos) |
@@ -283,6 +278,7 @@ seq_capi_driver_open(struct inode *inode, struct file *file) | |||
283 | } | 278 | } |
284 | 279 | ||
285 | static const struct file_operations proc_driver_ops = { | 280 | static const struct file_operations proc_driver_ops = { |
281 | .owner = THIS_MODULE, | ||
286 | .open = seq_capi_driver_open, | 282 | .open = seq_capi_driver_open, |
287 | .read = seq_read, | 283 | .read = seq_read, |
288 | .llseek = seq_lseek, | 284 | .llseek = seq_lseek, |
@@ -296,11 +292,11 @@ kcapi_proc_init(void) | |||
296 | { | 292 | { |
297 | proc_mkdir("capi", NULL); | 293 | proc_mkdir("capi", NULL); |
298 | proc_mkdir("capi/controllers", NULL); | 294 | proc_mkdir("capi/controllers", NULL); |
299 | create_seq_entry("capi/controller", 0, &proc_controller_ops); | 295 | proc_create("capi/controller", 0, NULL, &proc_controller_ops); |
300 | create_seq_entry("capi/contrstats", 0, &proc_contrstats_ops); | 296 | proc_create("capi/contrstats", 0, NULL, &proc_contrstats_ops); |
301 | create_seq_entry("capi/applications", 0, &proc_applications_ops); | 297 | proc_create("capi/applications", 0, NULL, &proc_applications_ops); |
302 | create_seq_entry("capi/applstats", 0, &proc_applstats_ops); | 298 | proc_create("capi/applstats", 0, NULL, &proc_applstats_ops); |
303 | create_seq_entry("capi/driver", 0, &proc_driver_ops); | 299 | proc_create("capi/driver", 0, NULL, &proc_driver_ops); |
304 | } | 300 | } |
305 | 301 | ||
306 | void __exit | 302 | void __exit |
diff --git a/drivers/isdn/divert/divert_procfs.c b/drivers/isdn/divert/divert_procfs.c index 4fd4c46892e3..8b256a617c8a 100644 --- a/drivers/isdn/divert/divert_procfs.c +++ b/drivers/isdn/divert/divert_procfs.c | |||
@@ -288,13 +288,12 @@ divert_dev_init(void) | |||
288 | isdn_proc_entry = proc_mkdir("isdn", init_net.proc_net); | 288 | isdn_proc_entry = proc_mkdir("isdn", init_net.proc_net); |
289 | if (!isdn_proc_entry) | 289 | if (!isdn_proc_entry) |
290 | return (-1); | 290 | return (-1); |
291 | isdn_divert_entry = create_proc_entry("divert", S_IFREG | S_IRUGO, isdn_proc_entry); | 291 | isdn_divert_entry = proc_create("divert", S_IFREG | S_IRUGO, |
292 | isdn_proc_entry, &isdn_fops); | ||
292 | if (!isdn_divert_entry) { | 293 | if (!isdn_divert_entry) { |
293 | remove_proc_entry("isdn", init_net.proc_net); | 294 | remove_proc_entry("isdn", init_net.proc_net); |
294 | return (-1); | 295 | return (-1); |
295 | } | 296 | } |
296 | isdn_divert_entry->proc_fops = &isdn_fops; | ||
297 | isdn_divert_entry->owner = THIS_MODULE; | ||
298 | #endif /* CONFIG_PROC_FS */ | 297 | #endif /* CONFIG_PROC_FS */ |
299 | 298 | ||
300 | return (0); | 299 | return (0); |
diff --git a/drivers/isdn/hardware/eicon/divasproc.c b/drivers/isdn/hardware/eicon/divasproc.c index 0632a2606998..fae895828a17 100644 --- a/drivers/isdn/hardware/eicon/divasproc.c +++ b/drivers/isdn/hardware/eicon/divasproc.c | |||
@@ -125,15 +125,11 @@ static const struct file_operations divas_fops = { | |||
125 | 125 | ||
126 | int create_divas_proc(void) | 126 | int create_divas_proc(void) |
127 | { | 127 | { |
128 | divas_proc_entry = create_proc_entry(divas_proc_name, | 128 | proc_create(divas_proc_name, S_IFREG | S_IRUGO, proc_net_eicon, |
129 | S_IFREG | S_IRUGO, | 129 | &divas_fops); |
130 | proc_net_eicon); | ||
131 | if (!divas_proc_entry) | 130 | if (!divas_proc_entry) |
132 | return (0); | 131 | return (0); |
133 | 132 | ||
134 | divas_proc_entry->proc_fops = &divas_fops; | ||
135 | divas_proc_entry->owner = THIS_MODULE; | ||
136 | |||
137 | return (1); | 133 | return (1); |
138 | } | 134 | } |
139 | 135 | ||
diff --git a/drivers/isdn/hysdn/hysdn_procconf.c b/drivers/isdn/hysdn/hysdn_procconf.c index 27d890b48f88..877be9922c3d 100644 --- a/drivers/isdn/hysdn/hysdn_procconf.c +++ b/drivers/isdn/hysdn/hysdn_procconf.c | |||
@@ -370,6 +370,7 @@ hysdn_conf_close(struct inode *ino, struct file *filep) | |||
370 | /******************************************************/ | 370 | /******************************************************/ |
371 | static const struct file_operations conf_fops = | 371 | static const struct file_operations conf_fops = |
372 | { | 372 | { |
373 | .owner = THIS_MODULE, | ||
373 | .llseek = no_llseek, | 374 | .llseek = no_llseek, |
374 | .read = hysdn_conf_read, | 375 | .read = hysdn_conf_read, |
375 | .write = hysdn_conf_write, | 376 | .write = hysdn_conf_write, |
@@ -402,11 +403,9 @@ hysdn_procconf_init(void) | |||
402 | while (card) { | 403 | while (card) { |
403 | 404 | ||
404 | sprintf(conf_name, "%s%d", PROC_CONF_BASENAME, card->myid); | 405 | sprintf(conf_name, "%s%d", PROC_CONF_BASENAME, card->myid); |
405 | if ((card->procconf = (void *) create_proc_entry(conf_name, | 406 | if ((card->procconf = (void *) proc_create(conf_name, |
406 | S_IFREG | S_IRUGO | S_IWUSR, | 407 | S_IFREG | S_IRUGO | S_IWUSR, |
407 | hysdn_proc_entry)) != NULL) { | 408 | hysdn_proc_entry)) != NULL) { |
408 | ((struct proc_dir_entry *) card->procconf)->proc_fops = &conf_fops; | ||
409 | ((struct proc_dir_entry *) card->procconf)->owner = THIS_MODULE; | ||
410 | hysdn_proclog_init(card); /* init the log file entry */ | 409 | hysdn_proclog_init(card); /* init the log file entry */ |
411 | } | 410 | } |
412 | card = card->next; /* next entry */ | 411 | card = card->next; /* next entry */ |
diff --git a/drivers/isdn/hysdn/hysdn_proclog.c b/drivers/isdn/hysdn/hysdn_proclog.c index 27b3991fb0ec..8991d2c8ee4a 100644 --- a/drivers/isdn/hysdn/hysdn_proclog.c +++ b/drivers/isdn/hysdn/hysdn_proclog.c | |||
@@ -380,6 +380,7 @@ hysdn_log_poll(struct file *file, poll_table * wait) | |||
380 | /**************************************************/ | 380 | /**************************************************/ |
381 | static const struct file_operations log_fops = | 381 | static const struct file_operations log_fops = |
382 | { | 382 | { |
383 | .owner = THIS_MODULE, | ||
383 | .llseek = no_llseek, | 384 | .llseek = no_llseek, |
384 | .read = hysdn_log_read, | 385 | .read = hysdn_log_read, |
385 | .write = hysdn_log_write, | 386 | .write = hysdn_log_write, |
@@ -402,10 +403,9 @@ hysdn_proclog_init(hysdn_card * card) | |||
402 | 403 | ||
403 | if ((pd = kzalloc(sizeof(struct procdata), GFP_KERNEL)) != NULL) { | 404 | if ((pd = kzalloc(sizeof(struct procdata), GFP_KERNEL)) != NULL) { |
404 | sprintf(pd->log_name, "%s%d", PROC_LOG_BASENAME, card->myid); | 405 | sprintf(pd->log_name, "%s%d", PROC_LOG_BASENAME, card->myid); |
405 | if ((pd->log = create_proc_entry(pd->log_name, S_IFREG | S_IRUGO | S_IWUSR, hysdn_proc_entry)) != NULL) { | 406 | pd->log = proc_create(pd->log_name, |
406 | pd->log->proc_fops = &log_fops; | 407 | S_IFREG | S_IRUGO | S_IWUSR, hysdn_proc_entry, |
407 | pd->log->owner = THIS_MODULE; | 408 | &log_fops); |
408 | } | ||
409 | 409 | ||
410 | init_waitqueue_head(&(pd->rd_queue)); | 410 | init_waitqueue_head(&(pd->rd_queue)); |
411 | 411 | ||
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c index ac05a928f764..b3c54be74556 100644 --- a/drivers/leds/led-class.c +++ b/drivers/leds/led-class.c | |||
@@ -105,7 +105,7 @@ int led_classdev_register(struct device *parent, struct led_classdev *led_cdev) | |||
105 | 105 | ||
106 | led_cdev->dev = device_create(leds_class, parent, 0, "%s", | 106 | led_cdev->dev = device_create(leds_class, parent, 0, "%s", |
107 | led_cdev->name); | 107 | led_cdev->name); |
108 | if (unlikely(IS_ERR(led_cdev->dev))) | 108 | if (IS_ERR(led_cdev->dev)) |
109 | return PTR_ERR(led_cdev->dev); | 109 | return PTR_ERR(led_cdev->dev); |
110 | 110 | ||
111 | dev_set_drvdata(led_cdev->dev, led_cdev); | 111 | dev_set_drvdata(led_cdev->dev, led_cdev); |
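Dropping unlikely() around IS_ERR() is a small cleanup: IS_ERR() is built on IS_ERR_VALUE(), which already carries its own unlikely() branch hint, so the outer wrapper added nothing. The usual error-pointer check, mirroring the call in the hunk above (names illustrative):

#include <linux/device.h>
#include <linux/err.h>

static int register_example(struct class *cls, struct device *parent)
{
        struct device *dev = device_create(cls, parent, 0, "%s", "example");

        if (IS_ERR(dev))                /* no extra unlikely() needed here */
                return PTR_ERR(dev);
        return 0;
}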
diff --git a/drivers/mca/mca-legacy.c b/drivers/mca/mca-legacy.c index 0c7bfa74c8ef..494f0c2001f5 100644 --- a/drivers/mca/mca-legacy.c +++ b/drivers/mca/mca-legacy.c | |||
@@ -282,24 +282,6 @@ void mca_set_adapter_name(int slot, char* name) | |||
282 | EXPORT_SYMBOL(mca_set_adapter_name); | 282 | EXPORT_SYMBOL(mca_set_adapter_name); |
283 | 283 | ||
284 | /** | 284 | /** |
285 | * mca_is_adapter_used - check if claimed by driver | ||
286 | * @slot: slot to check | ||
287 | * | ||
288 | * Returns 1 if the slot has been claimed by a driver | ||
289 | */ | ||
290 | |||
291 | int mca_is_adapter_used(int slot) | ||
292 | { | ||
293 | struct mca_device *mca_dev = mca_find_device_by_slot(slot); | ||
294 | |||
295 | if(!mca_dev) | ||
296 | return 0; | ||
297 | |||
298 | return mca_device_claimed(mca_dev); | ||
299 | } | ||
300 | EXPORT_SYMBOL(mca_is_adapter_used); | ||
301 | |||
302 | /** | ||
303 | * mca_mark_as_used - claim an MCA device | 285 | * mca_mark_as_used - claim an MCA device |
304 | * @slot: slot to claim | 286 | * @slot: slot to claim |
305 | * FIXME: should we make this threadsafe | 287 | * FIXME: should we make this threadsafe |
diff --git a/drivers/mca/mca-proc.c b/drivers/mca/mca-proc.c index 33d5e0820cc5..81ea0d377bf4 100644 --- a/drivers/mca/mca-proc.c +++ b/drivers/mca/mca-proc.c | |||
@@ -183,7 +183,7 @@ void __init mca_do_proc_init(void) | |||
183 | struct proc_dir_entry* node = NULL; | 183 | struct proc_dir_entry* node = NULL; |
184 | struct mca_device *mca_dev; | 184 | struct mca_device *mca_dev; |
185 | 185 | ||
186 | proc_mca = proc_mkdir("mca", &proc_root); | 186 | proc_mca = proc_mkdir("mca", NULL); |
187 | create_proc_read_entry("pos",0,proc_mca,get_mca_info,NULL); | 187 | create_proc_read_entry("pos",0,proc_mca,get_mca_info,NULL); |
188 | create_proc_read_entry("machine",0,proc_mca,get_mca_machine_info,NULL); | 188 | create_proc_read_entry("machine",0,proc_mca,get_mca_machine_info,NULL); |
189 | 189 | ||
diff --git a/drivers/md/md.c b/drivers/md/md.c index acd716b657b8..bb3e4b1cb773 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -5948,13 +5948,9 @@ static struct notifier_block md_notifier = { | |||
5948 | 5948 | ||
5949 | static void md_geninit(void) | 5949 | static void md_geninit(void) |
5950 | { | 5950 | { |
5951 | struct proc_dir_entry *p; | ||
5952 | |||
5953 | dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t)); | 5951 | dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t)); |
5954 | 5952 | ||
5955 | p = create_proc_entry("mdstat", S_IRUGO, NULL); | 5953 | proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops); |
5956 | if (p) | ||
5957 | p->proc_fops = &md_seq_fops; | ||
5958 | } | 5954 | } |
5959 | 5955 | ||
5960 | static int __init md_init(void) | 5956 | static int __init md_init(void) |
diff --git a/drivers/media/video/zoran_procfs.c b/drivers/media/video/zoran_procfs.c index 328ed6e7ac6a..870bc5a70e3f 100644 --- a/drivers/media/video/zoran_procfs.c +++ b/drivers/media/video/zoran_procfs.c | |||
@@ -180,6 +180,7 @@ static ssize_t zoran_write(struct file *file, const char __user *buffer, | |||
180 | } | 180 | } |
181 | 181 | ||
182 | static const struct file_operations zoran_operations = { | 182 | static const struct file_operations zoran_operations = { |
183 | .owner = THIS_MODULE, | ||
183 | .open = zoran_open, | 184 | .open = zoran_open, |
184 | .read = seq_read, | 185 | .read = seq_read, |
185 | .write = zoran_write, | 186 | .write = zoran_write, |
@@ -195,10 +196,8 @@ zoran_proc_init (struct zoran *zr) | |||
195 | char name[8]; | 196 | char name[8]; |
196 | 197 | ||
197 | snprintf(name, 7, "zoran%d", zr->id); | 198 | snprintf(name, 7, "zoran%d", zr->id); |
198 | if ((zr->zoran_proc = create_proc_entry(name, 0, NULL))) { | 199 | zr->zoran_proc = proc_create_data(name, 0, NULL, &zoran_operations, zr); |
199 | zr->zoran_proc->data = zr; | 200 | if (zr->zoran_proc != NULL) { |
200 | zr->zoran_proc->owner = THIS_MODULE; | ||
201 | zr->zoran_proc->proc_fops = &zoran_operations; | ||
202 | dprintk(2, | 201 | dprintk(2, |
203 | KERN_INFO | 202 | KERN_INFO |
204 | "%s: procfs entry /proc/%s allocated. data=%p\n", | 203 | "%s: procfs entry /proc/%s allocated. data=%p\n", |
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c index a95314897402..81483de8c0fd 100644 --- a/drivers/message/i2o/i2o_block.c +++ b/drivers/message/i2o/i2o_block.c | |||
@@ -371,7 +371,7 @@ static int i2o_block_prep_req_fn(struct request_queue *q, struct request *req) | |||
371 | /* connect the i2o_block_request to the request */ | 371 | /* connect the i2o_block_request to the request */ |
372 | if (!req->special) { | 372 | if (!req->special) { |
373 | ireq = i2o_block_request_alloc(); | 373 | ireq = i2o_block_request_alloc(); |
374 | if (unlikely(IS_ERR(ireq))) { | 374 | if (IS_ERR(ireq)) { |
375 | osm_debug("unable to allocate i2o_block_request!\n"); | 375 | osm_debug("unable to allocate i2o_block_request!\n"); |
376 | return BLKPREP_DEFER; | 376 | return BLKPREP_DEFER; |
377 | } | 377 | } |
diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c index 6fdd072201f9..54a3016ff45d 100644 --- a/drivers/message/i2o/i2o_proc.c +++ b/drivers/message/i2o/i2o_proc.c | |||
@@ -1893,13 +1893,11 @@ static int i2o_proc_create_entries(struct proc_dir_entry *dir, | |||
1893 | struct proc_dir_entry *tmp; | 1893 | struct proc_dir_entry *tmp; |
1894 | 1894 | ||
1895 | while (i2o_pe->name) { | 1895 | while (i2o_pe->name) { |
1896 | tmp = create_proc_entry(i2o_pe->name, i2o_pe->mode, dir); | 1896 | tmp = proc_create_data(i2o_pe->name, i2o_pe->mode, dir, |
1897 | i2o_pe->fops, data); | ||
1897 | if (!tmp) | 1898 | if (!tmp) |
1898 | return -1; | 1899 | return -1; |
1899 | 1900 | ||
1900 | tmp->data = data; | ||
1901 | tmp->proc_fops = i2o_pe->fops; | ||
1902 | |||
1903 | i2o_pe++; | 1901 | i2o_pe++; |
1904 | } | 1902 | } |
1905 | 1903 | ||
diff --git a/drivers/misc/hdpuftrs/hdpu_cpustate.c b/drivers/misc/hdpuftrs/hdpu_cpustate.c index 302e92418bbe..ff51ab67231c 100644 --- a/drivers/misc/hdpuftrs/hdpu_cpustate.c +++ b/drivers/misc/hdpuftrs/hdpu_cpustate.c | |||
@@ -210,13 +210,10 @@ static int hdpu_cpustate_probe(struct platform_device *pdev) | |||
210 | return ret; | 210 | return ret; |
211 | } | 211 | } |
212 | 212 | ||
213 | proc_de = create_proc_entry("sky_cpustate", 0666, &proc_root); | 213 | proc_de = proc_create("sky_cpustate", 0666, NULL, &proc_cpustate); |
214 | if (!proc_de) { | 214 | if (!proc_de) { |
215 | printk(KERN_WARNING "sky_cpustate: " | 215 | printk(KERN_WARNING "sky_cpustate: " |
216 | "Unable to create proc entry\n"); | 216 | "Unable to create proc entry\n"); |
217 | } else { | ||
218 | proc_de->proc_fops = &proc_cpustate; | ||
219 | proc_de->owner = THIS_MODULE; | ||
220 | } | 217 | } |
221 | 218 | ||
222 | printk(KERN_INFO "Sky CPU State Driver v" SKY_CPUSTATE_VERSION "\n"); | 219 | printk(KERN_INFO "Sky CPU State Driver v" SKY_CPUSTATE_VERSION "\n"); |
diff --git a/drivers/misc/hdpuftrs/hdpu_nexus.c b/drivers/misc/hdpuftrs/hdpu_nexus.c index 2fa36f7a6eb3..08e26beefe64 100644 --- a/drivers/misc/hdpuftrs/hdpu_nexus.c +++ b/drivers/misc/hdpuftrs/hdpu_nexus.c | |||
@@ -102,22 +102,17 @@ static int hdpu_nexus_probe(struct platform_device *pdev) | |||
102 | printk(KERN_ERR "sky_nexus: Could not map slot id\n"); | 102 | printk(KERN_ERR "sky_nexus: Could not map slot id\n"); |
103 | } | 103 | } |
104 | 104 | ||
105 | hdpu_slot_id = create_proc_entry("sky_slot_id", 0666, &proc_root); | 105 | hdpu_slot_id = proc_create("sky_slot_id", 0666, NULL, &proc_slot_id); |
106 | if (!hdpu_slot_id) { | 106 | if (!hdpu_slot_id) { |
107 | printk(KERN_WARNING "sky_nexus: " | 107 | printk(KERN_WARNING "sky_nexus: " |
108 | "Unable to create proc dir entry: sky_slot_id\n"); | 108 | "Unable to create proc dir entry: sky_slot_id\n"); |
109 | } else { | ||
110 | hdpu_slot_id->proc_fops = &proc_slot_id; | ||
111 | hdpu_slot_id->owner = THIS_MODULE; | ||
112 | } | 109 | } |
113 | 110 | ||
114 | hdpu_chassis_id = create_proc_entry("sky_chassis_id", 0666, &proc_root); | 111 | hdpu_chassis_id = proc_create("sky_chassis_id", 0666, NULL, |
115 | if (!hdpu_chassis_id) { | 112 | &proc_chassis_id); |
113 | if (!hdpu_chassis_id) | ||
116 | printk(KERN_WARNING "sky_nexus: " | 114 | printk(KERN_WARNING "sky_nexus: " |
117 | "Unable to create proc dir entry: sky_chassis_id\n"); | 115 | "Unable to create proc dir entry: sky_chassis_id\n"); |
118 | } else { | ||
119 | hdpu_chassis_id->proc_fops = &proc_chassis_id; | ||
120 | hdpu_chassis_id->owner = THIS_MODULE; | ||
121 | } | 116 | } |
122 | 117 | ||
123 | return 0; | 118 | return 0; |
@@ -128,8 +123,8 @@ static int hdpu_nexus_remove(struct platform_device *pdev) | |||
128 | slot_id = -1; | 123 | slot_id = -1; |
129 | chassis_id = -1; | 124 | chassis_id = -1; |
130 | 125 | ||
131 | remove_proc_entry("sky_slot_id", &proc_root); | 126 | remove_proc_entry("sky_slot_id", NULL); |
132 | remove_proc_entry("sky_chassis_id", &proc_root); | 127 | remove_proc_entry("sky_chassis_id", NULL); |
133 | 128 | ||
134 | hdpu_slot_id = 0; | 129 | hdpu_slot_id = 0; |
135 | hdpu_chassis_id = 0; | 130 | hdpu_chassis_id = 0; |
diff --git a/drivers/misc/ibmasm/command.c b/drivers/misc/ibmasm/command.c index 1a0e7978226a..276d3fb68094 100644 --- a/drivers/misc/ibmasm/command.c +++ b/drivers/misc/ibmasm/command.c | |||
@@ -96,7 +96,7 @@ static inline void do_exec_command(struct service_processor *sp) | |||
96 | { | 96 | { |
97 | char tsbuf[32]; | 97 | char tsbuf[32]; |
98 | 98 | ||
99 | dbg("%s:%d at %s\n", __FUNCTION__, __LINE__, get_timestamp(tsbuf)); | 99 | dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf)); |
100 | 100 | ||
101 | if (ibmasm_send_i2o_message(sp)) { | 101 | if (ibmasm_send_i2o_message(sp)) { |
102 | sp->current_command->status = IBMASM_CMD_FAILED; | 102 | sp->current_command->status = IBMASM_CMD_FAILED; |
@@ -119,7 +119,7 @@ void ibmasm_exec_command(struct service_processor *sp, struct command *cmd) | |||
119 | unsigned long flags; | 119 | unsigned long flags; |
120 | char tsbuf[32]; | 120 | char tsbuf[32]; |
121 | 121 | ||
122 | dbg("%s:%d at %s\n", __FUNCTION__, __LINE__, get_timestamp(tsbuf)); | 122 | dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf)); |
123 | 123 | ||
124 | spin_lock_irqsave(&sp->lock, flags); | 124 | spin_lock_irqsave(&sp->lock, flags); |
125 | 125 | ||
@@ -139,7 +139,7 @@ static void exec_next_command(struct service_processor *sp) | |||
139 | unsigned long flags; | 139 | unsigned long flags; |
140 | char tsbuf[32]; | 140 | char tsbuf[32]; |
141 | 141 | ||
142 | dbg("%s:%d at %s\n", __FUNCTION__, __LINE__, get_timestamp(tsbuf)); | 142 | dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf)); |
143 | 143 | ||
144 | spin_lock_irqsave(&sp->lock, flags); | 144 | spin_lock_irqsave(&sp->lock, flags); |
145 | sp->current_command = dequeue_command(sp); | 145 | sp->current_command = dequeue_command(sp); |
diff --git a/drivers/misc/ibmasm/heartbeat.c b/drivers/misc/ibmasm/heartbeat.c index 3036e785b3e4..1bc4306572a4 100644 --- a/drivers/misc/ibmasm/heartbeat.c +++ b/drivers/misc/ibmasm/heartbeat.c | |||
@@ -75,9 +75,9 @@ void ibmasm_heartbeat_exit(struct service_processor *sp) | |||
75 | { | 75 | { |
76 | char tsbuf[32]; | 76 | char tsbuf[32]; |
77 | 77 | ||
78 | dbg("%s:%d at %s\n", __FUNCTION__, __LINE__, get_timestamp(tsbuf)); | 78 | dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf)); |
79 | ibmasm_wait_for_response(sp->heartbeat, IBMASM_CMD_TIMEOUT_NORMAL); | 79 | ibmasm_wait_for_response(sp->heartbeat, IBMASM_CMD_TIMEOUT_NORMAL); |
80 | dbg("%s:%d at %s\n", __FUNCTION__, __LINE__, get_timestamp(tsbuf)); | 80 | dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf)); |
81 | suspend_heartbeats = 1; | 81 | suspend_heartbeats = 1; |
82 | command_put(sp->heartbeat); | 82 | command_put(sp->heartbeat); |
83 | } | 83 | } |
@@ -88,7 +88,7 @@ void ibmasm_receive_heartbeat(struct service_processor *sp, void *message, size | |||
88 | struct dot_command_header *header = (struct dot_command_header *)cmd->buffer; | 88 | struct dot_command_header *header = (struct dot_command_header *)cmd->buffer; |
89 | char tsbuf[32]; | 89 | char tsbuf[32]; |
90 | 90 | ||
91 | dbg("%s:%d at %s\n", __FUNCTION__, __LINE__, get_timestamp(tsbuf)); | 91 | dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf)); |
92 | if (suspend_heartbeats) | 92 | if (suspend_heartbeats) |
93 | return; | 93 | return; |
94 | 94 | ||
diff --git a/drivers/misc/intel_menlow.c b/drivers/misc/intel_menlow.c index 0c0bb3093e07..80a136352408 100644 --- a/drivers/misc/intel_menlow.c +++ b/drivers/misc/intel_menlow.c | |||
@@ -175,19 +175,17 @@ static int intel_menlow_memory_add(struct acpi_device *device) | |||
175 | goto end; | 175 | goto end; |
176 | } | 176 | } |
177 | 177 | ||
178 | if (cdev) { | 178 | acpi_driver_data(device) = cdev; |
179 | acpi_driver_data(device) = cdev; | 179 | result = sysfs_create_link(&device->dev.kobj, |
180 | result = sysfs_create_link(&device->dev.kobj, | 180 | &cdev->device.kobj, "thermal_cooling"); |
181 | &cdev->device.kobj, "thermal_cooling"); | 181 | if (result) |
182 | if (result) | 182 | goto unregister; |
183 | goto unregister; | 183 | |
184 | 184 | result = sysfs_create_link(&cdev->device.kobj, | |
185 | result = sysfs_create_link(&cdev->device.kobj, | 185 | &device->dev.kobj, "device"); |
186 | &device->dev.kobj, "device"); | 186 | if (result) { |
187 | if (result) { | 187 | sysfs_remove_link(&device->dev.kobj, "thermal_cooling"); |
188 | sysfs_remove_link(&device->dev.kobj, "thermal_cooling"); | 188 | goto unregister; |
189 | goto unregister; | ||
190 | } | ||
191 | } | 189 | } |
192 | 190 | ||
193 | end: | 191 | end: |
diff --git a/drivers/misc/ioc4.c b/drivers/misc/ioc4.c index 05172d2613d6..6f76573e7c8a 100644 --- a/drivers/misc/ioc4.c +++ b/drivers/misc/ioc4.c | |||
@@ -75,7 +75,7 @@ ioc4_register_submodule(struct ioc4_submodule *is) | |||
75 | printk(KERN_WARNING | 75 | printk(KERN_WARNING |
76 | "%s: IOC4 submodule %s probe failed " | 76 | "%s: IOC4 submodule %s probe failed " |
77 | "for pci_dev %s", | 77 | "for pci_dev %s", |
78 | __FUNCTION__, module_name(is->is_owner), | 78 | __func__, module_name(is->is_owner), |
79 | pci_name(idd->idd_pdev)); | 79 | pci_name(idd->idd_pdev)); |
80 | } | 80 | } |
81 | } | 81 | } |
@@ -102,7 +102,7 @@ ioc4_unregister_submodule(struct ioc4_submodule *is) | |||
102 | printk(KERN_WARNING | 102 | printk(KERN_WARNING |
103 | "%s: IOC4 submodule %s remove failed " | 103 | "%s: IOC4 submodule %s remove failed " |
104 | "for pci_dev %s.\n", | 104 | "for pci_dev %s.\n", |
105 | __FUNCTION__, module_name(is->is_owner), | 105 | __func__, module_name(is->is_owner), |
106 | pci_name(idd->idd_pdev)); | 106 | pci_name(idd->idd_pdev)); |
107 | } | 107 | } |
108 | } | 108 | } |
@@ -282,7 +282,7 @@ ioc4_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id) | |||
282 | if ((ret = pci_enable_device(pdev))) { | 282 | if ((ret = pci_enable_device(pdev))) { |
283 | printk(KERN_WARNING | 283 | printk(KERN_WARNING |
284 | "%s: Failed to enable IOC4 device for pci_dev %s.\n", | 284 | "%s: Failed to enable IOC4 device for pci_dev %s.\n", |
285 | __FUNCTION__, pci_name(pdev)); | 285 | __func__, pci_name(pdev)); |
286 | goto out; | 286 | goto out; |
287 | } | 287 | } |
288 | pci_set_master(pdev); | 288 | pci_set_master(pdev); |
@@ -292,7 +292,7 @@ ioc4_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id) | |||
292 | if (!idd) { | 292 | if (!idd) { |
293 | printk(KERN_WARNING | 293 | printk(KERN_WARNING |
294 | "%s: Failed to allocate IOC4 data for pci_dev %s.\n", | 294 | "%s: Failed to allocate IOC4 data for pci_dev %s.\n", |
295 | __FUNCTION__, pci_name(pdev)); | 295 | __func__, pci_name(pdev)); |
296 | ret = -ENODEV; | 296 | ret = -ENODEV; |
297 | goto out_idd; | 297 | goto out_idd; |
298 | } | 298 | } |
@@ -307,7 +307,7 @@ ioc4_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id) | |||
307 | printk(KERN_WARNING | 307 | printk(KERN_WARNING |
308 | "%s: Unable to find IOC4 misc resource " | 308 | "%s: Unable to find IOC4 misc resource " |
309 | "for pci_dev %s.\n", | 309 | "for pci_dev %s.\n", |
310 | __FUNCTION__, pci_name(idd->idd_pdev)); | 310 | __func__, pci_name(idd->idd_pdev)); |
311 | ret = -ENODEV; | 311 | ret = -ENODEV; |
312 | goto out_pci; | 312 | goto out_pci; |
313 | } | 313 | } |
@@ -316,7 +316,7 @@ ioc4_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id) | |||
316 | printk(KERN_WARNING | 316 | printk(KERN_WARNING |
317 | "%s: Unable to request IOC4 misc region " | 317 | "%s: Unable to request IOC4 misc region " |
318 | "for pci_dev %s.\n", | 318 | "for pci_dev %s.\n", |
319 | __FUNCTION__, pci_name(idd->idd_pdev)); | 319 | __func__, pci_name(idd->idd_pdev)); |
320 | ret = -ENODEV; | 320 | ret = -ENODEV; |
321 | goto out_pci; | 321 | goto out_pci; |
322 | } | 322 | } |
@@ -326,7 +326,7 @@ ioc4_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id) | |||
326 | printk(KERN_WARNING | 326 | printk(KERN_WARNING |
327 | "%s: Unable to remap IOC4 misc region " | 327 | "%s: Unable to remap IOC4 misc region " |
328 | "for pci_dev %s.\n", | 328 | "for pci_dev %s.\n", |
329 | __FUNCTION__, pci_name(idd->idd_pdev)); | 329 | __func__, pci_name(idd->idd_pdev)); |
330 | ret = -ENODEV; | 330 | ret = -ENODEV; |
331 | goto out_misc_region; | 331 | goto out_misc_region; |
332 | } | 332 | } |
@@ -372,7 +372,7 @@ ioc4_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id) | |||
372 | printk(KERN_WARNING | 372 | printk(KERN_WARNING |
373 | "%s: IOC4 submodule 0x%s probe failed " | 373 | "%s: IOC4 submodule 0x%s probe failed " |
374 | "for pci_dev %s.\n", | 374 | "for pci_dev %s.\n", |
375 | __FUNCTION__, module_name(is->is_owner), | 375 | __func__, module_name(is->is_owner), |
376 | pci_name(idd->idd_pdev)); | 376 | pci_name(idd->idd_pdev)); |
377 | } | 377 | } |
378 | } | 378 | } |
@@ -406,7 +406,7 @@ ioc4_remove(struct pci_dev *pdev) | |||
406 | printk(KERN_WARNING | 406 | printk(KERN_WARNING |
407 | "%s: IOC4 submodule 0x%s remove failed " | 407 | "%s: IOC4 submodule 0x%s remove failed " |
408 | "for pci_dev %s.\n", | 408 | "for pci_dev %s.\n", |
409 | __FUNCTION__, module_name(is->is_owner), | 409 | __func__, module_name(is->is_owner), |
410 | pci_name(idd->idd_pdev)); | 410 | pci_name(idd->idd_pdev)); |
411 | } | 411 | } |
412 | } | 412 | } |
@@ -418,7 +418,7 @@ ioc4_remove(struct pci_dev *pdev) | |||
418 | printk(KERN_WARNING | 418 | printk(KERN_WARNING |
419 | "%s: Unable to get IOC4 misc mapping for pci_dev %s. " | 419 | "%s: Unable to get IOC4 misc mapping for pci_dev %s. " |
420 | "Device removal may be incomplete.\n", | 420 | "Device removal may be incomplete.\n", |
421 | __FUNCTION__, pci_name(idd->idd_pdev)); | 421 | __func__, pci_name(idd->idd_pdev)); |
422 | } | 422 | } |
423 | release_mem_region(idd->idd_bar0, sizeof(struct ioc4_misc_regs)); | 423 | release_mem_region(idd->idd_bar0, sizeof(struct ioc4_misc_regs)); |
424 | 424 | ||
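The ioc4 hunks above swap the GCC-specific __FUNCTION__ for the standard C99 __func__ identifier; both evaluate to the name of the enclosing function, so the generated messages are unchanged. A minimal sketch of the new spelling, with a hypothetical helper and message:

    #include <linux/kernel.h>
    #include <linux/pci.h>

    /* Hypothetical helper: __func__ expands to "ioc4_warn_example" here,
     * exactly as the removed __FUNCTION__ extension did. */
    static void ioc4_warn_example(struct pci_dev *pdev)
    {
            printk(KERN_WARNING "%s: example warning for pci_dev %s.\n",
                   __func__, pci_name(pdev));
    }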
diff --git a/drivers/misc/phantom.c b/drivers/misc/phantom.c index 7fa61e907e1c..71d1c84e2fa8 100644 --- a/drivers/misc/phantom.c +++ b/drivers/misc/phantom.c | |||
@@ -12,6 +12,7 @@ | |||
12 | * or alternatively, you might use OpenHaptics provided by Sensable. | 12 | * or alternatively, you might use OpenHaptics provided by Sensable. |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <linux/compat.h> | ||
15 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
16 | #include <linux/module.h> | 17 | #include <linux/module.h> |
17 | #include <linux/device.h> | 18 | #include <linux/device.h> |
@@ -91,11 +92,8 @@ static long phantom_ioctl(struct file *file, unsigned int cmd, | |||
91 | unsigned long flags; | 92 | unsigned long flags; |
92 | unsigned int i; | 93 | unsigned int i; |
93 | 94 | ||
94 | if (_IOC_TYPE(cmd) != PH_IOC_MAGIC || | ||
95 | _IOC_NR(cmd) > PH_IOC_MAXNR) | ||
96 | return -ENOTTY; | ||
97 | |||
98 | switch (cmd) { | 95 | switch (cmd) { |
96 | case PHN_SETREG: | ||
99 | case PHN_SET_REG: | 97 | case PHN_SET_REG: |
100 | if (copy_from_user(&r, argp, sizeof(r))) | 98 | if (copy_from_user(&r, argp, sizeof(r))) |
101 | return -EFAULT; | 99 | return -EFAULT; |
@@ -126,6 +124,7 @@ static long phantom_ioctl(struct file *file, unsigned int cmd, | |||
126 | phantom_status(dev, dev->status & ~PHB_RUNNING); | 124 | phantom_status(dev, dev->status & ~PHB_RUNNING); |
127 | spin_unlock_irqrestore(&dev->regs_lock, flags); | 125 | spin_unlock_irqrestore(&dev->regs_lock, flags); |
128 | break; | 126 | break; |
127 | case PHN_SETREGS: | ||
129 | case PHN_SET_REGS: | 128 | case PHN_SET_REGS: |
130 | if (copy_from_user(&rs, argp, sizeof(rs))) | 129 | if (copy_from_user(&rs, argp, sizeof(rs))) |
131 | return -EFAULT; | 130 | return -EFAULT; |
@@ -143,6 +142,7 @@ static long phantom_ioctl(struct file *file, unsigned int cmd, | |||
143 | } | 142 | } |
144 | spin_unlock_irqrestore(&dev->regs_lock, flags); | 143 | spin_unlock_irqrestore(&dev->regs_lock, flags); |
145 | break; | 144 | break; |
145 | case PHN_GETREG: | ||
146 | case PHN_GET_REG: | 146 | case PHN_GET_REG: |
147 | if (copy_from_user(&r, argp, sizeof(r))) | 147 | if (copy_from_user(&r, argp, sizeof(r))) |
148 | return -EFAULT; | 148 | return -EFAULT; |
@@ -155,6 +155,7 @@ static long phantom_ioctl(struct file *file, unsigned int cmd, | |||
155 | if (copy_to_user(argp, &r, sizeof(r))) | 155 | if (copy_to_user(argp, &r, sizeof(r))) |
156 | return -EFAULT; | 156 | return -EFAULT; |
157 | break; | 157 | break; |
158 | case PHN_GETREGS: | ||
158 | case PHN_GET_REGS: { | 159 | case PHN_GET_REGS: { |
159 | u32 m; | 160 | u32 m; |
160 | 161 | ||
@@ -168,6 +169,7 @@ static long phantom_ioctl(struct file *file, unsigned int cmd, | |||
168 | for (i = 0; i < m; i++) | 169 | for (i = 0; i < m; i++) |
169 | if (rs.mask & BIT(i)) | 170 | if (rs.mask & BIT(i)) |
170 | rs.values[i] = ioread32(dev->iaddr + i); | 171 | rs.values[i] = ioread32(dev->iaddr + i); |
172 | atomic_set(&dev->counter, 0); | ||
171 | spin_unlock_irqrestore(&dev->regs_lock, flags); | 173 | spin_unlock_irqrestore(&dev->regs_lock, flags); |
172 | 174 | ||
173 | if (copy_to_user(argp, &rs, sizeof(rs))) | 175 | if (copy_to_user(argp, &rs, sizeof(rs))) |
@@ -191,6 +193,20 @@ static long phantom_ioctl(struct file *file, unsigned int cmd, | |||
191 | return 0; | 193 | return 0; |
192 | } | 194 | } |
193 | 195 | ||
196 | #ifdef CONFIG_COMPAT | ||
197 | static long phantom_compat_ioctl(struct file *filp, unsigned int cmd, | ||
198 | unsigned long arg) | ||
199 | { | ||
200 | if (_IOC_NR(cmd) <= 3 && _IOC_SIZE(cmd) == sizeof(compat_uptr_t)) { | ||
201 | cmd &= ~(_IOC_SIZEMASK << _IOC_SIZESHIFT); | ||
202 | cmd |= sizeof(void *) << _IOC_SIZESHIFT; | ||
203 | } | ||
204 | return phantom_ioctl(filp, cmd, (unsigned long)compat_ptr(arg)); | ||
205 | } | ||
206 | #else | ||
207 | #define phantom_compat_ioctl NULL | ||
208 | #endif | ||
209 | |||
194 | static int phantom_open(struct inode *inode, struct file *file) | 210 | static int phantom_open(struct inode *inode, struct file *file) |
195 | { | 211 | { |
196 | struct phantom_device *dev = container_of(inode->i_cdev, | 212 | struct phantom_device *dev = container_of(inode->i_cdev, |
@@ -239,11 +255,12 @@ static unsigned int phantom_poll(struct file *file, poll_table *wait) | |||
239 | 255 | ||
240 | pr_debug("phantom_poll: %d\n", atomic_read(&dev->counter)); | 256 | pr_debug("phantom_poll: %d\n", atomic_read(&dev->counter)); |
241 | poll_wait(file, &dev->wait, wait); | 257 | poll_wait(file, &dev->wait, wait); |
242 | if (atomic_read(&dev->counter)) { | 258 | |
259 | if (!(dev->status & PHB_RUNNING)) | ||
260 | mask = POLLERR; | ||
261 | else if (atomic_read(&dev->counter)) | ||
243 | mask = POLLIN | POLLRDNORM; | 262 | mask = POLLIN | POLLRDNORM; |
244 | atomic_dec(&dev->counter); | 263 | |
245 | } else if ((dev->status & PHB_RUNNING) == 0) | ||
246 | mask = POLLIN | POLLRDNORM | POLLERR; | ||
247 | pr_debug("phantom_poll end: %x/%d\n", mask, atomic_read(&dev->counter)); | 264 | pr_debug("phantom_poll end: %x/%d\n", mask, atomic_read(&dev->counter)); |
248 | 265 | ||
249 | return mask; | 266 | return mask; |
@@ -253,6 +270,7 @@ static struct file_operations phantom_file_ops = { | |||
253 | .open = phantom_open, | 270 | .open = phantom_open, |
254 | .release = phantom_release, | 271 | .release = phantom_release, |
255 | .unlocked_ioctl = phantom_ioctl, | 272 | .unlocked_ioctl = phantom_ioctl, |
273 | .compat_ioctl = phantom_compat_ioctl, | ||
256 | .poll = phantom_poll, | 274 | .poll = phantom_poll, |
257 | }; | 275 | }; |
258 | 276 | ||
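The phantom driver gains a compat_ioctl entry point because 32-bit userspace encodes sizeof(compat_uptr_t) (4 bytes) in the size bits of the ioctl number; the wrapper rewrites those bits to the native pointer size and converts the argument with compat_ptr() before calling the regular handler. The same shape works for other drivers whose ioctls carry user pointers; a sketch under that assumption, with hypothetical foo_* names and foo_ioctl() standing in for the existing unlocked_ioctl handler:

    #include <linux/compat.h>
    #include <linux/fs.h>

    #ifdef CONFIG_COMPAT
    static long foo_compat_ioctl(struct file *filp, unsigned int cmd,
                                 unsigned long arg)
    {
            /* 32-bit callers encode a 4-byte pointer in the size field; patch
             * the command so it matches the native definition, then translate
             * the user pointer before forwarding. */
            if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) {
                    cmd &= ~(_IOC_SIZEMASK << _IOC_SIZESHIFT);
                    cmd |= sizeof(void *) << _IOC_SIZESHIFT;
            }
            return foo_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
    }
    #else
    #define foo_compat_ioctl NULL
    #endif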
diff --git a/drivers/misc/sony-laptop.c b/drivers/misc/sony-laptop.c index 02ff3d19b1cc..00e48e2a9c11 100644 --- a/drivers/misc/sony-laptop.c +++ b/drivers/misc/sony-laptop.c | |||
@@ -961,7 +961,7 @@ static int sony_nc_resume(struct acpi_device *device) | |||
961 | ret = acpi_callsetfunc(sony_nc_acpi_handle, *item->acpiset, | 961 | ret = acpi_callsetfunc(sony_nc_acpi_handle, *item->acpiset, |
962 | item->value, NULL); | 962 | item->value, NULL); |
963 | if (ret < 0) { | 963 | if (ret < 0) { |
964 | printk("%s: %d\n", __FUNCTION__, ret); | 964 | printk("%s: %d\n", __func__, ret); |
965 | break; | 965 | break; |
966 | } | 966 | } |
967 | } | 967 | } |
@@ -1453,7 +1453,7 @@ static struct sonypi_eventtypes type4_events[] = { | |||
1453 | udelay(1); \ | 1453 | udelay(1); \ |
1454 | if (!n) \ | 1454 | if (!n) \ |
1455 | dprintk("command failed at %s : %s (line %d)\n", \ | 1455 | dprintk("command failed at %s : %s (line %d)\n", \ |
1456 | __FILE__, __FUNCTION__, __LINE__); \ | 1456 | __FILE__, __func__, __LINE__); \ |
1457 | } | 1457 | } |
1458 | 1458 | ||
1459 | static u8 sony_pic_call1(u8 dev) | 1459 | static u8 sony_pic_call1(u8 dev) |
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c index 365024b83d3d..35508584ac2a 100644 --- a/drivers/mmc/host/mmc_spi.c +++ b/drivers/mmc/host/mmc_spi.c | |||
@@ -340,7 +340,7 @@ checkstatus: | |||
340 | 340 | ||
341 | /* SPI R3, R4, or R7 == R1 + 4 bytes */ | 341 | /* SPI R3, R4, or R7 == R1 + 4 bytes */ |
342 | case MMC_RSP_SPI_R3: | 342 | case MMC_RSP_SPI_R3: |
343 | cmd->resp[1] = be32_to_cpu(get_unaligned((u32 *)cp)); | 343 | cmd->resp[1] = get_unaligned_be32(cp); |
344 | break; | 344 | break; |
345 | 345 | ||
346 | /* SPI R1 == just one status byte */ | 346 | /* SPI R1 == just one status byte */ |
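get_unaligned_be32(cp) above is the one-step replacement for be32_to_cpu(get_unaligned((__be32 *)cp)); the get_unaligned_le16/le32 calls in the network hunks that follow are the little-endian counterparts. A small sketch of the equivalence, with hypothetical buffer and field names:

    #include <linux/types.h>
    #include <asm/unaligned.h>

    /* Read a big-endian 32-bit word and a little-endian 16-bit word from a
     * byte buffer that has no particular alignment. */
    static void parse_example(const u8 *buf, u32 *resp, u16 *len)
    {
            *resp = get_unaligned_be32(buf);        /* was be32_to_cpu(get_unaligned(...)) */
            *len  = get_unaligned_le16(buf + 4);    /* was le16_to_cpu(get_unaligned(...)) */
    }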
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 6e91b4b7aabb..6425603bc379 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -3282,17 +3282,14 @@ static int bond_create_proc_entry(struct bonding *bond) | |||
3282 | struct net_device *bond_dev = bond->dev; | 3282 | struct net_device *bond_dev = bond->dev; |
3283 | 3283 | ||
3284 | if (bond_proc_dir) { | 3284 | if (bond_proc_dir) { |
3285 | bond->proc_entry = create_proc_entry(bond_dev->name, | 3285 | bond->proc_entry = proc_create_data(bond_dev->name, |
3286 | S_IRUGO, | 3286 | S_IRUGO, bond_proc_dir, |
3287 | bond_proc_dir); | 3287 | &bond_info_fops, bond); |
3288 | if (bond->proc_entry == NULL) { | 3288 | if (bond->proc_entry == NULL) { |
3289 | printk(KERN_WARNING DRV_NAME | 3289 | printk(KERN_WARNING DRV_NAME |
3290 | ": Warning: Cannot create /proc/net/%s/%s\n", | 3290 | ": Warning: Cannot create /proc/net/%s/%s\n", |
3291 | DRV_NAME, bond_dev->name); | 3291 | DRV_NAME, bond_dev->name); |
3292 | } else { | 3292 | } else { |
3293 | bond->proc_entry->data = bond; | ||
3294 | bond->proc_entry->proc_fops = &bond_info_fops; | ||
3295 | bond->proc_entry->owner = THIS_MODULE; | ||
3296 | memcpy(bond->proc_file_name, bond_dev->name, IFNAMSIZ); | 3293 | memcpy(bond->proc_file_name, bond_dev->name, IFNAMSIZ); |
3297 | } | 3294 | } |
3298 | } | 3295 | } |
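The bonding change is one instance of the create_proc_entry() → proc_create_data() conversion made throughout this series: the file_operations and private data are supplied at creation time, so the entry is never visible in /proc without its fops (the old code poked entry->proc_fops and entry->data in afterwards). A minimal sketch of the new call, with a hypothetical entry name and fops:

    #include <linux/proc_fs.h>
    #include <linux/stat.h>

    static int foo_add_proc(struct proc_dir_entry *parent,
                            const struct file_operations *fops, void *priv)
    {
            struct proc_dir_entry *entry;

            /* name, mode, parent, fops and ->data in a single call */
            entry = proc_create_data("foo", S_IRUGO, parent, fops, priv);
            if (!entry)
                    return -ENOMEM;
            return 0;
    }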
diff --git a/drivers/net/e100.c b/drivers/net/e100.c index 2d139ec79777..f3cba5e24ec5 100644 --- a/drivers/net/e100.c +++ b/drivers/net/e100.c | |||
@@ -1802,7 +1802,7 @@ static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx) | |||
1802 | * it is protected by the before last buffer's el bit being set */ | 1802 | * it is protected by the before last buffer's el bit being set */ |
1803 | if (rx->prev->skb) { | 1803 | if (rx->prev->skb) { |
1804 | struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data; | 1804 | struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data; |
1805 | put_unaligned(cpu_to_le32(rx->dma_addr), &prev_rfd->link); | 1805 | put_unaligned_le32(rx->dma_addr, &prev_rfd->link); |
1806 | } | 1806 | } |
1807 | 1807 | ||
1808 | return 0; | 1808 | return 0; |
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c index b53f6b6491b3..e5c2380f50ca 100644 --- a/drivers/net/hamachi.c +++ b/drivers/net/hamachi.c | |||
@@ -1508,7 +1508,7 @@ static int hamachi_rx(struct net_device *dev) | |||
1508 | hmp->rx_buf_sz, | 1508 | hmp->rx_buf_sz, |
1509 | PCI_DMA_FROMDEVICE); | 1509 | PCI_DMA_FROMDEVICE); |
1510 | buf_addr = (u8 *) hmp->rx_skbuff[entry]->data; | 1510 | buf_addr = (u8 *) hmp->rx_skbuff[entry]->data; |
1511 | frame_status = le32_to_cpu(get_unaligned((__le32*)&(buf_addr[data_size - 12]))); | 1511 | frame_status = get_unaligned_le32(&(buf_addr[data_size - 12])); |
1512 | if (hamachi_debug > 4) | 1512 | if (hamachi_debug > 4) |
1513 | printk(KERN_DEBUG " hamachi_rx() status was %8.8x.\n", | 1513 | printk(KERN_DEBUG " hamachi_rx() status was %8.8x.\n", |
1514 | frame_status); | 1514 | frame_status); |
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c index ce4fc2ec2fe4..00527805e4f1 100644 --- a/drivers/net/ibmveth.c +++ b/drivers/net/ibmveth.c | |||
@@ -1302,13 +1302,10 @@ static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter) | |||
1302 | if (ibmveth_proc_dir) { | 1302 | if (ibmveth_proc_dir) { |
1303 | char u_addr[10]; | 1303 | char u_addr[10]; |
1304 | sprintf(u_addr, "%x", adapter->vdev->unit_address); | 1304 | sprintf(u_addr, "%x", adapter->vdev->unit_address); |
1305 | entry = create_proc_entry(u_addr, S_IFREG, ibmveth_proc_dir); | 1305 | entry = proc_create_data(u_addr, S_IFREG, ibmveth_proc_dir, |
1306 | if (!entry) { | 1306 | &ibmveth_proc_fops, adapter); |
1307 | if (!entry) | ||
1307 | ibmveth_error_printk("Cannot create adapter proc entry"); | 1308 | ibmveth_error_printk("Cannot create adapter proc entry"); |
1308 | } else { | ||
1309 | entry->data = (void *) adapter; | ||
1310 | entry->proc_fops = &ibmveth_proc_fops; | ||
1311 | } | ||
1312 | } | 1309 | } |
1313 | return; | 1310 | return; |
1314 | } | 1311 | } |
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c index 93916cf33f29..ad92d3ff1c40 100644 --- a/drivers/net/irda/mcs7780.c +++ b/drivers/net/irda/mcs7780.c | |||
@@ -464,7 +464,7 @@ static void mcs_unwrap_fir(struct mcs_cb *mcs, __u8 *buf, int len) | |||
464 | } | 464 | } |
465 | 465 | ||
466 | fcs = ~(crc32_le(~0, buf, new_len)); | 466 | fcs = ~(crc32_le(~0, buf, new_len)); |
467 | if(fcs != le32_to_cpu(get_unaligned((__le32 *)(buf+new_len)))) { | 467 | if(fcs != get_unaligned_le32(buf + new_len)) { |
468 | IRDA_ERROR("crc error calc 0x%x len %d\n", fcs, new_len); | 468 | IRDA_ERROR("crc error calc 0x%x len %d\n", fcs, new_len); |
469 | mcs->stats.rx_errors++; | 469 | mcs->stats.rx_errors++; |
470 | mcs->stats.rx_crc_errors++; | 470 | mcs->stats.rx_crc_errors++; |
diff --git a/drivers/net/irda/stir4200.c b/drivers/net/irda/stir4200.c index e59c485bc497..051963782749 100644 --- a/drivers/net/irda/stir4200.c +++ b/drivers/net/irda/stir4200.c | |||
@@ -329,7 +329,7 @@ static void fir_eof(struct stir_cb *stir) | |||
329 | } | 329 | } |
330 | 330 | ||
331 | fcs = ~(crc32_le(~0, rx_buff->data, len)); | 331 | fcs = ~(crc32_le(~0, rx_buff->data, len)); |
332 | if (fcs != le32_to_cpu(get_unaligned((__le32 *)(rx_buff->data+len)))) { | 332 | if (fcs != get_unaligned_le32(rx_buff->data + len)) { |
333 | pr_debug("crc error calc 0x%x len %d\n", fcs, len); | 333 | pr_debug("crc error calc 0x%x len %d\n", fcs, len); |
334 | stir->stats.rx_errors++; | 334 | stir->stats.rx_errors++; |
335 | stir->stats.rx_crc_errors++; | 335 | stir->stats.rx_crc_errors++; |
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c index acd082a96a4f..d15e00b8591e 100644 --- a/drivers/net/irda/vlsi_ir.c +++ b/drivers/net/irda/vlsi_ir.c | |||
@@ -1674,13 +1674,12 @@ vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1674 | if (vlsi_proc_root != NULL) { | 1674 | if (vlsi_proc_root != NULL) { |
1675 | struct proc_dir_entry *ent; | 1675 | struct proc_dir_entry *ent; |
1676 | 1676 | ||
1677 | ent = create_proc_entry(ndev->name, S_IFREG|S_IRUGO, vlsi_proc_root); | 1677 | ent = proc_create_data(ndev->name, S_IFREG|S_IRUGO, |
1678 | vlsi_proc_root, VLSI_PROC_FOPS, ndev); | ||
1678 | if (!ent) { | 1679 | if (!ent) { |
1679 | IRDA_WARNING("%s: failed to create proc entry\n", | 1680 | IRDA_WARNING("%s: failed to create proc entry\n", |
1680 | __FUNCTION__); | 1681 | __FUNCTION__); |
1681 | } else { | 1682 | } else { |
1682 | ent->data = ndev; | ||
1683 | ent->proc_fops = VLSI_PROC_FOPS; | ||
1684 | ent->size = 0; | 1683 | ent->size = 0; |
1685 | } | 1684 | } |
1686 | idev->proc_entry = ent; | 1685 | idev->proc_entry = ent; |
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index cead81e80f0c..ef63c8d2bd7e 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c | |||
@@ -2437,7 +2437,7 @@ static int myri10ge_sw_tso(struct sk_buff *skb, struct net_device *dev) | |||
2437 | int status; | 2437 | int status; |
2438 | 2438 | ||
2439 | segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO6); | 2439 | segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO6); |
2440 | if (unlikely(IS_ERR(segs))) | 2440 | if (IS_ERR(segs)) |
2441 | goto drop; | 2441 | goto drop; |
2442 | 2442 | ||
2443 | while (segs) { | 2443 | while (segs) { |
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c index 4fad4ddb3504..58a26a47af29 100644 --- a/drivers/net/pppoe.c +++ b/drivers/net/pppoe.c | |||
@@ -1052,11 +1052,9 @@ static int __init pppoe_proc_init(void) | |||
1052 | { | 1052 | { |
1053 | struct proc_dir_entry *p; | 1053 | struct proc_dir_entry *p; |
1054 | 1054 | ||
1055 | p = create_proc_entry("pppoe", S_IRUGO, init_net.proc_net); | 1055 | p = proc_net_fops_create(&init_net, "pppoe", S_IRUGO, &pppoe_seq_fops); |
1056 | if (!p) | 1056 | if (!p) |
1057 | return -ENOMEM; | 1057 | return -ENOMEM; |
1058 | |||
1059 | p->proc_fops = &pppoe_seq_fops; | ||
1060 | return 0; | 1058 | return 0; |
1061 | } | 1059 | } |
1062 | #else /* CONFIG_PROC_FS */ | 1060 | #else /* CONFIG_PROC_FS */ |
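proc_net_fops_create() plays the same role for /proc/net entries: it creates the file under the given network namespace's proc_net directory with the fops already attached, replacing the create_proc_entry() call plus the p->proc_fops assignment. A sketch, assuming a hypothetical foo_seq_fops:

    #include <linux/proc_fs.h>
    #include <net/net_namespace.h>

    static int __init foo_proc_init(void)
    {
            /* creates /proc/net/foo with foo_seq_fops attached atomically */
            if (!proc_net_fops_create(&init_net, "foo", S_IRUGO, &foo_seq_fops))
                    return -ENOMEM;
            return 0;
    }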
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c index 3d10ca050b79..244d7830c92a 100644 --- a/drivers/net/pppol2tp.c +++ b/drivers/net/pppol2tp.c | |||
@@ -2469,12 +2469,12 @@ static int __init pppol2tp_init(void) | |||
2469 | goto out_unregister_pppol2tp_proto; | 2469 | goto out_unregister_pppol2tp_proto; |
2470 | 2470 | ||
2471 | #ifdef CONFIG_PROC_FS | 2471 | #ifdef CONFIG_PROC_FS |
2472 | pppol2tp_proc = create_proc_entry("pppol2tp", 0, init_net.proc_net); | 2472 | pppol2tp_proc = proc_net_fops_create(&init_net, "pppol2tp", 0, |
2473 | &pppol2tp_proc_fops); | ||
2473 | if (!pppol2tp_proc) { | 2474 | if (!pppol2tp_proc) { |
2474 | err = -ENOMEM; | 2475 | err = -ENOMEM; |
2475 | goto out_unregister_pppox_proto; | 2476 | goto out_unregister_pppox_proto; |
2476 | } | 2477 | } |
2477 | pppol2tp_proc->proc_fops = &pppol2tp_proc_fops; | ||
2478 | #endif /* CONFIG_PROC_FS */ | 2478 | #endif /* CONFIG_PROC_FS */ |
2479 | printk(KERN_INFO "PPPoL2TP kernel driver, %s\n", | 2479 | printk(KERN_INFO "PPPoL2TP kernel driver, %s\n", |
2480 | PPPOL2TP_DRV_VERSION); | 2480 | PPPOL2TP_DRV_VERSION); |
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index e3f74c9f78bd..b66c75e3b8a1 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c | |||
@@ -4361,7 +4361,7 @@ static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb) | |||
4361 | } | 4361 | } |
4362 | 4362 | ||
4363 | segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO); | 4363 | segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO); |
4364 | if (unlikely(IS_ERR(segs))) | 4364 | if (IS_ERR(segs)) |
4365 | goto tg3_tso_bug_end; | 4365 | goto tg3_tso_bug_end; |
4366 | 4366 | ||
4367 | do { | 4367 | do { |
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c index 6c6fc325c8f9..bc30c6e8fea2 100644 --- a/drivers/net/tulip/de4x5.c +++ b/drivers/net/tulip/de4x5.c | |||
@@ -482,7 +482,6 @@ | |||
482 | static char version[] __devinitdata = "de4x5.c:V0.546 2001/02/22 davies@maniac.ultranet.com\n"; | 482 | static char version[] __devinitdata = "de4x5.c:V0.546 2001/02/22 davies@maniac.ultranet.com\n"; |
483 | 483 | ||
484 | #define c_char const char | 484 | #define c_char const char |
485 | #define TWIDDLE(a) (u_short)le16_to_cpu(get_unaligned((__le16 *)(a))) | ||
486 | 485 | ||
487 | /* | 486 | /* |
488 | ** MII Information | 487 | ** MII Information |
@@ -4405,7 +4404,7 @@ srom_infoleaf_info(struct net_device *dev) | |||
4405 | } | 4404 | } |
4406 | } | 4405 | } |
4407 | 4406 | ||
4408 | lp->infoleaf_offset = TWIDDLE(p+1); | 4407 | lp->infoleaf_offset = get_unaligned_le16(p + 1); |
4409 | 4408 | ||
4410 | return 0; | 4409 | return 0; |
4411 | } | 4410 | } |
@@ -4476,7 +4475,7 @@ srom_exec(struct net_device *dev, u_char *p) | |||
4476 | 4475 | ||
4477 | while (count--) { | 4476 | while (count--) { |
4478 | gep_wr(((lp->chipset==DC21140) && (lp->ibn!=5) ? | 4477 | gep_wr(((lp->chipset==DC21140) && (lp->ibn!=5) ? |
4479 | *p++ : TWIDDLE(w++)), dev); | 4478 | *p++ : get_unaligned_le16(w++)), dev); |
4480 | mdelay(2); /* 2ms per action */ | 4479 | mdelay(2); /* 2ms per action */ |
4481 | } | 4480 | } |
4482 | 4481 | ||
@@ -4711,10 +4710,10 @@ type1_infoblock(struct net_device *dev, u_char count, u_char *p) | |||
4711 | lp->active = *p++; | 4710 | lp->active = *p++; |
4712 | lp->phy[lp->active].gep = (*p ? p : NULL); p += (*p + 1); | 4711 | lp->phy[lp->active].gep = (*p ? p : NULL); p += (*p + 1); |
4713 | lp->phy[lp->active].rst = (*p ? p : NULL); p += (*p + 1); | 4712 | lp->phy[lp->active].rst = (*p ? p : NULL); p += (*p + 1); |
4714 | lp->phy[lp->active].mc = TWIDDLE(p); p += 2; | 4713 | lp->phy[lp->active].mc = get_unaligned_le16(p); p += 2; |
4715 | lp->phy[lp->active].ana = TWIDDLE(p); p += 2; | 4714 | lp->phy[lp->active].ana = get_unaligned_le16(p); p += 2; |
4716 | lp->phy[lp->active].fdx = TWIDDLE(p); p += 2; | 4715 | lp->phy[lp->active].fdx = get_unaligned_le16(p); p += 2; |
4717 | lp->phy[lp->active].ttm = TWIDDLE(p); | 4716 | lp->phy[lp->active].ttm = get_unaligned_le16(p); |
4718 | return 0; | 4717 | return 0; |
4719 | } else if ((lp->media == INIT) && (lp->timeout < 0)) { | 4718 | } else if ((lp->media == INIT) && (lp->timeout < 0)) { |
4720 | lp->ibn = 1; | 4719 | lp->ibn = 1; |
@@ -4751,16 +4750,16 @@ type2_infoblock(struct net_device *dev, u_char count, u_char *p) | |||
4751 | lp->infoblock_media = (*p) & MEDIA_CODE; | 4750 | lp->infoblock_media = (*p) & MEDIA_CODE; |
4752 | 4751 | ||
4753 | if ((*p++) & EXT_FIELD) { | 4752 | if ((*p++) & EXT_FIELD) { |
4754 | lp->cache.csr13 = TWIDDLE(p); p += 2; | 4753 | lp->cache.csr13 = get_unaligned_le16(p); p += 2; |
4755 | lp->cache.csr14 = TWIDDLE(p); p += 2; | 4754 | lp->cache.csr14 = get_unaligned_le16(p); p += 2; |
4756 | lp->cache.csr15 = TWIDDLE(p); p += 2; | 4755 | lp->cache.csr15 = get_unaligned_le16(p); p += 2; |
4757 | } else { | 4756 | } else { |
4758 | lp->cache.csr13 = CSR13; | 4757 | lp->cache.csr13 = CSR13; |
4759 | lp->cache.csr14 = CSR14; | 4758 | lp->cache.csr14 = CSR14; |
4760 | lp->cache.csr15 = CSR15; | 4759 | lp->cache.csr15 = CSR15; |
4761 | } | 4760 | } |
4762 | lp->cache.gepc = ((s32)(TWIDDLE(p)) << 16); p += 2; | 4761 | lp->cache.gepc = ((s32)(get_unaligned_le16(p)) << 16); p += 2; |
4763 | lp->cache.gep = ((s32)(TWIDDLE(p)) << 16); | 4762 | lp->cache.gep = ((s32)(get_unaligned_le16(p)) << 16); |
4764 | lp->infoblock_csr6 = OMR_SIA; | 4763 | lp->infoblock_csr6 = OMR_SIA; |
4765 | lp->useMII = false; | 4764 | lp->useMII = false; |
4766 | 4765 | ||
@@ -4792,10 +4791,10 @@ type3_infoblock(struct net_device *dev, u_char count, u_char *p) | |||
4792 | if (MOTO_SROM_BUG) lp->active = 0; | 4791 | if (MOTO_SROM_BUG) lp->active = 0; |
4793 | lp->phy[lp->active].gep = (*p ? p : NULL); p += (2 * (*p) + 1); | 4792 | lp->phy[lp->active].gep = (*p ? p : NULL); p += (2 * (*p) + 1); |
4794 | lp->phy[lp->active].rst = (*p ? p : NULL); p += (2 * (*p) + 1); | 4793 | lp->phy[lp->active].rst = (*p ? p : NULL); p += (2 * (*p) + 1); |
4795 | lp->phy[lp->active].mc = TWIDDLE(p); p += 2; | 4794 | lp->phy[lp->active].mc = get_unaligned_le16(p); p += 2; |
4796 | lp->phy[lp->active].ana = TWIDDLE(p); p += 2; | 4795 | lp->phy[lp->active].ana = get_unaligned_le16(p); p += 2; |
4797 | lp->phy[lp->active].fdx = TWIDDLE(p); p += 2; | 4796 | lp->phy[lp->active].fdx = get_unaligned_le16(p); p += 2; |
4798 | lp->phy[lp->active].ttm = TWIDDLE(p); p += 2; | 4797 | lp->phy[lp->active].ttm = get_unaligned_le16(p); p += 2; |
4799 | lp->phy[lp->active].mci = *p; | 4798 | lp->phy[lp->active].mci = *p; |
4800 | return 0; | 4799 | return 0; |
4801 | } else if ((lp->media == INIT) && (lp->timeout < 0)) { | 4800 | } else if ((lp->media == INIT) && (lp->timeout < 0)) { |
@@ -4835,8 +4834,8 @@ type4_infoblock(struct net_device *dev, u_char count, u_char *p) | |||
4835 | lp->cache.csr13 = CSR13; /* Hard coded defaults */ | 4834 | lp->cache.csr13 = CSR13; /* Hard coded defaults */ |
4836 | lp->cache.csr14 = CSR14; | 4835 | lp->cache.csr14 = CSR14; |
4837 | lp->cache.csr15 = CSR15; | 4836 | lp->cache.csr15 = CSR15; |
4838 | lp->cache.gepc = ((s32)(TWIDDLE(p)) << 16); p += 2; | 4837 | lp->cache.gepc = ((s32)(get_unaligned_le16(p)) << 16); p += 2; |
4839 | lp->cache.gep = ((s32)(TWIDDLE(p)) << 16); p += 2; | 4838 | lp->cache.gep = ((s32)(get_unaligned_le16(p)) << 16); p += 2; |
4840 | csr6 = *p++; | 4839 | csr6 = *p++; |
4841 | flags = *p++; | 4840 | flags = *p++; |
4842 | 4841 | ||
diff --git a/drivers/net/tulip/de4x5.h b/drivers/net/tulip/de4x5.h index 9fb8d7f07994..f5f33b3eb067 100644 --- a/drivers/net/tulip/de4x5.h +++ b/drivers/net/tulip/de4x5.h | |||
@@ -1017,4 +1017,4 @@ struct de4x5_ioctl { | |||
1017 | #define DE4X5_SET_OMR 0x0d /* Set the OMR Register contents */ | 1017 | #define DE4X5_SET_OMR 0x0d /* Set the OMR Register contents */ |
1018 | #define DE4X5_GET_REG 0x0e /* Get the DE4X5 Registers */ | 1018 | #define DE4X5_GET_REG 0x0e /* Get the DE4X5 Registers */ |
1019 | 1019 | ||
1020 | #define MOTO_SROM_BUG ((lp->active == 8) && (((le32_to_cpu(get_unaligned(((__le32 *)dev->dev_addr))))&0x00ffffff)==0x3e0008)) | 1020 | #define MOTO_SROM_BUG (lp->active == 8 && (get_unaligned_le32(dev->dev_addr) & 0x00ffffff) == 0x3e0008) |
diff --git a/drivers/net/tulip/tulip.h b/drivers/net/tulip/tulip.h index 908422f2f320..92c68a22f16b 100644 --- a/drivers/net/tulip/tulip.h +++ b/drivers/net/tulip/tulip.h | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/pci.h> | 25 | #include <linux/pci.h> |
26 | #include <asm/io.h> | 26 | #include <asm/io.h> |
27 | #include <asm/irq.h> | 27 | #include <asm/irq.h> |
28 | #include <asm/unaligned.h> | ||
28 | 29 | ||
29 | 30 | ||
30 | 31 | ||
@@ -304,11 +305,7 @@ enum t21143_csr6_bits { | |||
304 | 305 | ||
305 | #define RUN_AT(x) (jiffies + (x)) | 306 | #define RUN_AT(x) (jiffies + (x)) |
306 | 307 | ||
307 | #if defined(__i386__) /* AKA get_unaligned() */ | 308 | #define get_u16(ptr) get_unaligned_le16((ptr)) |
308 | #define get_u16(ptr) (*(u16 *)(ptr)) | ||
309 | #else | ||
310 | #define get_u16(ptr) (((u8*)(ptr))[0] + (((u8*)(ptr))[1]<<8)) | ||
311 | #endif | ||
312 | 309 | ||
313 | struct medialeaf { | 310 | struct medialeaf { |
314 | u8 type; | 311 | u8 type; |
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c index fa1c1c329a2d..f9d13fa05d64 100644 --- a/drivers/net/tulip/tulip_core.c +++ b/drivers/net/tulip/tulip_core.c | |||
@@ -327,8 +327,8 @@ static void tulip_up(struct net_device *dev) | |||
327 | tp->dirty_rx = tp->dirty_tx = 0; | 327 | tp->dirty_rx = tp->dirty_tx = 0; |
328 | 328 | ||
329 | if (tp->flags & MC_HASH_ONLY) { | 329 | if (tp->flags & MC_HASH_ONLY) { |
330 | u32 addr_low = le32_to_cpu(get_unaligned((__le32 *)dev->dev_addr)); | 330 | u32 addr_low = get_unaligned_le32(dev->dev_addr); |
331 | u32 addr_high = le16_to_cpu(get_unaligned((__le16 *)(dev->dev_addr+4))); | 331 | u32 addr_high = get_unaligned_le16(dev->dev_addr + 4); |
332 | if (tp->chip_id == AX88140) { | 332 | if (tp->chip_id == AX88140) { |
333 | iowrite32(0, ioaddr + CSR13); | 333 | iowrite32(0, ioaddr + CSR13); |
334 | iowrite32(addr_low, ioaddr + CSR14); | 334 | iowrite32(addr_low, ioaddr + CSR14); |
@@ -1437,13 +1437,13 @@ static int __devinit tulip_init_one (struct pci_dev *pdev, | |||
1437 | do | 1437 | do |
1438 | value = ioread32(ioaddr + CSR9); | 1438 | value = ioread32(ioaddr + CSR9); |
1439 | while (value < 0 && --boguscnt > 0); | 1439 | while (value < 0 && --boguscnt > 0); |
1440 | put_unaligned(cpu_to_le16(value), ((__le16*)dev->dev_addr) + i); | 1440 | put_unaligned_le16(value, ((__le16 *)dev->dev_addr) + i); |
1441 | sum += value & 0xffff; | 1441 | sum += value & 0xffff; |
1442 | } | 1442 | } |
1443 | } else if (chip_idx == COMET) { | 1443 | } else if (chip_idx == COMET) { |
1444 | /* No need to read the EEPROM. */ | 1444 | /* No need to read the EEPROM. */ |
1445 | put_unaligned(cpu_to_le32(ioread32(ioaddr + 0xA4)), (__le32 *)dev->dev_addr); | 1445 | put_unaligned_le32(ioread32(ioaddr + 0xA4), dev->dev_addr); |
1446 | put_unaligned(cpu_to_le16(ioread32(ioaddr + 0xA8)), (__le16 *)(dev->dev_addr + 4)); | 1446 | put_unaligned_le16(ioread32(ioaddr + 0xA8), dev->dev_addr + 4); |
1447 | for (i = 0; i < 6; i ++) | 1447 | for (i = 0; i < 6; i ++) |
1448 | sum += dev->dev_addr[i]; | 1448 | sum += dev->dev_addr[i]; |
1449 | } else { | 1449 | } else { |
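On the write side, put_unaligned_le16/le32() store a CPU-order value little-endian at a possibly unaligned address, folding the cpu_to_le*() + put_unaligned() pairs the old tulip code used into one call each. A sketch that assembles a 6-byte station address from two hypothetical register reads:

    #include <linux/types.h>
    #include <asm/unaligned.h>

    static void set_station_addr_example(u8 *dev_addr, u32 low32, u16 high16)
    {
            put_unaligned_le32(low32, dev_addr);            /* bytes 0-3, little-endian */
            put_unaligned_le16(high16, dev_addr + 4);       /* bytes 4-5 */
    }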
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c index 932d6b1c9d0b..45f47c1c0a35 100644 --- a/drivers/net/wireless/airo.c +++ b/drivers/net/wireless/airo.c | |||
@@ -3657,7 +3657,7 @@ void mpi_receive_802_11 (struct airo_info *ai) | |||
3657 | ptr += hdrlen; | 3657 | ptr += hdrlen; |
3658 | if (hdrlen == 24) | 3658 | if (hdrlen == 24) |
3659 | ptr += 6; | 3659 | ptr += 6; |
3660 | gap = le16_to_cpu(get_unaligned((__le16 *)ptr)); | 3660 | gap = get_unaligned_le16(ptr); |
3661 | ptr += sizeof(__le16); | 3661 | ptr += sizeof(__le16); |
3662 | if (gap) { | 3662 | if (gap) { |
3663 | if (gap <= 8) | 3663 | if (gap <= 8) |
@@ -4347,24 +4347,28 @@ static int proc_config_open( struct inode *inode, struct file *file ); | |||
4347 | static int proc_wepkey_open( struct inode *inode, struct file *file ); | 4347 | static int proc_wepkey_open( struct inode *inode, struct file *file ); |
4348 | 4348 | ||
4349 | static const struct file_operations proc_statsdelta_ops = { | 4349 | static const struct file_operations proc_statsdelta_ops = { |
4350 | .owner = THIS_MODULE, | ||
4350 | .read = proc_read, | 4351 | .read = proc_read, |
4351 | .open = proc_statsdelta_open, | 4352 | .open = proc_statsdelta_open, |
4352 | .release = proc_close | 4353 | .release = proc_close |
4353 | }; | 4354 | }; |
4354 | 4355 | ||
4355 | static const struct file_operations proc_stats_ops = { | 4356 | static const struct file_operations proc_stats_ops = { |
4357 | .owner = THIS_MODULE, | ||
4356 | .read = proc_read, | 4358 | .read = proc_read, |
4357 | .open = proc_stats_open, | 4359 | .open = proc_stats_open, |
4358 | .release = proc_close | 4360 | .release = proc_close |
4359 | }; | 4361 | }; |
4360 | 4362 | ||
4361 | static const struct file_operations proc_status_ops = { | 4363 | static const struct file_operations proc_status_ops = { |
4364 | .owner = THIS_MODULE, | ||
4362 | .read = proc_read, | 4365 | .read = proc_read, |
4363 | .open = proc_status_open, | 4366 | .open = proc_status_open, |
4364 | .release = proc_close | 4367 | .release = proc_close |
4365 | }; | 4368 | }; |
4366 | 4369 | ||
4367 | static const struct file_operations proc_SSID_ops = { | 4370 | static const struct file_operations proc_SSID_ops = { |
4371 | .owner = THIS_MODULE, | ||
4368 | .read = proc_read, | 4372 | .read = proc_read, |
4369 | .write = proc_write, | 4373 | .write = proc_write, |
4370 | .open = proc_SSID_open, | 4374 | .open = proc_SSID_open, |
@@ -4372,6 +4376,7 @@ static const struct file_operations proc_SSID_ops = { | |||
4372 | }; | 4376 | }; |
4373 | 4377 | ||
4374 | static const struct file_operations proc_BSSList_ops = { | 4378 | static const struct file_operations proc_BSSList_ops = { |
4379 | .owner = THIS_MODULE, | ||
4375 | .read = proc_read, | 4380 | .read = proc_read, |
4376 | .write = proc_write, | 4381 | .write = proc_write, |
4377 | .open = proc_BSSList_open, | 4382 | .open = proc_BSSList_open, |
@@ -4379,6 +4384,7 @@ static const struct file_operations proc_BSSList_ops = { | |||
4379 | }; | 4384 | }; |
4380 | 4385 | ||
4381 | static const struct file_operations proc_APList_ops = { | 4386 | static const struct file_operations proc_APList_ops = { |
4387 | .owner = THIS_MODULE, | ||
4382 | .read = proc_read, | 4388 | .read = proc_read, |
4383 | .write = proc_write, | 4389 | .write = proc_write, |
4384 | .open = proc_APList_open, | 4390 | .open = proc_APList_open, |
@@ -4386,6 +4392,7 @@ static const struct file_operations proc_APList_ops = { | |||
4386 | }; | 4392 | }; |
4387 | 4393 | ||
4388 | static const struct file_operations proc_config_ops = { | 4394 | static const struct file_operations proc_config_ops = { |
4395 | .owner = THIS_MODULE, | ||
4389 | .read = proc_read, | 4396 | .read = proc_read, |
4390 | .write = proc_write, | 4397 | .write = proc_write, |
4391 | .open = proc_config_open, | 4398 | .open = proc_config_open, |
@@ -4393,6 +4400,7 @@ static const struct file_operations proc_config_ops = { | |||
4393 | }; | 4400 | }; |
4394 | 4401 | ||
4395 | static const struct file_operations proc_wepkey_ops = { | 4402 | static const struct file_operations proc_wepkey_ops = { |
4403 | .owner = THIS_MODULE, | ||
4396 | .read = proc_read, | 4404 | .read = proc_read, |
4397 | .write = proc_write, | 4405 | .write = proc_write, |
4398 | .open = proc_wepkey_open, | 4406 | .open = proc_wepkey_open, |
@@ -4411,10 +4419,6 @@ struct proc_data { | |||
4411 | void (*on_close) (struct inode *, struct file *); | 4419 | void (*on_close) (struct inode *, struct file *); |
4412 | }; | 4420 | }; |
4413 | 4421 | ||
4414 | #ifndef SETPROC_OPS | ||
4415 | #define SETPROC_OPS(entry, ops) (entry)->proc_fops = &(ops) | ||
4416 | #endif | ||
4417 | |||
4418 | static int setup_proc_entry( struct net_device *dev, | 4422 | static int setup_proc_entry( struct net_device *dev, |
4419 | struct airo_info *apriv ) { | 4423 | struct airo_info *apriv ) { |
4420 | struct proc_dir_entry *entry; | 4424 | struct proc_dir_entry *entry; |
@@ -4430,100 +4434,76 @@ static int setup_proc_entry( struct net_device *dev, | |||
4430 | apriv->proc_entry->owner = THIS_MODULE; | 4434 | apriv->proc_entry->owner = THIS_MODULE; |
4431 | 4435 | ||
4432 | /* Setup the StatsDelta */ | 4436 | /* Setup the StatsDelta */ |
4433 | entry = create_proc_entry("StatsDelta", | 4437 | entry = proc_create_data("StatsDelta", |
4434 | S_IFREG | (S_IRUGO&proc_perm), | 4438 | S_IFREG | (S_IRUGO&proc_perm), |
4435 | apriv->proc_entry); | 4439 | apriv->proc_entry, &proc_statsdelta_ops, dev); |
4436 | if (!entry) | 4440 | if (!entry) |
4437 | goto fail_stats_delta; | 4441 | goto fail_stats_delta; |
4438 | entry->uid = proc_uid; | 4442 | entry->uid = proc_uid; |
4439 | entry->gid = proc_gid; | 4443 | entry->gid = proc_gid; |
4440 | entry->data = dev; | ||
4441 | entry->owner = THIS_MODULE; | ||
4442 | SETPROC_OPS(entry, proc_statsdelta_ops); | ||
4443 | 4444 | ||
4444 | /* Setup the Stats */ | 4445 | /* Setup the Stats */ |
4445 | entry = create_proc_entry("Stats", | 4446 | entry = proc_create_data("Stats", |
4446 | S_IFREG | (S_IRUGO&proc_perm), | 4447 | S_IFREG | (S_IRUGO&proc_perm), |
4447 | apriv->proc_entry); | 4448 | apriv->proc_entry, &proc_stats_ops, dev); |
4448 | if (!entry) | 4449 | if (!entry) |
4449 | goto fail_stats; | 4450 | goto fail_stats; |
4450 | entry->uid = proc_uid; | 4451 | entry->uid = proc_uid; |
4451 | entry->gid = proc_gid; | 4452 | entry->gid = proc_gid; |
4452 | entry->data = dev; | ||
4453 | entry->owner = THIS_MODULE; | ||
4454 | SETPROC_OPS(entry, proc_stats_ops); | ||
4455 | 4453 | ||
4456 | /* Setup the Status */ | 4454 | /* Setup the Status */ |
4457 | entry = create_proc_entry("Status", | 4455 | entry = proc_create_data("Status", |
4458 | S_IFREG | (S_IRUGO&proc_perm), | 4456 | S_IFREG | (S_IRUGO&proc_perm), |
4459 | apriv->proc_entry); | 4457 | apriv->proc_entry, &proc_status_ops, dev); |
4460 | if (!entry) | 4458 | if (!entry) |
4461 | goto fail_status; | 4459 | goto fail_status; |
4462 | entry->uid = proc_uid; | 4460 | entry->uid = proc_uid; |
4463 | entry->gid = proc_gid; | 4461 | entry->gid = proc_gid; |
4464 | entry->data = dev; | ||
4465 | entry->owner = THIS_MODULE; | ||
4466 | SETPROC_OPS(entry, proc_status_ops); | ||
4467 | 4462 | ||
4468 | /* Setup the Config */ | 4463 | /* Setup the Config */ |
4469 | entry = create_proc_entry("Config", | 4464 | entry = proc_create_data("Config", |
4470 | S_IFREG | proc_perm, | 4465 | S_IFREG | proc_perm, |
4471 | apriv->proc_entry); | 4466 | apriv->proc_entry, &proc_config_ops, dev); |
4472 | if (!entry) | 4467 | if (!entry) |
4473 | goto fail_config; | 4468 | goto fail_config; |
4474 | entry->uid = proc_uid; | 4469 | entry->uid = proc_uid; |
4475 | entry->gid = proc_gid; | 4470 | entry->gid = proc_gid; |
4476 | entry->data = dev; | ||
4477 | entry->owner = THIS_MODULE; | ||
4478 | SETPROC_OPS(entry, proc_config_ops); | ||
4479 | 4471 | ||
4480 | /* Setup the SSID */ | 4472 | /* Setup the SSID */ |
4481 | entry = create_proc_entry("SSID", | 4473 | entry = proc_create_data("SSID", |
4482 | S_IFREG | proc_perm, | 4474 | S_IFREG | proc_perm, |
4483 | apriv->proc_entry); | 4475 | apriv->proc_entry, &proc_SSID_ops, dev); |
4484 | if (!entry) | 4476 | if (!entry) |
4485 | goto fail_ssid; | 4477 | goto fail_ssid; |
4486 | entry->uid = proc_uid; | 4478 | entry->uid = proc_uid; |
4487 | entry->gid = proc_gid; | 4479 | entry->gid = proc_gid; |
4488 | entry->data = dev; | ||
4489 | entry->owner = THIS_MODULE; | ||
4490 | SETPROC_OPS(entry, proc_SSID_ops); | ||
4491 | 4480 | ||
4492 | /* Setup the APList */ | 4481 | /* Setup the APList */ |
4493 | entry = create_proc_entry("APList", | 4482 | entry = proc_create_data("APList", |
4494 | S_IFREG | proc_perm, | 4483 | S_IFREG | proc_perm, |
4495 | apriv->proc_entry); | 4484 | apriv->proc_entry, &proc_APList_ops, dev); |
4496 | if (!entry) | 4485 | if (!entry) |
4497 | goto fail_aplist; | 4486 | goto fail_aplist; |
4498 | entry->uid = proc_uid; | 4487 | entry->uid = proc_uid; |
4499 | entry->gid = proc_gid; | 4488 | entry->gid = proc_gid; |
4500 | entry->data = dev; | ||
4501 | entry->owner = THIS_MODULE; | ||
4502 | SETPROC_OPS(entry, proc_APList_ops); | ||
4503 | 4489 | ||
4504 | /* Setup the BSSList */ | 4490 | /* Setup the BSSList */ |
4505 | entry = create_proc_entry("BSSList", | 4491 | entry = proc_create_data("BSSList", |
4506 | S_IFREG | proc_perm, | 4492 | S_IFREG | proc_perm, |
4507 | apriv->proc_entry); | 4493 | apriv->proc_entry, &proc_BSSList_ops, dev); |
4508 | if (!entry) | 4494 | if (!entry) |
4509 | goto fail_bsslist; | 4495 | goto fail_bsslist; |
4510 | entry->uid = proc_uid; | 4496 | entry->uid = proc_uid; |
4511 | entry->gid = proc_gid; | 4497 | entry->gid = proc_gid; |
4512 | entry->data = dev; | ||
4513 | entry->owner = THIS_MODULE; | ||
4514 | SETPROC_OPS(entry, proc_BSSList_ops); | ||
4515 | 4498 | ||
4516 | /* Setup the WepKey */ | 4499 | /* Setup the WepKey */ |
4517 | entry = create_proc_entry("WepKey", | 4500 | entry = proc_create_data("WepKey", |
4518 | S_IFREG | proc_perm, | 4501 | S_IFREG | proc_perm, |
4519 | apriv->proc_entry); | 4502 | apriv->proc_entry, &proc_wepkey_ops, dev); |
4520 | if (!entry) | 4503 | if (!entry) |
4521 | goto fail_wepkey; | 4504 | goto fail_wepkey; |
4522 | entry->uid = proc_uid; | 4505 | entry->uid = proc_uid; |
4523 | entry->gid = proc_gid; | 4506 | entry->gid = proc_gid; |
4524 | entry->data = dev; | ||
4525 | entry->owner = THIS_MODULE; | ||
4526 | SETPROC_OPS(entry, proc_wepkey_ops); | ||
4527 | 4507 | ||
4528 | return 0; | 4508 | return 0; |
4529 | 4509 | ||
@@ -5625,9 +5605,9 @@ static int __init airo_init_module( void ) | |||
5625 | int have_isa_dev = 0; | 5605 | int have_isa_dev = 0; |
5626 | #endif | 5606 | #endif |
5627 | 5607 | ||
5628 | airo_entry = create_proc_entry("aironet", | 5608 | airo_entry = create_proc_entry("driver/aironet", |
5629 | S_IFDIR | airo_perm, | 5609 | S_IFDIR | airo_perm, |
5630 | proc_root_driver); | 5610 | NULL); |
5631 | 5611 | ||
5632 | if (airo_entry) { | 5612 | if (airo_entry) { |
5633 | airo_entry->uid = proc_uid; | 5613 | airo_entry->uid = proc_uid; |
@@ -5651,7 +5631,7 @@ static int __init airo_init_module( void ) | |||
5651 | airo_print_info("", "Finished probing for PCI adapters"); | 5631 | airo_print_info("", "Finished probing for PCI adapters"); |
5652 | 5632 | ||
5653 | if (i) { | 5633 | if (i) { |
5654 | remove_proc_entry("aironet", proc_root_driver); | 5634 | remove_proc_entry("driver/aironet", NULL); |
5655 | return i; | 5635 | return i; |
5656 | } | 5636 | } |
5657 | #endif | 5637 | #endif |
@@ -5673,7 +5653,7 @@ static void __exit airo_cleanup_module( void ) | |||
5673 | #ifdef CONFIG_PCI | 5653 | #ifdef CONFIG_PCI |
5674 | pci_unregister_driver(&airo_driver); | 5654 | pci_unregister_driver(&airo_driver); |
5675 | #endif | 5655 | #endif |
5676 | remove_proc_entry("aironet", proc_root_driver); | 5656 | remove_proc_entry("driver/aironet", NULL); |
5677 | } | 5657 | } |
5678 | 5658 | ||
5679 | /* | 5659 | /* |
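The .owner = THIS_MODULE additions to these airo file_operations matter once proc_create_data() is used: the fops are live as soon as the entry exists, and the ->owner field is what takes a module reference when the file is opened, so the driver cannot be unloaded underneath an open /proc file. The shape, with hypothetical handler names:

    #include <linux/fs.h>
    #include <linux/module.h>

    static const struct file_operations foo_proc_ops = {
            .owner   = THIS_MODULE, /* module pinned while the file is open */
            .open    = foo_proc_open,
            .read    = foo_proc_read,
            .release = foo_proc_release,
    };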
diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c index e18305b781c9..4e5c8fc35200 100644 --- a/drivers/net/wireless/ath5k/base.c +++ b/drivers/net/wireless/ath5k/base.c | |||
@@ -58,10 +58,6 @@ | |||
58 | #include "reg.h" | 58 | #include "reg.h" |
59 | #include "debug.h" | 59 | #include "debug.h" |
60 | 60 | ||
61 | /* unaligned little endian access */ | ||
62 | #define LE_READ_2(_p) (le16_to_cpu(get_unaligned((__le16 *)(_p)))) | ||
63 | #define LE_READ_4(_p) (le32_to_cpu(get_unaligned((__le32 *)(_p)))) | ||
64 | |||
65 | enum { | 61 | enum { |
66 | ATH_LED_TX, | 62 | ATH_LED_TX, |
67 | ATH_LED_RX, | 63 | ATH_LED_RX, |
@@ -2909,9 +2905,9 @@ static void ath5k_configure_filter(struct ieee80211_hw *hw, | |||
2909 | if (!mclist) | 2905 | if (!mclist) |
2910 | break; | 2906 | break; |
2911 | /* calculate XOR of eight 6-bit values */ | 2907 | /* calculate XOR of eight 6-bit values */ |
2912 | val = LE_READ_4(mclist->dmi_addr + 0); | 2908 | val = get_unaligned_le32(mclist->dmi_addr + 0); |
2913 | pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; | 2909 | pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; |
2914 | val = LE_READ_4(mclist->dmi_addr + 3); | 2910 | val = get_unaligned_le32(mclist->dmi_addr + 3); |
2915 | pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; | 2911 | pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; |
2916 | pos &= 0x3f; | 2912 | pos &= 0x3f; |
2917 | mfilt[pos / 32] |= (1 << (pos % 32)); | 2913 | mfilt[pos / 32] |= (1 << (pos % 32)); |
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index 4bf8a99099fe..8c24cd72aaca 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c | |||
@@ -2171,7 +2171,7 @@ static int b43_write_initvals(struct b43_wldev *dev, | |||
2171 | goto err_format; | 2171 | goto err_format; |
2172 | array_size -= sizeof(iv->data.d32); | 2172 | array_size -= sizeof(iv->data.d32); |
2173 | 2173 | ||
2174 | value = be32_to_cpu(get_unaligned(&iv->data.d32)); | 2174 | value = get_unaligned_be32(&iv->data.d32); |
2175 | b43_write32(dev, offset, value); | 2175 | b43_write32(dev, offset, value); |
2176 | 2176 | ||
2177 | iv = (const struct b43_iv *)((const uint8_t *)iv + | 2177 | iv = (const struct b43_iv *)((const uint8_t *)iv + |
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c index ef829ee8ffd4..14a5eea2573e 100644 --- a/drivers/net/wireless/b43legacy/main.c +++ b/drivers/net/wireless/b43legacy/main.c | |||
@@ -1720,7 +1720,7 @@ static int b43legacy_write_initvals(struct b43legacy_wldev *dev, | |||
1720 | goto err_format; | 1720 | goto err_format; |
1721 | array_size -= sizeof(iv->data.d32); | 1721 | array_size -= sizeof(iv->data.d32); |
1722 | 1722 | ||
1723 | value = be32_to_cpu(get_unaligned(&iv->data.d32)); | 1723 | value = get_unaligned_be32(&iv->data.d32); |
1724 | b43legacy_write32(dev, offset, value); | 1724 | b43legacy_write32(dev, offset, value); |
1725 | 1725 | ||
1726 | iv = (const struct b43legacy_iv *)((const uint8_t *)iv + | 1726 | iv = (const struct b43legacy_iv *)((const uint8_t *)iv + |
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c index 598e4eef4f40..d3406830c8e3 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945.c +++ b/drivers/net/wireless/iwlwifi/iwl-3945.c | |||
@@ -554,40 +554,36 @@ static void iwl3945_add_radiotap(struct iwl3945_priv *priv, | |||
554 | iwl3945_rt->rt_hdr.it_pad = 0; | 554 | iwl3945_rt->rt_hdr.it_pad = 0; |
555 | 555 | ||
556 | /* total header + data */ | 556 | /* total header + data */ |
557 | put_unaligned(cpu_to_le16(sizeof(*iwl3945_rt)), | 557 | put_unaligned_le16(sizeof(*iwl3945_rt), &iwl3945_rt->rt_hdr.it_len); |
558 | &iwl3945_rt->rt_hdr.it_len); | ||
559 | 558 | ||
560 | /* Indicate all the fields we add to the radiotap header */ | 559 | /* Indicate all the fields we add to the radiotap header */ |
561 | put_unaligned(cpu_to_le32((1 << IEEE80211_RADIOTAP_TSFT) | | 560 | put_unaligned_le32((1 << IEEE80211_RADIOTAP_TSFT) | |
562 | (1 << IEEE80211_RADIOTAP_FLAGS) | | 561 | (1 << IEEE80211_RADIOTAP_FLAGS) | |
563 | (1 << IEEE80211_RADIOTAP_RATE) | | 562 | (1 << IEEE80211_RADIOTAP_RATE) | |
564 | (1 << IEEE80211_RADIOTAP_CHANNEL) | | 563 | (1 << IEEE80211_RADIOTAP_CHANNEL) | |
565 | (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) | | 564 | (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) | |
566 | (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) | | 565 | (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) | |
567 | (1 << IEEE80211_RADIOTAP_ANTENNA)), | 566 | (1 << IEEE80211_RADIOTAP_ANTENNA), |
568 | &iwl3945_rt->rt_hdr.it_present); | 567 | &iwl3945_rt->rt_hdr.it_present); |
569 | 568 | ||
570 | /* Zero the flags, we'll add to them as we go */ | 569 | /* Zero the flags, we'll add to them as we go */ |
571 | iwl3945_rt->rt_flags = 0; | 570 | iwl3945_rt->rt_flags = 0; |
572 | 571 | ||
573 | put_unaligned(cpu_to_le64(tsf), &iwl3945_rt->rt_tsf); | 572 | put_unaligned_le64(tsf, &iwl3945_rt->rt_tsf); |
574 | 573 | ||
575 | iwl3945_rt->rt_dbmsignal = signal; | 574 | iwl3945_rt->rt_dbmsignal = signal; |
576 | iwl3945_rt->rt_dbmnoise = noise; | 575 | iwl3945_rt->rt_dbmnoise = noise; |
577 | 576 | ||
578 | /* Convert the channel frequency and set the flags */ | 577 | /* Convert the channel frequency and set the flags */ |
579 | put_unaligned(cpu_to_le16(stats->freq), &iwl3945_rt->rt_channelMHz); | 578 | put_unaligned_le16(stats->freq, &iwl3945_rt->rt_channelMHz); |
580 | if (!(phy_flags_hw & RX_RES_PHY_FLAGS_BAND_24_MSK)) | 579 | if (!(phy_flags_hw & RX_RES_PHY_FLAGS_BAND_24_MSK)) |
581 | put_unaligned(cpu_to_le16(IEEE80211_CHAN_OFDM | | 580 | put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ, |
582 | IEEE80211_CHAN_5GHZ), | ||
583 | &iwl3945_rt->rt_chbitmask); | 581 | &iwl3945_rt->rt_chbitmask); |
584 | else if (phy_flags_hw & RX_RES_PHY_FLAGS_MOD_CCK_MSK) | 582 | else if (phy_flags_hw & RX_RES_PHY_FLAGS_MOD_CCK_MSK) |
585 | put_unaligned(cpu_to_le16(IEEE80211_CHAN_CCK | | 583 | put_unaligned_le16(IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ, |
586 | IEEE80211_CHAN_2GHZ), | ||
587 | &iwl3945_rt->rt_chbitmask); | 584 | &iwl3945_rt->rt_chbitmask); |
588 | else /* 802.11g */ | 585 | else /* 802.11g */ |
589 | put_unaligned(cpu_to_le16(IEEE80211_CHAN_OFDM | | 586 | put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ, |
590 | IEEE80211_CHAN_2GHZ), | ||
591 | &iwl3945_rt->rt_chbitmask); | 587 | &iwl3945_rt->rt_chbitmask); |
592 | 588 | ||
593 | if (rate == -1) | 589 | if (rate == -1) |
diff --git a/drivers/net/wireless/libertas/scan.c b/drivers/net/wireless/libertas/scan.c index e72c97a0d6c1..1a409fcc80d3 100644 --- a/drivers/net/wireless/libertas/scan.c +++ b/drivers/net/wireless/libertas/scan.c | |||
@@ -522,7 +522,7 @@ static int lbs_process_bss(struct bss_descriptor *bss, | |||
522 | 522 | ||
523 | if (*bytesleft >= sizeof(beaconsize)) { | 523 | if (*bytesleft >= sizeof(beaconsize)) { |
524 | /* Extract & convert beacon size from the command buffer */ | 524 | /* Extract & convert beacon size from the command buffer */ |
525 | beaconsize = le16_to_cpu(get_unaligned((__le16 *)*pbeaconinfo)); | 525 | beaconsize = get_unaligned_le16(*pbeaconinfo); |
526 | *bytesleft -= sizeof(beaconsize); | 526 | *bytesleft -= sizeof(beaconsize); |
527 | *pbeaconinfo += sizeof(beaconsize); | 527 | *pbeaconinfo += sizeof(beaconsize); |
528 | } | 528 | } |
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c index e34675c2f8fc..5316074f39f0 100644 --- a/drivers/net/wireless/zd1211rw/zd_usb.c +++ b/drivers/net/wireless/zd1211rw/zd_usb.c | |||
@@ -545,11 +545,11 @@ static void handle_rx_packet(struct zd_usb *usb, const u8 *buffer, | |||
545 | * be padded. Unaligned access might also happen if the length_info | 545 | * be padded. Unaligned access might also happen if the length_info |
546 | * structure is not present. | 546 | * structure is not present. |
547 | */ | 547 | */ |
548 | if (get_unaligned(&length_info->tag) == cpu_to_le16(RX_LENGTH_INFO_TAG)) | 548 | if (get_unaligned_le16(&length_info->tag) == RX_LENGTH_INFO_TAG) |
549 | { | 549 | { |
550 | unsigned int l, k, n; | 550 | unsigned int l, k, n; |
551 | for (i = 0, l = 0;; i++) { | 551 | for (i = 0, l = 0;; i++) { |
552 | k = le16_to_cpu(get_unaligned(&length_info->length[i])); | 552 | k = get_unaligned_le16(&length_info->length[i]); |
553 | if (k == 0) | 553 | if (k == 0) |
554 | return; | 554 | return; |
555 | n = l+k; | 555 | n = l+k; |
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c index 24640726f8bb..57e1f495b9fc 100644 --- a/drivers/net/yellowfin.c +++ b/drivers/net/yellowfin.c | |||
@@ -1062,7 +1062,7 @@ static int yellowfin_rx(struct net_device *dev) | |||
1062 | buf_addr = rx_skb->data; | 1062 | buf_addr = rx_skb->data; |
1063 | data_size = (le32_to_cpu(desc->dbdma_cmd) - | 1063 | data_size = (le32_to_cpu(desc->dbdma_cmd) - |
1064 | le32_to_cpu(desc->result_status)) & 0xffff; | 1064 | le32_to_cpu(desc->result_status)) & 0xffff; |
1065 | frame_status = le16_to_cpu(get_unaligned((__le16*)&(buf_addr[data_size - 2]))); | 1065 | frame_status = get_unaligned_le16(&(buf_addr[data_size - 2])); |
1066 | if (yellowfin_debug > 4) | 1066 | if (yellowfin_debug > 4) |
1067 | printk(KERN_DEBUG " yellowfin_rx() status was %4.4x.\n", | 1067 | printk(KERN_DEBUG " yellowfin_rx() status was %4.4x.\n", |
1068 | frame_status); | 1068 | frame_status); |
diff --git a/drivers/nubus/proc.c b/drivers/nubus/proc.c index e07492be1f4a..208dd12825bc 100644 --- a/drivers/nubus/proc.c +++ b/drivers/nubus/proc.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
22 | #include <linux/nubus.h> | 22 | #include <linux/nubus.h> |
23 | #include <linux/proc_fs.h> | 23 | #include <linux/proc_fs.h> |
24 | #include <linux/seq_file.h> | ||
24 | #include <linux/init.h> | 25 | #include <linux/init.h> |
25 | #include <linux/module.h> | 26 | #include <linux/module.h> |
26 | 27 | ||
@@ -28,38 +29,36 @@ | |||
28 | #include <asm/byteorder.h> | 29 | #include <asm/byteorder.h> |
29 | 30 | ||
30 | static int | 31 | static int |
31 | get_nubus_dev_info(char *buf, char **start, off_t pos, int count) | 32 | nubus_devices_proc_show(struct seq_file *m, void *v) |
32 | { | 33 | { |
33 | struct nubus_dev *dev = nubus_devices; | 34 | struct nubus_dev *dev = nubus_devices; |
34 | off_t at = 0; | ||
35 | int len, cnt; | ||
36 | 35 | ||
37 | cnt = 0; | 36 | while (dev) { |
38 | while (dev && count > cnt) { | 37 | seq_printf(m, "%x\t%04x %04x %04x %04x", |
39 | len = sprintf(buf, "%x\t%04x %04x %04x %04x", | ||
40 | dev->board->slot, | 38 | dev->board->slot, |
41 | dev->category, | 39 | dev->category, |
42 | dev->type, | 40 | dev->type, |
43 | dev->dr_sw, | 41 | dev->dr_sw, |
44 | dev->dr_hw); | 42 | dev->dr_hw); |
45 | len += sprintf(buf+len, | 43 | seq_printf(m, "\t%08lx\n", dev->board->slot_addr); |
46 | "\t%08lx", | ||
47 | dev->board->slot_addr); | ||
48 | buf[len++] = '\n'; | ||
49 | at += len; | ||
50 | if (at >= pos) { | ||
51 | if (!*start) { | ||
52 | *start = buf + (pos - (at - len)); | ||
53 | cnt = at - pos; | ||
54 | } else | ||
55 | cnt += len; | ||
56 | buf += len; | ||
57 | } | ||
58 | dev = dev->next; | 44 | dev = dev->next; |
59 | } | 45 | } |
60 | return (count > cnt) ? cnt : count; | 46 | return 0; |
47 | } | ||
48 | |||
49 | static int nubus_devices_proc_open(struct inode *inode, struct file *file) | ||
50 | { | ||
51 | return single_open(file, nubus_devices_proc_show, NULL); | ||
61 | } | 52 | } |
62 | 53 | ||
54 | static const struct file_operations nubus_devices_proc_fops = { | ||
55 | .owner = THIS_MODULE, | ||
56 | .open = nubus_devices_proc_open, | ||
57 | .read = seq_read, | ||
58 | .llseek = seq_lseek, | ||
59 | .release = single_release, | ||
60 | }; | ||
61 | |||
63 | static struct proc_dir_entry *proc_bus_nubus_dir; | 62 | static struct proc_dir_entry *proc_bus_nubus_dir; |
64 | 63 | ||
65 | static void nubus_proc_subdir(struct nubus_dev* dev, | 64 | static void nubus_proc_subdir(struct nubus_dev* dev, |
@@ -171,8 +170,7 @@ void __init nubus_proc_init(void) | |||
171 | { | 170 | { |
172 | if (!MACH_IS_MAC) | 171 | if (!MACH_IS_MAC) |
173 | return; | 172 | return; |
174 | proc_bus_nubus_dir = proc_mkdir("nubus", proc_bus); | 173 | proc_bus_nubus_dir = proc_mkdir("bus/nubus", NULL); |
175 | create_proc_info_entry("devices", 0, proc_bus_nubus_dir, | 174 | proc_create("devices", 0, proc_bus_nubus_dir, &nubus_devices_proc_fops); |
176 | get_nubus_dev_info); | ||
177 | proc_bus_nubus_add_devices(); | 175 | proc_bus_nubus_add_devices(); |
178 | } | 176 | } |
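The nubus conversion above is the standard recipe for turning an old get_info-style /proc read routine into a seq_file: a *_show() function that seq_printf()s its output, a single_open() wrapper, and a const file_operations built from seq_read/seq_lseek/single_release. Reduced to a skeleton with hypothetical names:

    #include <linux/module.h>
    #include <linux/proc_fs.h>
    #include <linux/seq_file.h>

    static int foo_proc_show(struct seq_file *m, void *v)
    {
            /* seq_file does the buffering and offset bookkeeping the old
             * get_info() callback had to open-code */
            seq_printf(m, "example: %d\n", 42);
            return 0;
    }

    static int foo_proc_open(struct inode *inode, struct file *file)
    {
            return single_open(file, foo_proc_show, NULL);
    }

    static const struct file_operations foo_proc_fops = {
            .owner   = THIS_MODULE,
            .open    = foo_proc_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = single_release,
    };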
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c index 62db3c3fe4dc..07d2a8d4498f 100644 --- a/drivers/parisc/ccio-dma.c +++ b/drivers/parisc/ccio-dma.c | |||
@@ -1551,8 +1551,7 @@ static int __init ccio_probe(struct parisc_device *dev) | |||
1551 | { | 1551 | { |
1552 | int i; | 1552 | int i; |
1553 | struct ioc *ioc, **ioc_p = &ioc_list; | 1553 | struct ioc *ioc, **ioc_p = &ioc_list; |
1554 | struct proc_dir_entry *info_entry, *bitmap_entry; | 1554 | |
1555 | |||
1556 | ioc = kzalloc(sizeof(struct ioc), GFP_KERNEL); | 1555 | ioc = kzalloc(sizeof(struct ioc), GFP_KERNEL); |
1557 | if (ioc == NULL) { | 1556 | if (ioc == NULL) { |
1558 | printk(KERN_ERR MODULE_NAME ": memory allocation failure\n"); | 1557 | printk(KERN_ERR MODULE_NAME ": memory allocation failure\n"); |
@@ -1580,13 +1579,10 @@ static int __init ccio_probe(struct parisc_device *dev) | |||
1580 | HBA_DATA(dev->dev.platform_data)->iommu = ioc; | 1579 | HBA_DATA(dev->dev.platform_data)->iommu = ioc; |
1581 | 1580 | ||
1582 | if (ioc_count == 0) { | 1581 | if (ioc_count == 0) { |
1583 | info_entry = create_proc_entry(MODULE_NAME, 0, proc_runway_root); | 1582 | proc_create(MODULE_NAME, 0, proc_runway_root, |
1584 | if (info_entry) | 1583 | &ccio_proc_info_fops); |
1585 | info_entry->proc_fops = &ccio_proc_info_fops; | 1584 | proc_create(MODULE_NAME"-bitmap", 0, proc_runway_root, |
1586 | 1585 | &ccio_proc_bitmap_fops); | |
1587 | bitmap_entry = create_proc_entry(MODULE_NAME"-bitmap", 0, proc_runway_root); | ||
1588 | if (bitmap_entry) | ||
1589 | bitmap_entry->proc_fops = &ccio_proc_bitmap_fops; | ||
1590 | } | 1586 | } |
1591 | 1587 | ||
1592 | ioc_count++; | 1588 | ioc_count++; |
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c index 8c4d2c13d5f2..afc849bd3f58 100644 --- a/drivers/parisc/sba_iommu.c +++ b/drivers/parisc/sba_iommu.c | |||
@@ -1895,7 +1895,9 @@ sba_driver_callback(struct parisc_device *dev) | |||
1895 | int i; | 1895 | int i; |
1896 | char *version; | 1896 | char *version; |
1897 | void __iomem *sba_addr = ioremap_nocache(dev->hpa.start, SBA_FUNC_SIZE); | 1897 | void __iomem *sba_addr = ioremap_nocache(dev->hpa.start, SBA_FUNC_SIZE); |
1898 | struct proc_dir_entry *info_entry, *bitmap_entry, *root; | 1898 | #ifdef CONFIG_PROC_FS |
1899 | struct proc_dir_entry *root; | ||
1900 | #endif | ||
1899 | 1901 | ||
1900 | sba_dump_ranges(sba_addr); | 1902 | sba_dump_ranges(sba_addr); |
1901 | 1903 | ||
@@ -1973,14 +1975,8 @@ sba_driver_callback(struct parisc_device *dev) | |||
1973 | break; | 1975 | break; |
1974 | } | 1976 | } |
1975 | 1977 | ||
1976 | info_entry = create_proc_entry("sba_iommu", 0, root); | 1978 | proc_create("sba_iommu", 0, root, &sba_proc_fops); |
1977 | bitmap_entry = create_proc_entry("sba_iommu-bitmap", 0, root); | 1979 | proc_create("sba_iommu-bitmap", 0, root, &sba_proc_bitmap_fops); |
1978 | |||
1979 | if (info_entry) | ||
1980 | info_entry->proc_fops = &sba_proc_fops; | ||
1981 | |||
1982 | if (bitmap_entry) | ||
1983 | bitmap_entry->proc_fops = &sba_proc_bitmap_fops; | ||
1984 | #endif | 1980 | #endif |
1985 | 1981 | ||
1986 | parisc_vmerge_boundary = IOVP_SIZE; | 1982 | parisc_vmerge_boundary = IOVP_SIZE; |
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c index a85808938205..e71092e80288 100644 --- a/drivers/parport/parport_pc.c +++ b/drivers/parport/parport_pc.c | |||
@@ -3082,6 +3082,7 @@ static struct pci_driver parport_pc_pci_driver; | |||
3082 | static int __init parport_pc_init_superio(int autoirq, int autodma) {return 0;} | 3082 | static int __init parport_pc_init_superio(int autoirq, int autodma) {return 0;} |
3083 | #endif /* CONFIG_PCI */ | 3083 | #endif /* CONFIG_PCI */ |
3084 | 3084 | ||
3085 | #ifdef CONFIG_PNP | ||
3085 | 3086 | ||
3086 | static const struct pnp_device_id parport_pc_pnp_tbl[] = { | 3087 | static const struct pnp_device_id parport_pc_pnp_tbl[] = { |
3087 | /* Standard LPT Printer Port */ | 3088 | /* Standard LPT Printer Port */ |
@@ -3148,6 +3149,9 @@ static struct pnp_driver parport_pc_pnp_driver = { | |||
3148 | .remove = parport_pc_pnp_remove, | 3149 | .remove = parport_pc_pnp_remove, |
3149 | }; | 3150 | }; |
3150 | 3151 | ||
3152 | #else | ||
3153 | static struct pnp_driver parport_pc_pnp_driver; | ||
3154 | #endif /* CONFIG_PNP */ | ||
3151 | 3155 | ||
3152 | static int __devinit parport_pc_platform_probe(struct platform_device *pdev) | 3156 | static int __devinit parport_pc_platform_probe(struct platform_device *pdev) |
3153 | { | 3157 | { |
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c index ef18fcd641e2..963a97642ae9 100644 --- a/drivers/pci/proc.c +++ b/drivers/pci/proc.c | |||
@@ -293,6 +293,7 @@ static int proc_bus_pci_release(struct inode *inode, struct file *file) | |||
293 | #endif /* HAVE_PCI_MMAP */ | 293 | #endif /* HAVE_PCI_MMAP */ |
294 | 294 | ||
295 | static const struct file_operations proc_bus_pci_operations = { | 295 | static const struct file_operations proc_bus_pci_operations = { |
296 | .owner = THIS_MODULE, | ||
296 | .llseek = proc_bus_pci_lseek, | 297 | .llseek = proc_bus_pci_lseek, |
297 | .read = proc_bus_pci_read, | 298 | .read = proc_bus_pci_read, |
298 | .write = proc_bus_pci_write, | 299 | .write = proc_bus_pci_write, |
@@ -406,11 +407,10 @@ int pci_proc_attach_device(struct pci_dev *dev) | |||
406 | } | 407 | } |
407 | 408 | ||
408 | sprintf(name, "%02x.%x", PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn)); | 409 | sprintf(name, "%02x.%x", PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn)); |
409 | e = create_proc_entry(name, S_IFREG | S_IRUGO | S_IWUSR, bus->procdir); | 410 | e = proc_create_data(name, S_IFREG | S_IRUGO | S_IWUSR, bus->procdir, |
411 | &proc_bus_pci_operations, dev); | ||
410 | if (!e) | 412 | if (!e) |
411 | return -ENOMEM; | 413 | return -ENOMEM; |
412 | e->proc_fops = &proc_bus_pci_operations; | ||
413 | e->data = dev; | ||
414 | e->size = dev->cfg_size; | 414 | e->size = dev->cfg_size; |
415 | dev->procent = e; | 415 | dev->procent = e; |
416 | 416 | ||
@@ -462,6 +462,7 @@ static int proc_bus_pci_dev_open(struct inode *inode, struct file *file) | |||
462 | return seq_open(file, &proc_bus_pci_devices_op); | 462 | return seq_open(file, &proc_bus_pci_devices_op); |
463 | } | 463 | } |
464 | static const struct file_operations proc_bus_pci_dev_operations = { | 464 | static const struct file_operations proc_bus_pci_dev_operations = { |
465 | .owner = THIS_MODULE, | ||
465 | .open = proc_bus_pci_dev_open, | 466 | .open = proc_bus_pci_dev_open, |
466 | .read = seq_read, | 467 | .read = seq_read, |
467 | .llseek = seq_lseek, | 468 | .llseek = seq_lseek, |
@@ -470,12 +471,10 @@ static const struct file_operations proc_bus_pci_dev_operations = { | |||
470 | 471 | ||
471 | static int __init pci_proc_init(void) | 472 | static int __init pci_proc_init(void) |
472 | { | 473 | { |
473 | struct proc_dir_entry *entry; | ||
474 | struct pci_dev *dev = NULL; | 474 | struct pci_dev *dev = NULL; |
475 | proc_bus_pci_dir = proc_mkdir("pci", proc_bus); | 475 | proc_bus_pci_dir = proc_mkdir("bus/pci", NULL); |
476 | entry = create_proc_entry("devices", 0, proc_bus_pci_dir); | 476 | proc_create("devices", 0, proc_bus_pci_dir, |
477 | if (entry) | 477 | &proc_bus_pci_dev_operations); |
478 | entry->proc_fops = &proc_bus_pci_dev_operations; | ||
479 | proc_initialized = 1; | 478 | proc_initialized = 1; |
480 | while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { | 479 | while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { |
481 | pci_proc_attach_device(dev); | 480 | pci_proc_attach_device(dev); |
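
pci/proc.c uses the proc_create_data() flavour: the pointer that was previously stored into entry->data after creation now travels with the entry from the start. A hedged sketch of creating such a per-device entry and recovering the pointer in ->open(); struct mydev, its handlers and the entry name are illustrative assumptions, not code from the patch.

        /* Sketch of the proc_create_data() pattern: the private pointer that
         * used to be assigned to entry->data is passed at creation time and
         * read back via PDE(inode)->data in ->open().
         */
        #include <linux/proc_fs.h>
        #include <linux/seq_file.h>

        struct mydev {
                const char *label;
        };

        static int mydev_show(struct seq_file *m, void *v)
        {
                struct mydev *dev = m->private;

                seq_printf(m, "label: %s\n", dev->label);
                return 0;
        }

        static int mydev_proc_open(struct inode *inode, struct file *file)
        {
                return single_open(file, mydev_show, PDE(inode)->data);
        }

        static const struct file_operations mydev_proc_fops = {
                .owner   = THIS_MODULE,
                .open    = mydev_proc_open,
                .read    = seq_read,
                .llseek  = seq_lseek,
                .release = single_release,
        };

        static int mydev_proc_attach(struct mydev *dev,
                                     struct proc_dir_entry *parent)
        {
                struct proc_dir_entry *e;

                e = proc_create_data("mydev", S_IFREG | S_IRUGO, parent,
                                     &mydev_proc_fops, dev);
                return e ? 0 : -ENOMEM;
        }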
diff --git a/drivers/pcmcia/cistpl.c b/drivers/pcmcia/cistpl.c index 06a85d7d5aa2..36379535f9da 100644 --- a/drivers/pcmcia/cistpl.c +++ b/drivers/pcmcia/cistpl.c | |||
@@ -402,15 +402,6 @@ EXPORT_SYMBOL(pcmcia_replace_cis); | |||
402 | 402 | ||
403 | ======================================================================*/ | 403 | ======================================================================*/ |
404 | 404 | ||
405 | static inline u16 cis_get_u16(void *ptr) | ||
406 | { | ||
407 | return le16_to_cpu(get_unaligned((__le16 *) ptr)); | ||
408 | } | ||
409 | static inline u32 cis_get_u32(void *ptr) | ||
410 | { | ||
411 | return le32_to_cpu(get_unaligned((__le32 *) ptr)); | ||
412 | } | ||
413 | |||
414 | typedef struct tuple_flags { | 405 | typedef struct tuple_flags { |
415 | u_int link_space:4; | 406 | u_int link_space:4; |
416 | u_int has_link:1; | 407 | u_int has_link:1; |
@@ -471,7 +462,7 @@ static int follow_link(struct pcmcia_socket *s, tuple_t *tuple) | |||
471 | /* Get indirect link from the MFC tuple */ | 462 | /* Get indirect link from the MFC tuple */ |
472 | read_cis_cache(s, LINK_SPACE(tuple->Flags), | 463 | read_cis_cache(s, LINK_SPACE(tuple->Flags), |
473 | tuple->LinkOffset, 5, link); | 464 | tuple->LinkOffset, 5, link); |
474 | ofs = cis_get_u32(link + 1); | 465 | ofs = get_unaligned_le32(link + 1); |
475 | SPACE(tuple->Flags) = (link[0] == CISTPL_MFC_ATTR); | 466 | SPACE(tuple->Flags) = (link[0] == CISTPL_MFC_ATTR); |
476 | /* Move to the next indirect link */ | 467 | /* Move to the next indirect link */ |
477 | tuple->LinkOffset += 5; | 468 | tuple->LinkOffset += 5; |
@@ -679,8 +670,8 @@ static int parse_checksum(tuple_t *tuple, cistpl_checksum_t *csum) | |||
679 | if (tuple->TupleDataLen < 5) | 670 | if (tuple->TupleDataLen < 5) |
680 | return CS_BAD_TUPLE; | 671 | return CS_BAD_TUPLE; |
681 | p = (u_char *) tuple->TupleData; | 672 | p = (u_char *) tuple->TupleData; |
682 | csum->addr = tuple->CISOffset + cis_get_u16(p) - 2; | 673 | csum->addr = tuple->CISOffset + get_unaligned_le16(p) - 2; |
683 | csum->len = cis_get_u16(p + 2); | 674 | csum->len = get_unaligned_le16(p + 2); |
684 | csum->sum = *(p + 4); | 675 | csum->sum = *(p + 4); |
685 | return CS_SUCCESS; | 676 | return CS_SUCCESS; |
686 | } | 677 | } |
@@ -691,7 +682,7 @@ static int parse_longlink(tuple_t *tuple, cistpl_longlink_t *link) | |||
691 | { | 682 | { |
692 | if (tuple->TupleDataLen < 4) | 683 | if (tuple->TupleDataLen < 4) |
693 | return CS_BAD_TUPLE; | 684 | return CS_BAD_TUPLE; |
694 | link->addr = cis_get_u32(tuple->TupleData); | 685 | link->addr = get_unaligned_le32(tuple->TupleData); |
695 | return CS_SUCCESS; | 686 | return CS_SUCCESS; |
696 | } | 687 | } |
697 | 688 | ||
@@ -710,7 +701,7 @@ static int parse_longlink_mfc(tuple_t *tuple, | |||
710 | return CS_BAD_TUPLE; | 701 | return CS_BAD_TUPLE; |
711 | for (i = 0; i < link->nfn; i++) { | 702 | for (i = 0; i < link->nfn; i++) { |
712 | link->fn[i].space = *p; p++; | 703 | link->fn[i].space = *p; p++; |
713 | link->fn[i].addr = cis_get_u32(p); | 704 | link->fn[i].addr = get_unaligned_le32(p); |
714 | p += 4; | 705 | p += 4; |
715 | } | 706 | } |
716 | return CS_SUCCESS; | 707 | return CS_SUCCESS; |
@@ -800,8 +791,8 @@ static int parse_manfid(tuple_t *tuple, cistpl_manfid_t *m) | |||
800 | { | 791 | { |
801 | if (tuple->TupleDataLen < 4) | 792 | if (tuple->TupleDataLen < 4) |
802 | return CS_BAD_TUPLE; | 793 | return CS_BAD_TUPLE; |
803 | m->manf = cis_get_u16(tuple->TupleData); | 794 | m->manf = get_unaligned_le16(tuple->TupleData); |
804 | m->card = cis_get_u16(tuple->TupleData + 2); | 795 | m->card = get_unaligned_le16(tuple->TupleData + 2); |
805 | return CS_SUCCESS; | 796 | return CS_SUCCESS; |
806 | } | 797 | } |
807 | 798 | ||
@@ -1100,7 +1091,7 @@ static int parse_cftable_entry(tuple_t *tuple, | |||
1100 | break; | 1091 | break; |
1101 | case 0x20: | 1092 | case 0x20: |
1102 | entry->mem.nwin = 1; | 1093 | entry->mem.nwin = 1; |
1103 | entry->mem.win[0].len = cis_get_u16(p) << 8; | 1094 | entry->mem.win[0].len = get_unaligned_le16(p) << 8; |
1104 | entry->mem.win[0].card_addr = 0; | 1095 | entry->mem.win[0].card_addr = 0; |
1105 | entry->mem.win[0].host_addr = 0; | 1096 | entry->mem.win[0].host_addr = 0; |
1106 | p += 2; | 1097 | p += 2; |
@@ -1108,8 +1099,8 @@ static int parse_cftable_entry(tuple_t *tuple, | |||
1108 | break; | 1099 | break; |
1109 | case 0x40: | 1100 | case 0x40: |
1110 | entry->mem.nwin = 1; | 1101 | entry->mem.nwin = 1; |
1111 | entry->mem.win[0].len = cis_get_u16(p) << 8; | 1102 | entry->mem.win[0].len = get_unaligned_le16(p) << 8; |
1112 | entry->mem.win[0].card_addr = cis_get_u16(p + 2) << 8; | 1103 | entry->mem.win[0].card_addr = get_unaligned_le16(p + 2) << 8; |
1113 | entry->mem.win[0].host_addr = 0; | 1104 | entry->mem.win[0].host_addr = 0; |
1114 | p += 4; | 1105 | p += 4; |
1115 | if (p > q) return CS_BAD_TUPLE; | 1106 | if (p > q) return CS_BAD_TUPLE; |
@@ -1146,7 +1137,7 @@ static int parse_bar(tuple_t *tuple, cistpl_bar_t *bar) | |||
1146 | p = (u_char *)tuple->TupleData; | 1137 | p = (u_char *)tuple->TupleData; |
1147 | bar->attr = *p; | 1138 | bar->attr = *p; |
1148 | p += 2; | 1139 | p += 2; |
1149 | bar->size = cis_get_u32(p); | 1140 | bar->size = get_unaligned_le32(p); |
1150 | return CS_SUCCESS; | 1141 | return CS_SUCCESS; |
1151 | } | 1142 | } |
1152 | 1143 | ||
@@ -1159,7 +1150,7 @@ static int parse_config_cb(tuple_t *tuple, cistpl_config_t *config) | |||
1159 | return CS_BAD_TUPLE; | 1150 | return CS_BAD_TUPLE; |
1160 | config->last_idx = *(++p); | 1151 | config->last_idx = *(++p); |
1161 | p++; | 1152 | p++; |
1162 | config->base = cis_get_u32(p); | 1153 | config->base = get_unaligned_le32(p); |
1163 | config->subtuples = tuple->TupleDataLen - 6; | 1154 | config->subtuples = tuple->TupleDataLen - 6; |
1164 | return CS_SUCCESS; | 1155 | return CS_SUCCESS; |
1165 | } | 1156 | } |
@@ -1275,7 +1266,7 @@ static int parse_vers_2(tuple_t *tuple, cistpl_vers_2_t *v2) | |||
1275 | 1266 | ||
1276 | v2->vers = p[0]; | 1267 | v2->vers = p[0]; |
1277 | v2->comply = p[1]; | 1268 | v2->comply = p[1]; |
1278 | v2->dindex = cis_get_u16(p +2 ); | 1269 | v2->dindex = get_unaligned_le16(p + 2); |
1279 | v2->vspec8 = p[6]; | 1270 | v2->vspec8 = p[6]; |
1280 | v2->vspec9 = p[7]; | 1271 | v2->vspec9 = p[7]; |
1281 | v2->nhdr = p[8]; | 1272 | v2->nhdr = p[8]; |
@@ -1316,8 +1307,8 @@ static int parse_format(tuple_t *tuple, cistpl_format_t *fmt) | |||
1316 | 1307 | ||
1317 | fmt->type = p[0]; | 1308 | fmt->type = p[0]; |
1318 | fmt->edc = p[1]; | 1309 | fmt->edc = p[1]; |
1319 | fmt->offset = cis_get_u32(p + 2); | 1310 | fmt->offset = get_unaligned_le32(p + 2); |
1320 | fmt->length = cis_get_u32(p + 6); | 1311 | fmt->length = get_unaligned_le32(p + 6); |
1321 | 1312 | ||
1322 | return CS_SUCCESS; | 1313 | return CS_SUCCESS; |
1323 | } | 1314 | } |
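
The cistpl.c hunks swap the driver-local cis_get_u16/u32 helpers for the generic get_unaligned_le16/le32 accessors. A small illustration of what those accessors return for a raw little-endian byte buffer; the buffer contents are invented for the example.

        /* Illustration of the generic unaligned little-endian accessors that
         * the CIS parsing code switches to.  The buffer is made up.
         */
        #include <linux/kernel.h>
        #include <linux/types.h>
        #include <asm/unaligned.h>

        static void unaligned_demo(void)
        {
                u8 buf[6] = { 0x34, 0x12,               /* 0x1234, LE */
                              0x78, 0x56, 0x34, 0x12 }; /* 0x12345678, LE */
                u16 w = get_unaligned_le16(buf);        /* works at any alignment */
                u32 l = get_unaligned_le32(buf + 2);

                printk(KERN_DEBUG "w=%#x l=%#x\n", w, l); /* w=0x1234 l=0x12345678 */
        }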
diff --git a/drivers/pnp/isapnp/proc.c b/drivers/pnp/isapnp/proc.c index 2b8266c3d40f..3f94edab25fa 100644 --- a/drivers/pnp/isapnp/proc.c +++ b/drivers/pnp/isapnp/proc.c | |||
@@ -85,6 +85,7 @@ static ssize_t isapnp_proc_bus_read(struct file *file, char __user * buf, | |||
85 | } | 85 | } |
86 | 86 | ||
87 | static const struct file_operations isapnp_proc_bus_file_operations = { | 87 | static const struct file_operations isapnp_proc_bus_file_operations = { |
88 | .owner = THIS_MODULE, | ||
88 | .llseek = isapnp_proc_bus_lseek, | 89 | .llseek = isapnp_proc_bus_lseek, |
89 | .read = isapnp_proc_bus_read, | 90 | .read = isapnp_proc_bus_read, |
90 | }; | 91 | }; |
@@ -102,12 +103,10 @@ static int isapnp_proc_attach_device(struct pnp_dev *dev) | |||
102 | return -ENOMEM; | 103 | return -ENOMEM; |
103 | } | 104 | } |
104 | sprintf(name, "%02x", dev->number); | 105 | sprintf(name, "%02x", dev->number); |
105 | e = dev->procent = create_proc_entry(name, S_IFREG | S_IRUGO, de); | 106 | e = dev->procent = proc_create_data(name, S_IFREG | S_IRUGO, de, |
107 | &isapnp_proc_bus_file_operations, dev); | ||
106 | if (!e) | 108 | if (!e) |
107 | return -ENOMEM; | 109 | return -ENOMEM; |
108 | e->proc_fops = &isapnp_proc_bus_file_operations; | ||
109 | e->owner = THIS_MODULE; | ||
110 | e->data = dev; | ||
111 | e->size = 256; | 110 | e->size = 256; |
112 | return 0; | 111 | return 0; |
113 | } | 112 | } |
@@ -116,7 +115,7 @@ int __init isapnp_proc_init(void) | |||
116 | { | 115 | { |
117 | struct pnp_dev *dev; | 116 | struct pnp_dev *dev; |
118 | 117 | ||
119 | isapnp_proc_bus_dir = proc_mkdir("isapnp", proc_bus); | 118 | isapnp_proc_bus_dir = proc_mkdir("bus/isapnp", NULL); |
120 | protocol_for_each_dev(&isapnp_protocol, dev) { | 119 | protocol_for_each_dev(&isapnp_protocol, dev) { |
121 | isapnp_proc_attach_device(dev); | 120 | isapnp_proc_attach_device(dev); |
122 | } | 121 | } |
diff --git a/drivers/pnp/pnpbios/proc.c b/drivers/pnp/pnpbios/proc.c index bb19bc957bad..46d506f66259 100644 --- a/drivers/pnp/pnpbios/proc.c +++ b/drivers/pnp/pnpbios/proc.c | |||
@@ -256,7 +256,7 @@ int pnpbios_interface_attach_device(struct pnp_bios_node *node) | |||
256 | */ | 256 | */ |
257 | int __init pnpbios_proc_init(void) | 257 | int __init pnpbios_proc_init(void) |
258 | { | 258 | { |
259 | proc_pnp = proc_mkdir("pnp", proc_bus); | 259 | proc_pnp = proc_mkdir("bus/pnp", NULL); |
260 | if (!proc_pnp) | 260 | if (!proc_pnp) |
261 | return -EIO; | 261 | return -EIO; |
262 | proc_pnp_boot = proc_mkdir("boot", proc_pnp); | 262 | proc_pnp_boot = proc_mkdir("boot", proc_pnp); |
@@ -294,5 +294,5 @@ void __exit pnpbios_proc_exit(void) | |||
294 | remove_proc_entry("configuration_info", proc_pnp); | 294 | remove_proc_entry("configuration_info", proc_pnp); |
295 | remove_proc_entry("devices", proc_pnp); | 295 | remove_proc_entry("devices", proc_pnp); |
296 | remove_proc_entry("boot", proc_pnp); | 296 | remove_proc_entry("boot", proc_pnp); |
297 | remove_proc_entry("pnp", proc_bus); | 297 | remove_proc_entry("bus/pnp", NULL); |
298 | } | 298 | } |
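
pnpbios (like the nubus, pci and isapnp hunks) drops the exported proc_bus pointer in favour of a path-style name with a NULL parent. A short sketch of that create/remove pairing; "bus/example" is a placeholder directory name.

        /* Sketch of the path-style proc_mkdir()/remove_proc_entry() pairing
         * that replaces the explicit proc_bus and &proc_root parents.
         * "bus/example" is a placeholder; the intermediate "bus" component
         * already exists under /proc.
         */
        #include <linux/proc_fs.h>

        static struct proc_dir_entry *example_dir;

        static int example_proc_setup(void)
        {
                /* NULL parent: the path is resolved relative to /proc itself */
                example_dir = proc_mkdir("bus/example", NULL);
                return example_dir ? 0 : -ENOMEM;
        }

        static void example_proc_teardown(void)
        {
                remove_proc_entry("bus/example", NULL);
        }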
diff --git a/drivers/power/ds2760_battery.c b/drivers/power/ds2760_battery.c index bdb9b7285b3d..71be36f18709 100644 --- a/drivers/power/ds2760_battery.c +++ b/drivers/power/ds2760_battery.c | |||
@@ -262,7 +262,7 @@ static void ds2760_battery_work(struct work_struct *work) | |||
262 | struct ds2760_device_info, monitor_work.work); | 262 | struct ds2760_device_info, monitor_work.work); |
263 | const int interval = HZ * 60; | 263 | const int interval = HZ * 60; |
264 | 264 | ||
265 | dev_dbg(di->dev, "%s\n", __FUNCTION__); | 265 | dev_dbg(di->dev, "%s\n", __func__); |
266 | 266 | ||
267 | ds2760_battery_update_status(di); | 267 | ds2760_battery_update_status(di); |
268 | queue_delayed_work(di->monitor_wqueue, &di->monitor_work, interval); | 268 | queue_delayed_work(di->monitor_wqueue, &di->monitor_work, interval); |
@@ -275,7 +275,7 @@ static void ds2760_battery_external_power_changed(struct power_supply *psy) | |||
275 | { | 275 | { |
276 | struct ds2760_device_info *di = to_ds2760_device_info(psy); | 276 | struct ds2760_device_info *di = to_ds2760_device_info(psy); |
277 | 277 | ||
278 | dev_dbg(di->dev, "%s\n", __FUNCTION__); | 278 | dev_dbg(di->dev, "%s\n", __func__); |
279 | 279 | ||
280 | cancel_delayed_work(&di->monitor_work); | 280 | cancel_delayed_work(&di->monitor_work); |
281 | queue_delayed_work(di->monitor_wqueue, &di->monitor_work, HZ/10); | 281 | queue_delayed_work(di->monitor_wqueue, &di->monitor_work, HZ/10); |
diff --git a/drivers/power/olpc_battery.c b/drivers/power/olpc_battery.c index af7a231092a4..ab1e8289f07f 100644 --- a/drivers/power/olpc_battery.c +++ b/drivers/power/olpc_battery.c | |||
@@ -315,7 +315,6 @@ static int __init olpc_bat_init(void) | |||
315 | if (ret) | 315 | if (ret) |
316 | goto battery_failed; | 316 | goto battery_failed; |
317 | 317 | ||
318 | olpc_register_battery_callback(&olpc_battery_trigger_uevent); | ||
319 | goto success; | 318 | goto success; |
320 | 319 | ||
321 | battery_failed: | 320 | battery_failed: |
@@ -328,7 +327,6 @@ success: | |||
328 | 327 | ||
329 | static void __exit olpc_bat_exit(void) | 328 | static void __exit olpc_bat_exit(void) |
330 | { | 329 | { |
331 | olpc_deregister_battery_callback(); | ||
332 | power_supply_unregister(&olpc_bat); | 330 | power_supply_unregister(&olpc_bat); |
333 | power_supply_unregister(&olpc_ac); | 331 | power_supply_unregister(&olpc_ac); |
334 | platform_device_unregister(bat_pdev); | 332 | platform_device_unregister(bat_pdev); |
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c index 03d6a38464ef..138dd76ee347 100644 --- a/drivers/power/power_supply_core.c +++ b/drivers/power/power_supply_core.c | |||
@@ -39,7 +39,7 @@ static void power_supply_changed_work(struct work_struct *work) | |||
39 | struct power_supply *psy = container_of(work, struct power_supply, | 39 | struct power_supply *psy = container_of(work, struct power_supply, |
40 | changed_work); | 40 | changed_work); |
41 | 41 | ||
42 | dev_dbg(psy->dev, "%s\n", __FUNCTION__); | 42 | dev_dbg(psy->dev, "%s\n", __func__); |
43 | 43 | ||
44 | class_for_each_device(power_supply_class, psy, | 44 | class_for_each_device(power_supply_class, psy, |
45 | __power_supply_changed_work); | 45 | __power_supply_changed_work); |
@@ -51,7 +51,7 @@ static void power_supply_changed_work(struct work_struct *work) | |||
51 | 51 | ||
52 | void power_supply_changed(struct power_supply *psy) | 52 | void power_supply_changed(struct power_supply *psy) |
53 | { | 53 | { |
54 | dev_dbg(psy->dev, "%s\n", __FUNCTION__); | 54 | dev_dbg(psy->dev, "%s\n", __func__); |
55 | 55 | ||
56 | schedule_work(&psy->changed_work); | 56 | schedule_work(&psy->changed_work); |
57 | } | 57 | } |
@@ -82,7 +82,7 @@ int power_supply_am_i_supplied(struct power_supply *psy) | |||
82 | error = class_for_each_device(power_supply_class, psy, | 82 | error = class_for_each_device(power_supply_class, psy, |
83 | __power_supply_am_i_supplied); | 83 | __power_supply_am_i_supplied); |
84 | 84 | ||
85 | dev_dbg(psy->dev, "%s %d\n", __FUNCTION__, error); | 85 | dev_dbg(psy->dev, "%s %d\n", __func__, error); |
86 | 86 | ||
87 | return error; | 87 | return error; |
88 | } | 88 | } |
diff --git a/drivers/power/power_supply_leds.c b/drivers/power/power_supply_leds.c index fa3034f85c38..2dece40c544f 100644 --- a/drivers/power/power_supply_leds.c +++ b/drivers/power/power_supply_leds.c | |||
@@ -24,7 +24,7 @@ static void power_supply_update_bat_leds(struct power_supply *psy) | |||
24 | if (psy->get_property(psy, POWER_SUPPLY_PROP_STATUS, &status)) | 24 | if (psy->get_property(psy, POWER_SUPPLY_PROP_STATUS, &status)) |
25 | return; | 25 | return; |
26 | 26 | ||
27 | dev_dbg(psy->dev, "%s %d\n", __FUNCTION__, status.intval); | 27 | dev_dbg(psy->dev, "%s %d\n", __func__, status.intval); |
28 | 28 | ||
29 | switch (status.intval) { | 29 | switch (status.intval) { |
30 | case POWER_SUPPLY_STATUS_FULL: | 30 | case POWER_SUPPLY_STATUS_FULL: |
@@ -101,7 +101,7 @@ static void power_supply_update_gen_leds(struct power_supply *psy) | |||
101 | if (psy->get_property(psy, POWER_SUPPLY_PROP_ONLINE, &online)) | 101 | if (psy->get_property(psy, POWER_SUPPLY_PROP_ONLINE, &online)) |
102 | return; | 102 | return; |
103 | 103 | ||
104 | dev_dbg(psy->dev, "%s %d\n", __FUNCTION__, online.intval); | 104 | dev_dbg(psy->dev, "%s %d\n", __func__, online.intval); |
105 | 105 | ||
106 | if (online.intval) | 106 | if (online.intval) |
107 | led_trigger_event(psy->online_trig, LED_FULL); | 107 | led_trigger_event(psy->online_trig, LED_FULL); |
diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c index 4f28045d9ef2..8624f55d0560 100644 --- a/drivers/rtc/rtc-bfin.c +++ b/drivers/rtc/rtc-bfin.c | |||
@@ -419,7 +419,7 @@ static int __devinit bfin_rtc_probe(struct platform_device *pdev) | |||
419 | return -ENOMEM; | 419 | return -ENOMEM; |
420 | 420 | ||
421 | rtc->rtc_dev = rtc_device_register(pdev->name, &pdev->dev, &bfin_rtc_ops, THIS_MODULE); | 421 | rtc->rtc_dev = rtc_device_register(pdev->name, &pdev->dev, &bfin_rtc_ops, THIS_MODULE); |
422 | if (unlikely(IS_ERR(rtc))) { | 422 | if (IS_ERR(rtc)) { |
423 | ret = PTR_ERR(rtc->rtc_dev); | 423 | ret = PTR_ERR(rtc->rtc_dev); |
424 | goto err; | 424 | goto err; |
425 | } | 425 | } |
diff --git a/drivers/rtc/rtc-proc.c b/drivers/rtc/rtc-proc.c index 8d300e6d0d9e..0c6257a034ff 100644 --- a/drivers/rtc/rtc-proc.c +++ b/drivers/rtc/rtc-proc.c | |||
@@ -108,12 +108,10 @@ void rtc_proc_add_device(struct rtc_device *rtc) | |||
108 | if (rtc->id == 0) { | 108 | if (rtc->id == 0) { |
109 | struct proc_dir_entry *ent; | 109 | struct proc_dir_entry *ent; |
110 | 110 | ||
111 | ent = create_proc_entry("driver/rtc", 0, NULL); | 111 | ent = proc_create_data("driver/rtc", 0, NULL, |
112 | if (ent) { | 112 | &rtc_proc_fops, rtc); |
113 | ent->proc_fops = &rtc_proc_fops; | 113 | if (ent) |
114 | ent->owner = rtc->owner; | 114 | ent->owner = rtc->owner; |
115 | ent->data = rtc; | ||
116 | } | ||
117 | } | 115 | } |
118 | } | 116 | } |
119 | 117 | ||
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c index 556063e8f7a9..03c0e40a92ff 100644 --- a/drivers/s390/block/dasd_proc.c +++ b/drivers/s390/block/dasd_proc.c | |||
@@ -157,6 +157,7 @@ static int dasd_devices_open(struct inode *inode, struct file *file) | |||
157 | } | 157 | } |
158 | 158 | ||
159 | static const struct file_operations dasd_devices_file_ops = { | 159 | static const struct file_operations dasd_devices_file_ops = { |
160 | .owner = THIS_MODULE, | ||
160 | .open = dasd_devices_open, | 161 | .open = dasd_devices_open, |
161 | .read = seq_read, | 162 | .read = seq_read, |
162 | .llseek = seq_lseek, | 163 | .llseek = seq_lseek, |
@@ -311,17 +312,16 @@ out_error: | |||
311 | int | 312 | int |
312 | dasd_proc_init(void) | 313 | dasd_proc_init(void) |
313 | { | 314 | { |
314 | dasd_proc_root_entry = proc_mkdir("dasd", &proc_root); | 315 | dasd_proc_root_entry = proc_mkdir("dasd", NULL); |
315 | if (!dasd_proc_root_entry) | 316 | if (!dasd_proc_root_entry) |
316 | goto out_nodasd; | 317 | goto out_nodasd; |
317 | dasd_proc_root_entry->owner = THIS_MODULE; | 318 | dasd_proc_root_entry->owner = THIS_MODULE; |
318 | dasd_devices_entry = create_proc_entry("devices", | 319 | dasd_devices_entry = proc_create("devices", |
319 | S_IFREG | S_IRUGO | S_IWUSR, | 320 | S_IFREG | S_IRUGO | S_IWUSR, |
320 | dasd_proc_root_entry); | 321 | dasd_proc_root_entry, |
322 | &dasd_devices_file_ops); | ||
321 | if (!dasd_devices_entry) | 323 | if (!dasd_devices_entry) |
322 | goto out_nodevices; | 324 | goto out_nodevices; |
323 | dasd_devices_entry->proc_fops = &dasd_devices_file_ops; | ||
324 | dasd_devices_entry->owner = THIS_MODULE; | ||
325 | dasd_statistics_entry = create_proc_entry("statistics", | 325 | dasd_statistics_entry = create_proc_entry("statistics", |
326 | S_IFREG | S_IRUGO | S_IWUSR, | 326 | S_IFREG | S_IRUGO | S_IWUSR, |
327 | dasd_proc_root_entry); | 327 | dasd_proc_root_entry); |
@@ -335,7 +335,7 @@ dasd_proc_init(void) | |||
335 | out_nostatistics: | 335 | out_nostatistics: |
336 | remove_proc_entry("devices", dasd_proc_root_entry); | 336 | remove_proc_entry("devices", dasd_proc_root_entry); |
337 | out_nodevices: | 337 | out_nodevices: |
338 | remove_proc_entry("dasd", &proc_root); | 338 | remove_proc_entry("dasd", NULL); |
339 | out_nodasd: | 339 | out_nodasd: |
340 | return -ENOENT; | 340 | return -ENOENT; |
341 | } | 341 | } |
@@ -345,5 +345,5 @@ dasd_proc_exit(void) | |||
345 | { | 345 | { |
346 | remove_proc_entry("devices", dasd_proc_root_entry); | 346 | remove_proc_entry("devices", dasd_proc_root_entry); |
347 | remove_proc_entry("statistics", dasd_proc_root_entry); | 347 | remove_proc_entry("statistics", dasd_proc_root_entry); |
348 | remove_proc_entry("dasd", &proc_root); | 348 | remove_proc_entry("dasd", NULL); |
349 | } | 349 | } |
diff --git a/drivers/s390/char/tape_proc.c b/drivers/s390/char/tape_proc.c index c9b96d51b28f..e7c888c14e71 100644 --- a/drivers/s390/char/tape_proc.c +++ b/drivers/s390/char/tape_proc.c | |||
@@ -111,6 +111,7 @@ static int tape_proc_open(struct inode *inode, struct file *file) | |||
111 | 111 | ||
112 | static const struct file_operations tape_proc_ops = | 112 | static const struct file_operations tape_proc_ops = |
113 | { | 113 | { |
114 | .owner = THIS_MODULE, | ||
114 | .open = tape_proc_open, | 115 | .open = tape_proc_open, |
115 | .read = seq_read, | 116 | .read = seq_read, |
116 | .llseek = seq_lseek, | 117 | .llseek = seq_lseek, |
@@ -124,14 +125,12 @@ void | |||
124 | tape_proc_init(void) | 125 | tape_proc_init(void) |
125 | { | 126 | { |
126 | tape_proc_devices = | 127 | tape_proc_devices = |
127 | create_proc_entry ("tapedevices", S_IFREG | S_IRUGO | S_IWUSR, | 128 | proc_create("tapedevices", S_IFREG | S_IRUGO | S_IWUSR, NULL, |
128 | &proc_root); | 129 | &tape_proc_ops); |
129 | if (tape_proc_devices == NULL) { | 130 | if (tape_proc_devices == NULL) { |
130 | PRINT_WARN("tape: Cannot register procfs entry tapedevices\n"); | 131 | PRINT_WARN("tape: Cannot register procfs entry tapedevices\n"); |
131 | return; | 132 | return; |
132 | } | 133 | } |
133 | tape_proc_devices->proc_fops = &tape_proc_ops; | ||
134 | tape_proc_devices->owner = THIS_MODULE; | ||
135 | } | 134 | } |
136 | 135 | ||
137 | /* | 136 | /* |
@@ -141,5 +140,5 @@ void | |||
141 | tape_proc_cleanup(void) | 140 | tape_proc_cleanup(void) |
142 | { | 141 | { |
143 | if (tape_proc_devices != NULL) | 142 | if (tape_proc_devices != NULL) |
144 | remove_proc_entry ("tapedevices", &proc_root); | 143 | remove_proc_entry ("tapedevices", NULL); |
145 | } | 144 | } |
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c index e8597ec92247..40ef948fcb3a 100644 --- a/drivers/s390/cio/blacklist.c +++ b/drivers/s390/cio/blacklist.c | |||
@@ -374,13 +374,10 @@ cio_ignore_proc_init (void) | |||
374 | { | 374 | { |
375 | struct proc_dir_entry *entry; | 375 | struct proc_dir_entry *entry; |
376 | 376 | ||
377 | entry = create_proc_entry ("cio_ignore", S_IFREG | S_IRUGO | S_IWUSR, | 377 | entry = proc_create("cio_ignore", S_IFREG | S_IRUGO | S_IWUSR, NULL, |
378 | &proc_root); | 378 | &cio_ignore_proc_fops); |
379 | if (!entry) | 379 | if (!entry) |
380 | return -ENOENT; | 380 | return -ENOENT; |
381 | |||
382 | entry->proc_fops = &cio_ignore_proc_fops; | ||
383 | |||
384 | return 0; | 381 | return 0; |
385 | } | 382 | } |
386 | 383 | ||
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c index 10aa1e780801..43876e287370 100644 --- a/drivers/s390/cio/qdio.c +++ b/drivers/s390/cio/qdio.c | |||
@@ -3632,7 +3632,7 @@ qdio_add_procfs_entry(void) | |||
3632 | { | 3632 | { |
3633 | proc_perf_file_registration=0; | 3633 | proc_perf_file_registration=0; |
3634 | qdio_perf_proc_file=create_proc_entry(QDIO_PERF, | 3634 | qdio_perf_proc_file=create_proc_entry(QDIO_PERF, |
3635 | S_IFREG|0444,&proc_root); | 3635 | S_IFREG|0444,NULL); |
3636 | if (qdio_perf_proc_file) { | 3636 | if (qdio_perf_proc_file) { |
3637 | qdio_perf_proc_file->read_proc=&qdio_perf_procfile_read; | 3637 | qdio_perf_proc_file->read_proc=&qdio_perf_procfile_read; |
3638 | } else proc_perf_file_registration=-1; | 3638 | } else proc_perf_file_registration=-1; |
@@ -3647,7 +3647,7 @@ static void | |||
3647 | qdio_remove_procfs_entry(void) | 3647 | qdio_remove_procfs_entry(void) |
3648 | { | 3648 | { |
3649 | if (!proc_perf_file_registration) /* means if it went ok earlier */ | 3649 | if (!proc_perf_file_registration) /* means if it went ok earlier */ |
3650 | remove_proc_entry(QDIO_PERF,&proc_root); | 3650 | remove_proc_entry(QDIO_PERF,NULL); |
3651 | } | 3651 | } |
3652 | 3652 | ||
3653 | /** | 3653 | /** |
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c index b135a1ed4b2c..18551aaf5e09 100644 --- a/drivers/scsi/megaraid.c +++ b/drivers/scsi/megaraid.c | |||
@@ -4996,7 +4996,7 @@ static int __init megaraid_init(void) | |||
4996 | max_mbox_busy_wait = MBOX_BUSY_WAIT; | 4996 | max_mbox_busy_wait = MBOX_BUSY_WAIT; |
4997 | 4997 | ||
4998 | #ifdef CONFIG_PROC_FS | 4998 | #ifdef CONFIG_PROC_FS |
4999 | mega_proc_dir_entry = proc_mkdir("megaraid", &proc_root); | 4999 | mega_proc_dir_entry = proc_mkdir("megaraid", NULL); |
5000 | if (!mega_proc_dir_entry) { | 5000 | if (!mega_proc_dir_entry) { |
5001 | printk(KERN_WARNING | 5001 | printk(KERN_WARNING |
5002 | "megaraid: failed to create megaraid root\n"); | 5002 | "megaraid: failed to create megaraid root\n"); |
@@ -5005,7 +5005,7 @@ static int __init megaraid_init(void) | |||
5005 | error = pci_register_driver(&megaraid_pci_driver); | 5005 | error = pci_register_driver(&megaraid_pci_driver); |
5006 | if (error) { | 5006 | if (error) { |
5007 | #ifdef CONFIG_PROC_FS | 5007 | #ifdef CONFIG_PROC_FS |
5008 | remove_proc_entry("megaraid", &proc_root); | 5008 | remove_proc_entry("megaraid", NULL); |
5009 | #endif | 5009 | #endif |
5010 | return error; | 5010 | return error; |
5011 | } | 5011 | } |
@@ -5035,7 +5035,7 @@ static void __exit megaraid_exit(void) | |||
5035 | pci_unregister_driver(&megaraid_pci_driver); | 5035 | pci_unregister_driver(&megaraid_pci_driver); |
5036 | 5036 | ||
5037 | #ifdef CONFIG_PROC_FS | 5037 | #ifdef CONFIG_PROC_FS |
5038 | remove_proc_entry("megaraid", &proc_root); | 5038 | remove_proc_entry("megaraid", NULL); |
5039 | #endif | 5039 | #endif |
5040 | } | 5040 | } |
5041 | 5041 | ||
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index b8de041bc0ae..a235802f2981 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c | |||
@@ -449,37 +449,40 @@ int scsi_get_device_flags(struct scsi_device *sdev, | |||
449 | } | 449 | } |
450 | 450 | ||
451 | #ifdef CONFIG_SCSI_PROC_FS | 451 | #ifdef CONFIG_SCSI_PROC_FS |
452 | /* | 452 | static int devinfo_seq_show(struct seq_file *m, void *v) |
453 | * proc_scsi_dev_info_read: dump the scsi_dev_info_list via | ||
454 | * /proc/scsi/device_info | ||
455 | */ | ||
456 | static int proc_scsi_devinfo_read(char *buffer, char **start, | ||
457 | off_t offset, int length) | ||
458 | { | 453 | { |
459 | struct scsi_dev_info_list *devinfo; | 454 | struct scsi_dev_info_list *devinfo = |
460 | int size, len = 0; | 455 | list_entry(v, struct scsi_dev_info_list, dev_info_list); |
461 | off_t begin = 0; | ||
462 | off_t pos = 0; | ||
463 | 456 | ||
464 | list_for_each_entry(devinfo, &scsi_dev_info_list, dev_info_list) { | 457 | seq_printf(m, "'%.8s' '%.16s' 0x%x\n", |
465 | size = sprintf(buffer + len, "'%.8s' '%.16s' 0x%x\n", | ||
466 | devinfo->vendor, devinfo->model, devinfo->flags); | 458 | devinfo->vendor, devinfo->model, devinfo->flags); |
467 | len += size; | 459 | return 0; |
468 | pos = begin + len; | 460 | } |
469 | if (pos < offset) { | 461 | |
470 | len = 0; | 462 | static void * devinfo_seq_start(struct seq_file *m, loff_t *pos) |
471 | begin = pos; | 463 | { |
472 | } | 464 | return seq_list_start(&scsi_dev_info_list, *pos); |
473 | if (pos > offset + length) | 465 | } |
474 | goto stop_output; | ||
475 | } | ||
476 | 466 | ||
477 | stop_output: | 467 | static void * devinfo_seq_next(struct seq_file *m, void *v, loff_t *pos) |
478 | *start = buffer + (offset - begin); /* Start of wanted data */ | 468 | { |
479 | len -= (offset - begin); /* Start slop */ | 469 | return seq_list_next(v, &scsi_dev_info_list, pos); |
480 | if (len > length) | 470 | } |
481 | len = length; /* Ending slop */ | 471 | |
482 | return (len); | 472 | static void devinfo_seq_stop(struct seq_file *m, void *v) |
473 | { | ||
474 | } | ||
475 | |||
476 | static const struct seq_operations scsi_devinfo_seq_ops = { | ||
477 | .start = devinfo_seq_start, | ||
478 | .next = devinfo_seq_next, | ||
479 | .stop = devinfo_seq_stop, | ||
480 | .show = devinfo_seq_show, | ||
481 | }; | ||
482 | |||
483 | static int proc_scsi_devinfo_open(struct inode *inode, struct file *file) | ||
484 | { | ||
485 | return seq_open(file, &scsi_devinfo_seq_ops); | ||
483 | } | 486 | } |
484 | 487 | ||
485 | /* | 488 | /* |
@@ -489,11 +492,12 @@ stop_output: | |||
489 | * integer value of flag to the scsi device info list. | 492 | * integer value of flag to the scsi device info list. |
490 | * To use, echo "vendor:model:flag" > /proc/scsi/device_info | 493 | * To use, echo "vendor:model:flag" > /proc/scsi/device_info |
491 | */ | 494 | */ |
492 | static int proc_scsi_devinfo_write(struct file *file, const char __user *buf, | 495 | static ssize_t proc_scsi_devinfo_write(struct file *file, |
493 | unsigned long length, void *data) | 496 | const char __user *buf, |
497 | size_t length, loff_t *ppos) | ||
494 | { | 498 | { |
495 | char *buffer; | 499 | char *buffer; |
496 | int err = length; | 500 | ssize_t err = length; |
497 | 501 | ||
498 | if (!buf || length>PAGE_SIZE) | 502 | if (!buf || length>PAGE_SIZE) |
499 | return -EINVAL; | 503 | return -EINVAL; |
@@ -517,6 +521,15 @@ out: | |||
517 | free_page((unsigned long)buffer); | 521 | free_page((unsigned long)buffer); |
518 | return err; | 522 | return err; |
519 | } | 523 | } |
524 | |||
525 | static const struct file_operations scsi_devinfo_proc_fops = { | ||
526 | .owner = THIS_MODULE, | ||
527 | .open = proc_scsi_devinfo_open, | ||
528 | .read = seq_read, | ||
529 | .write = proc_scsi_devinfo_write, | ||
530 | .llseek = seq_lseek, | ||
531 | .release = seq_release, | ||
532 | }; | ||
520 | #endif /* CONFIG_SCSI_PROC_FS */ | 533 | #endif /* CONFIG_SCSI_PROC_FS */ |
521 | 534 | ||
522 | module_param_string(dev_flags, scsi_dev_flags, sizeof(scsi_dev_flags), 0); | 535 | module_param_string(dev_flags, scsi_dev_flags, sizeof(scsi_dev_flags), 0); |
@@ -577,15 +590,13 @@ int __init scsi_init_devinfo(void) | |||
577 | } | 590 | } |
578 | 591 | ||
579 | #ifdef CONFIG_SCSI_PROC_FS | 592 | #ifdef CONFIG_SCSI_PROC_FS |
580 | p = create_proc_entry("scsi/device_info", 0, NULL); | 593 | p = proc_create("scsi/device_info", 0, NULL, &scsi_devinfo_proc_fops); |
581 | if (!p) { | 594 | if (!p) { |
582 | error = -ENOMEM; | 595 | error = -ENOMEM; |
583 | goto out; | 596 | goto out; |
584 | } | 597 | } |
585 | 598 | ||
586 | p->owner = THIS_MODULE; | 599 | p->owner = THIS_MODULE; |
587 | p->get_info = proc_scsi_devinfo_read; | ||
588 | p->write_proc = proc_scsi_devinfo_write; | ||
589 | #endif /* CONFIG_SCSI_PROC_FS */ | 600 | #endif /* CONFIG_SCSI_PROC_FS */ |
590 | 601 | ||
591 | out: | 602 | out: |
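
scsi_devinfo.c replaces a hand-rolled buffer/offset read routine with a seq_operations iterator driven by the seq_list_* helpers. A sketch of that iterator shape over a hypothetical list; my_list, struct my_item and the absence of locking are assumptions made for brevity.

        /* Sketch of the seq_list_* iterator shape adopted above.  my_list and
         * struct my_item are hypothetical; locking is omitted for brevity.
         */
        #include <linux/list.h>
        #include <linux/seq_file.h>

        struct my_item {
                struct list_head node;
                int value;
        };

        static LIST_HEAD(my_list);

        static void *my_seq_start(struct seq_file *m, loff_t *pos)
        {
                return seq_list_start(&my_list, *pos);
        }

        static void *my_seq_next(struct seq_file *m, void *v, loff_t *pos)
        {
                return seq_list_next(v, &my_list, pos);
        }

        static void my_seq_stop(struct seq_file *m, void *v)
        {
        }

        static int my_seq_show(struct seq_file *m, void *v)
        {
                struct my_item *item = list_entry(v, struct my_item, node);

                seq_printf(m, "%d\n", item->value);
                return 0;
        }

        static const struct seq_operations my_seq_ops = {
                .start = my_seq_start,
                .next  = my_seq_next,
                .stop  = my_seq_stop,
                .show  = my_seq_show,
        };

An ->open() that calls seq_open(file, &my_seq_ops) then exposes one list entry per line, exactly as the devinfo conversion does for /proc/scsi/device_info.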
diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c index 3a1c99d5c775..e4a0d2f9b357 100644 --- a/drivers/scsi/scsi_proc.c +++ b/drivers/scsi/scsi_proc.c | |||
@@ -413,6 +413,7 @@ static int proc_scsi_open(struct inode *inode, struct file *file) | |||
413 | } | 413 | } |
414 | 414 | ||
415 | static const struct file_operations proc_scsi_operations = { | 415 | static const struct file_operations proc_scsi_operations = { |
416 | .owner = THIS_MODULE, | ||
416 | .open = proc_scsi_open, | 417 | .open = proc_scsi_open, |
417 | .read = seq_read, | 418 | .read = seq_read, |
418 | .write = proc_scsi_write, | 419 | .write = proc_scsi_write, |
@@ -431,10 +432,9 @@ int __init scsi_init_procfs(void) | |||
431 | if (!proc_scsi) | 432 | if (!proc_scsi) |
432 | goto err1; | 433 | goto err1; |
433 | 434 | ||
434 | pde = create_proc_entry("scsi/scsi", 0, NULL); | 435 | pde = proc_create("scsi/scsi", 0, NULL, &proc_scsi_operations); |
435 | if (!pde) | 436 | if (!pde) |
436 | goto err2; | 437 | goto err2; |
437 | pde->proc_fops = &proc_scsi_operations; | ||
438 | 438 | ||
439 | return 0; | 439 | return 0; |
440 | 440 | ||
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index fcd7455ffc39..a00eee6f7be9 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c | |||
@@ -1828,7 +1828,7 @@ void scsi_scan_host(struct Scsi_Host *shost) | |||
1828 | } | 1828 | } |
1829 | 1829 | ||
1830 | p = kthread_run(do_scan_async, data, "scsi_scan_%d", shost->host_no); | 1830 | p = kthread_run(do_scan_async, data, "scsi_scan_%d", shost->host_no); |
1831 | if (unlikely(IS_ERR(p))) | 1831 | if (IS_ERR(p)) |
1832 | do_scan_async(data); | 1832 | do_scan_async(data); |
1833 | } | 1833 | } |
1834 | EXPORT_SYMBOL(scsi_scan_host); | 1834 | EXPORT_SYMBOL(scsi_scan_host); |
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 2029422bc04d..c9d7f721b9e2 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c | |||
@@ -2667,7 +2667,6 @@ sg_proc_init(void) | |||
2667 | { | 2667 | { |
2668 | int k, mask; | 2668 | int k, mask; |
2669 | int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr); | 2669 | int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr); |
2670 | struct proc_dir_entry *pdep; | ||
2671 | struct sg_proc_leaf * leaf; | 2670 | struct sg_proc_leaf * leaf; |
2672 | 2671 | ||
2673 | sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL); | 2672 | sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL); |
@@ -2676,13 +2675,10 @@ sg_proc_init(void) | |||
2676 | for (k = 0; k < num_leaves; ++k) { | 2675 | for (k = 0; k < num_leaves; ++k) { |
2677 | leaf = &sg_proc_leaf_arr[k]; | 2676 | leaf = &sg_proc_leaf_arr[k]; |
2678 | mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO; | 2677 | mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO; |
2679 | pdep = create_proc_entry(leaf->name, mask, sg_proc_sgp); | 2678 | leaf->fops->owner = THIS_MODULE; |
2680 | if (pdep) { | 2679 | leaf->fops->read = seq_read; |
2681 | leaf->fops->owner = THIS_MODULE, | 2680 | leaf->fops->llseek = seq_lseek; |
2682 | leaf->fops->read = seq_read, | 2681 | proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops); |
2683 | leaf->fops->llseek = seq_lseek, | ||
2684 | pdep->proc_fops = leaf->fops; | ||
2685 | } | ||
2686 | } | 2682 | } |
2687 | return 0; | 2683 | return 0; |
2688 | } | 2684 | } |
diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c index 4220f22b6660..5f71ff3aee35 100644 --- a/drivers/usb/atm/ueagle-atm.c +++ b/drivers/usb/atm/ueagle-atm.c | |||
@@ -305,8 +305,6 @@ enum { | |||
305 | */ | 305 | */ |
306 | 306 | ||
307 | #define FW_GET_BYTE(p) *((__u8 *) (p)) | 307 | #define FW_GET_BYTE(p) *((__u8 *) (p)) |
308 | #define FW_GET_WORD(p) le16_to_cpu(get_unaligned((__le16 *) (p))) | ||
309 | #define FW_GET_LONG(p) le32_to_cpu(get_unaligned((__le32 *) (p))) | ||
310 | 308 | ||
311 | #define FW_DIR "ueagle-atm/" | 309 | #define FW_DIR "ueagle-atm/" |
312 | #define NB_MODEM 4 | 310 | #define NB_MODEM 4 |
@@ -621,7 +619,7 @@ static void uea_upload_pre_firmware(const struct firmware *fw_entry, void *conte | |||
621 | if (size < 4) | 619 | if (size < 4) |
622 | goto err_fw_corrupted; | 620 | goto err_fw_corrupted; |
623 | 621 | ||
624 | crc = FW_GET_LONG(pfw); | 622 | crc = get_unaligned_le32(pfw); |
625 | pfw += 4; | 623 | pfw += 4; |
626 | size -= 4; | 624 | size -= 4; |
627 | if (crc32_be(0, pfw, size) != crc) | 625 | if (crc32_be(0, pfw, size) != crc) |
@@ -640,7 +638,7 @@ static void uea_upload_pre_firmware(const struct firmware *fw_entry, void *conte | |||
640 | 638 | ||
641 | while (size > 3) { | 639 | while (size > 3) { |
642 | u8 len = FW_GET_BYTE(pfw); | 640 | u8 len = FW_GET_BYTE(pfw); |
643 | u16 add = FW_GET_WORD(pfw + 1); | 641 | u16 add = get_unaligned_le16(pfw + 1); |
644 | 642 | ||
645 | size -= len + 3; | 643 | size -= len + 3; |
646 | if (size < 0) | 644 | if (size < 0) |
@@ -738,7 +736,7 @@ static int check_dsp_e1(u8 *dsp, unsigned int len) | |||
738 | 736 | ||
739 | for (i = 0; i < pagecount; i++) { | 737 | for (i = 0; i < pagecount; i++) { |
740 | 738 | ||
741 | pageoffset = FW_GET_LONG(dsp + p); | 739 | pageoffset = get_unaligned_le32(dsp + p); |
742 | p += 4; | 740 | p += 4; |
743 | 741 | ||
744 | if (pageoffset == 0) | 742 | if (pageoffset == 0) |
@@ -759,7 +757,7 @@ static int check_dsp_e1(u8 *dsp, unsigned int len) | |||
759 | return 1; | 757 | return 1; |
760 | 758 | ||
761 | pp += 2; /* skip blockaddr */ | 759 | pp += 2; /* skip blockaddr */ |
762 | blocksize = FW_GET_WORD(dsp + pp); | 760 | blocksize = get_unaligned_le16(dsp + pp); |
763 | pp += 2; | 761 | pp += 2; |
764 | 762 | ||
765 | /* enough space for block data? */ | 763 | /* enough space for block data? */ |
@@ -928,7 +926,7 @@ static void uea_load_page_e1(struct work_struct *work) | |||
928 | goto bad1; | 926 | goto bad1; |
929 | 927 | ||
930 | p += 4 * pageno; | 928 | p += 4 * pageno; |
931 | pageoffset = FW_GET_LONG(p); | 929 | pageoffset = get_unaligned_le32(p); |
932 | 930 | ||
933 | if (pageoffset == 0) | 931 | if (pageoffset == 0) |
934 | goto bad1; | 932 | goto bad1; |
@@ -945,10 +943,10 @@ static void uea_load_page_e1(struct work_struct *work) | |||
945 | bi.wOvlOffset = cpu_to_le16(ovl | 0x8000); | 943 | bi.wOvlOffset = cpu_to_le16(ovl | 0x8000); |
946 | 944 | ||
947 | for (i = 0; i < blockcount; i++) { | 945 | for (i = 0; i < blockcount; i++) { |
948 | blockaddr = FW_GET_WORD(p); | 946 | blockaddr = get_unaligned_le16(p); |
949 | p += 2; | 947 | p += 2; |
950 | 948 | ||
951 | blocksize = FW_GET_WORD(p); | 949 | blocksize = get_unaligned_le16(p); |
952 | p += 2; | 950 | p += 2; |
953 | 951 | ||
954 | bi.wSize = cpu_to_le16(blocksize); | 952 | bi.wSize = cpu_to_le16(blocksize); |
@@ -1152,9 +1150,9 @@ static int uea_cmv_e1(struct uea_softc *sc, | |||
1152 | cmv.bDirection = E1_HOSTTOMODEM; | 1150 | cmv.bDirection = E1_HOSTTOMODEM; |
1153 | cmv.bFunction = function; | 1151 | cmv.bFunction = function; |
1154 | cmv.wIndex = cpu_to_le16(sc->cmv_dsc.e1.idx); | 1152 | cmv.wIndex = cpu_to_le16(sc->cmv_dsc.e1.idx); |
1155 | put_unaligned(cpu_to_le32(address), &cmv.dwSymbolicAddress); | 1153 | put_unaligned_le32(address, &cmv.dwSymbolicAddress); |
1156 | cmv.wOffsetAddress = cpu_to_le16(offset); | 1154 | cmv.wOffsetAddress = cpu_to_le16(offset); |
1157 | put_unaligned(cpu_to_le32(data >> 16 | data << 16), &cmv.dwData); | 1155 | put_unaligned_le32(data >> 16 | data << 16, &cmv.dwData); |
1158 | 1156 | ||
1159 | ret = uea_request(sc, UEA_E1_SET_BLOCK, UEA_MPTX_START, sizeof(cmv), &cmv); | 1157 | ret = uea_request(sc, UEA_E1_SET_BLOCK, UEA_MPTX_START, sizeof(cmv), &cmv); |
1160 | if (ret < 0) | 1158 | if (ret < 0) |
@@ -1646,7 +1644,7 @@ static int request_cmvs(struct uea_softc *sc, | |||
1646 | if (size < 5) | 1644 | if (size < 5) |
1647 | goto err_fw_corrupted; | 1645 | goto err_fw_corrupted; |
1648 | 1646 | ||
1649 | crc = FW_GET_LONG(data); | 1647 | crc = get_unaligned_le32(data); |
1650 | data += 4; | 1648 | data += 4; |
1651 | size -= 4; | 1649 | size -= 4; |
1652 | if (crc32_be(0, data, size) != crc) | 1650 | if (crc32_be(0, data, size) != crc) |
@@ -1696,9 +1694,9 @@ static int uea_send_cmvs_e1(struct uea_softc *sc) | |||
1696 | "please update your firmware\n"); | 1694 | "please update your firmware\n"); |
1697 | 1695 | ||
1698 | for (i = 0; i < len; i++) { | 1696 | for (i = 0; i < len; i++) { |
1699 | ret = uea_write_cmv_e1(sc, FW_GET_LONG(&cmvs_v1[i].address), | 1697 | ret = uea_write_cmv_e1(sc, get_unaligned_le32(&cmvs_v1[i].address), |
1700 | FW_GET_WORD(&cmvs_v1[i].offset), | 1698 | get_unaligned_le16(&cmvs_v1[i].offset), |
1701 | FW_GET_LONG(&cmvs_v1[i].data)); | 1699 | get_unaligned_le32(&cmvs_v1[i].data)); |
1702 | if (ret < 0) | 1700 | if (ret < 0) |
1703 | goto out; | 1701 | goto out; |
1704 | } | 1702 | } |
@@ -1706,9 +1704,9 @@ static int uea_send_cmvs_e1(struct uea_softc *sc) | |||
1706 | struct uea_cmvs_v2 *cmvs_v2 = cmvs_ptr; | 1704 | struct uea_cmvs_v2 *cmvs_v2 = cmvs_ptr; |
1707 | 1705 | ||
1708 | for (i = 0; i < len; i++) { | 1706 | for (i = 0; i < len; i++) { |
1709 | ret = uea_write_cmv_e1(sc, FW_GET_LONG(&cmvs_v2[i].address), | 1707 | ret = uea_write_cmv_e1(sc, get_unaligned_le32(&cmvs_v2[i].address), |
1710 | (u16) FW_GET_LONG(&cmvs_v2[i].offset), | 1708 | (u16) get_unaligned_le32(&cmvs_v2[i].offset), |
1711 | FW_GET_LONG(&cmvs_v2[i].data)); | 1709 | get_unaligned_le32(&cmvs_v2[i].data)); |
1712 | if (ret < 0) | 1710 | if (ret < 0) |
1713 | goto out; | 1711 | goto out; |
1714 | } | 1712 | } |
@@ -1759,10 +1757,10 @@ static int uea_send_cmvs_e4(struct uea_softc *sc) | |||
1759 | 1757 | ||
1760 | for (i = 0; i < len; i++) { | 1758 | for (i = 0; i < len; i++) { |
1761 | ret = uea_write_cmv_e4(sc, 1, | 1759 | ret = uea_write_cmv_e4(sc, 1, |
1762 | FW_GET_LONG(&cmvs_v2[i].group), | 1760 | get_unaligned_le32(&cmvs_v2[i].group), |
1763 | FW_GET_LONG(&cmvs_v2[i].address), | 1761 | get_unaligned_le32(&cmvs_v2[i].address), |
1764 | FW_GET_LONG(&cmvs_v2[i].offset), | 1762 | get_unaligned_le32(&cmvs_v2[i].offset), |
1765 | FW_GET_LONG(&cmvs_v2[i].data)); | 1763 | get_unaligned_le32(&cmvs_v2[i].data)); |
1766 | if (ret < 0) | 1764 | if (ret < 0) |
1767 | goto out; | 1765 | goto out; |
1768 | } | 1766 | } |
@@ -1964,7 +1962,7 @@ static void uea_dispatch_cmv_e1(struct uea_softc *sc, struct intr_pkt *intr) | |||
1964 | if (UEA_CHIP_VERSION(sc) == ADI930 | 1962 | if (UEA_CHIP_VERSION(sc) == ADI930 |
1965 | && cmv->bFunction == E1_MAKEFUNCTION(2, 2)) { | 1963 | && cmv->bFunction == E1_MAKEFUNCTION(2, 2)) { |
1966 | cmv->wIndex = cpu_to_le16(dsc->idx); | 1964 | cmv->wIndex = cpu_to_le16(dsc->idx); |
1967 | put_unaligned(cpu_to_le32(dsc->address), &cmv->dwSymbolicAddress); | 1965 | put_unaligned_le32(dsc->address, &cmv->dwSymbolicAddress); |
1968 | cmv->wOffsetAddress = cpu_to_le16(dsc->offset); | 1966 | cmv->wOffsetAddress = cpu_to_le16(dsc->offset); |
1969 | } else | 1967 | } else |
1970 | goto bad2; | 1968 | goto bad2; |
@@ -1978,11 +1976,11 @@ static void uea_dispatch_cmv_e1(struct uea_softc *sc, struct intr_pkt *intr) | |||
1978 | 1976 | ||
1979 | /* in case of MEMACCESS */ | 1977 | /* in case of MEMACCESS */ |
1980 | if (le16_to_cpu(cmv->wIndex) != dsc->idx || | 1978 | if (le16_to_cpu(cmv->wIndex) != dsc->idx || |
1981 | le32_to_cpu(get_unaligned(&cmv->dwSymbolicAddress)) != dsc->address || | 1979 | get_unaligned_le32(&cmv->dwSymbolicAddress) != dsc->address || |
1982 | le16_to_cpu(cmv->wOffsetAddress) != dsc->offset) | 1980 | le16_to_cpu(cmv->wOffsetAddress) != dsc->offset) |
1983 | goto bad2; | 1981 | goto bad2; |
1984 | 1982 | ||
1985 | sc->data = le32_to_cpu(get_unaligned(&cmv->dwData)); | 1983 | sc->data = get_unaligned_le32(&cmv->dwData); |
1986 | sc->data = sc->data << 16 | sc->data >> 16; | 1984 | sc->data = sc->data << 16 | sc->data >> 16; |
1987 | 1985 | ||
1988 | wake_up_cmv_ack(sc); | 1986 | wake_up_cmv_ack(sc); |
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 7b572e75e73c..cefe7f2c6f75 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c | |||
@@ -280,7 +280,7 @@ static void acm_ctrl_irq(struct urb *urb) | |||
280 | 280 | ||
281 | case USB_CDC_NOTIFY_SERIAL_STATE: | 281 | case USB_CDC_NOTIFY_SERIAL_STATE: |
282 | 282 | ||
283 | newctrl = le16_to_cpu(get_unaligned((__le16 *) data)); | 283 | newctrl = get_unaligned_le16(data); |
284 | 284 | ||
285 | if (acm->tty && !acm->clocal && (acm->ctrlin & ~newctrl & ACM_CTRL_DCD)) { | 285 | if (acm->tty && !acm->clocal && (acm->ctrlin & ~newctrl & ACM_CTRL_DCD)) { |
286 | dbg("calling hangup"); | 286 | dbg("calling hangup"); |
diff --git a/drivers/usb/core/inode.c b/drivers/usb/core/inode.c index 8607846e3c3f..1d253dd4ea81 100644 --- a/drivers/usb/core/inode.c +++ b/drivers/usb/core/inode.c | |||
@@ -773,7 +773,7 @@ int __init usbfs_init(void) | |||
773 | usb_register_notify(&usbfs_nb); | 773 | usb_register_notify(&usbfs_nb); |
774 | 774 | ||
775 | /* create mount point for usbfs */ | 775 | /* create mount point for usbfs */ |
776 | usbdir = proc_mkdir("usb", proc_bus); | 776 | usbdir = proc_mkdir("bus/usb", NULL); |
777 | 777 | ||
778 | return 0; | 778 | return 0; |
779 | } | 779 | } |
@@ -783,6 +783,6 @@ void usbfs_cleanup(void) | |||
783 | usb_unregister_notify(&usbfs_nb); | 783 | usb_unregister_notify(&usbfs_nb); |
784 | unregister_filesystem(&usb_fs_type); | 784 | unregister_filesystem(&usb_fs_type); |
785 | if (usbdir) | 785 | if (usbdir) |
786 | remove_proc_entry("usb", proc_bus); | 786 | remove_proc_entry("bus/usb", NULL); |
787 | } | 787 | } |
788 | 788 | ||
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c index 9b913afb2e6d..274c60a970cd 100644 --- a/drivers/usb/gadget/at91_udc.c +++ b/drivers/usb/gadget/at91_udc.c | |||
@@ -231,6 +231,7 @@ static int proc_udc_open(struct inode *inode, struct file *file) | |||
231 | } | 231 | } |
232 | 232 | ||
233 | static const struct file_operations proc_ops = { | 233 | static const struct file_operations proc_ops = { |
234 | .owner = THIS_MODULE, | ||
234 | .open = proc_udc_open, | 235 | .open = proc_udc_open, |
235 | .read = seq_read, | 236 | .read = seq_read, |
236 | .llseek = seq_lseek, | 237 | .llseek = seq_lseek, |
@@ -239,15 +240,7 @@ static const struct file_operations proc_ops = { | |||
239 | 240 | ||
240 | static void create_debug_file(struct at91_udc *udc) | 241 | static void create_debug_file(struct at91_udc *udc) |
241 | { | 242 | { |
242 | struct proc_dir_entry *pde; | 243 | udc->pde = proc_create_data(debug_filename, 0, NULL, &proc_ops, udc); |
243 | |||
244 | pde = create_proc_entry (debug_filename, 0, NULL); | ||
245 | udc->pde = pde; | ||
246 | if (pde == NULL) | ||
247 | return; | ||
248 | |||
249 | pde->proc_fops = &proc_ops; | ||
250 | pde->data = udc; | ||
251 | } | 244 | } |
252 | 245 | ||
253 | static void remove_debug_file(struct at91_udc *udc) | 246 | static void remove_debug_file(struct at91_udc *udc) |
diff --git a/drivers/usb/gadget/goku_udc.c b/drivers/usb/gadget/goku_udc.c index 64a592cbbe7b..be6613afedbf 100644 --- a/drivers/usb/gadget/goku_udc.c +++ b/drivers/usb/gadget/goku_udc.c | |||
@@ -127,7 +127,7 @@ goku_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) | |||
127 | 127 | ||
128 | /* enabling the no-toggle interrupt mode would need an api hook */ | 128 | /* enabling the no-toggle interrupt mode would need an api hook */ |
129 | mode = 0; | 129 | mode = 0; |
130 | max = le16_to_cpu(get_unaligned(&desc->wMaxPacketSize)); | 130 | max = get_unaligned_le16(&desc->wMaxPacketSize); |
131 | switch (max) { | 131 | switch (max) { |
132 | case 64: mode++; | 132 | case 64: mode++; |
133 | case 32: mode++; | 133 | case 32: mode++; |
diff --git a/drivers/usb/gadget/omap_udc.c b/drivers/usb/gadget/omap_udc.c index 95f7662376f1..881d74c3d964 100644 --- a/drivers/usb/gadget/omap_udc.c +++ b/drivers/usb/gadget/omap_udc.c | |||
@@ -2504,6 +2504,7 @@ static int proc_udc_open(struct inode *inode, struct file *file) | |||
2504 | } | 2504 | } |
2505 | 2505 | ||
2506 | static const struct file_operations proc_ops = { | 2506 | static const struct file_operations proc_ops = { |
2507 | .owner = THIS_MODULE, | ||
2507 | .open = proc_udc_open, | 2508 | .open = proc_udc_open, |
2508 | .read = seq_read, | 2509 | .read = seq_read, |
2509 | .llseek = seq_lseek, | 2510 | .llseek = seq_lseek, |
@@ -2512,11 +2513,7 @@ static const struct file_operations proc_ops = { | |||
2512 | 2513 | ||
2513 | static void create_proc_file(void) | 2514 | static void create_proc_file(void) |
2514 | { | 2515 | { |
2515 | struct proc_dir_entry *pde; | 2516 | proc_create(proc_filename, 0, NULL, &proc_ops); |
2516 | |||
2517 | pde = create_proc_entry (proc_filename, 0, NULL); | ||
2518 | if (pde) | ||
2519 | pde->proc_fops = &proc_ops; | ||
2520 | } | 2517 | } |
2521 | 2518 | ||
2522 | static void remove_proc_file(void) | 2519 | static void remove_proc_file(void) |
diff --git a/drivers/usb/gadget/rndis.c b/drivers/usb/gadget/rndis.c index bd58dd504f6f..d0677f5d3cd5 100644 --- a/drivers/usb/gadget/rndis.c +++ b/drivers/usb/gadget/rndis.c | |||
@@ -183,14 +183,10 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len, | |||
183 | DBG("query OID %08x value, len %d:\n", OID, buf_len); | 183 | DBG("query OID %08x value, len %d:\n", OID, buf_len); |
184 | for (i = 0; i < buf_len; i += 16) { | 184 | for (i = 0; i < buf_len; i += 16) { |
185 | DBG("%03d: %08x %08x %08x %08x\n", i, | 185 | DBG("%03d: %08x %08x %08x %08x\n", i, |
186 | le32_to_cpu(get_unaligned((__le32 *) | 186 | get_unaligned_le32(&buf[i]), |
187 | &buf[i])), | 187 | get_unaligned_le32(&buf[i + 4]), |
188 | le32_to_cpu(get_unaligned((__le32 *) | 188 | get_unaligned_le32(&buf[i + 8]), |
189 | &buf[i + 4])), | 189 | get_unaligned_le32(&buf[i + 12])); |
190 | le32_to_cpu(get_unaligned((__le32 *) | ||
191 | &buf[i + 8])), | ||
192 | le32_to_cpu(get_unaligned((__le32 *) | ||
193 | &buf[i + 12]))); | ||
194 | } | 190 | } |
195 | } | 191 | } |
196 | 192 | ||
@@ -666,7 +662,7 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len, | |||
666 | break; | 662 | break; |
667 | case OID_PNP_QUERY_POWER: | 663 | case OID_PNP_QUERY_POWER: |
668 | DBG("%s: OID_PNP_QUERY_POWER D%d\n", __func__, | 664 | DBG("%s: OID_PNP_QUERY_POWER D%d\n", __func__, |
669 | le32_to_cpu(get_unaligned((__le32 *)buf)) - 1); | 665 | get_unaligned_le32(buf) - 1); |
670 | /* only suspend is a real power state, and | 666 | /* only suspend is a real power state, and |
671 | * it can't be entered by OID_PNP_SET_POWER... | 667 | * it can't be entered by OID_PNP_SET_POWER... |
672 | */ | 668 | */ |
@@ -705,14 +701,10 @@ static int gen_ndis_set_resp (u8 configNr, u32 OID, u8 *buf, u32 buf_len, | |||
705 | DBG("set OID %08x value, len %d:\n", OID, buf_len); | 701 | DBG("set OID %08x value, len %d:\n", OID, buf_len); |
706 | for (i = 0; i < buf_len; i += 16) { | 702 | for (i = 0; i < buf_len; i += 16) { |
707 | DBG("%03d: %08x %08x %08x %08x\n", i, | 703 | DBG("%03d: %08x %08x %08x %08x\n", i, |
708 | le32_to_cpu(get_unaligned((__le32 *) | 704 | get_unaligned_le32(&buf[i]), |
709 | &buf[i])), | 705 | get_unaligned_le32(&buf[i + 4]), |
710 | le32_to_cpu(get_unaligned((__le32 *) | 706 | get_unaligned_le32(&buf[i + 8]), |
711 | &buf[i + 4])), | 707 | get_unaligned_le32(&buf[i + 12])); |
712 | le32_to_cpu(get_unaligned((__le32 *) | ||
713 | &buf[i + 8])), | ||
714 | le32_to_cpu(get_unaligned((__le32 *) | ||
715 | &buf[i + 12]))); | ||
716 | } | 708 | } |
717 | } | 709 | } |
718 | 710 | ||
@@ -726,8 +718,7 @@ static int gen_ndis_set_resp (u8 configNr, u32 OID, u8 *buf, u32 buf_len, | |||
726 | * PROMISCUOUS, DIRECTED, | 718 | * PROMISCUOUS, DIRECTED, |
727 | * MULTICAST, ALL_MULTICAST, BROADCAST | 719 | * MULTICAST, ALL_MULTICAST, BROADCAST |
728 | */ | 720 | */ |
729 | *params->filter = (u16) le32_to_cpu(get_unaligned( | 721 | *params->filter = (u16)get_unaligned_le32(buf); |
730 | (__le32 *)buf)); | ||
731 | DBG("%s: OID_GEN_CURRENT_PACKET_FILTER %08x\n", | 722 | DBG("%s: OID_GEN_CURRENT_PACKET_FILTER %08x\n", |
732 | __func__, *params->filter); | 723 | __func__, *params->filter); |
733 | 724 | ||
@@ -777,7 +768,7 @@ update_linkstate: | |||
777 | * resuming, Windows forces a reset, and then SET_POWER D0. | 768 | * resuming, Windows forces a reset, and then SET_POWER D0. |
778 | * FIXME ... then things go batty; Windows wedges itself. | 769 | * FIXME ... then things go batty; Windows wedges itself. |
779 | */ | 770 | */ |
780 | i = le32_to_cpu(get_unaligned((__le32 *)buf)); | 771 | i = get_unaligned_le32(buf); |
781 | DBG("%s: OID_PNP_SET_POWER D%d\n", __func__, i - 1); | 772 | DBG("%s: OID_PNP_SET_POWER D%d\n", __func__, i - 1); |
782 | switch (i) { | 773 | switch (i) { |
783 | case NdisDeviceStateD0: | 774 | case NdisDeviceStateD0: |
@@ -1064,8 +1055,8 @@ int rndis_msg_parser (u8 configNr, u8 *buf) | |||
1064 | return -ENOMEM; | 1055 | return -ENOMEM; |
1065 | 1056 | ||
1066 | tmp = (__le32 *) buf; | 1057 | tmp = (__le32 *) buf; |
1067 | MsgType = le32_to_cpu(get_unaligned(tmp++)); | 1058 | MsgType = get_unaligned_le32(tmp++); |
1068 | MsgLength = le32_to_cpu(get_unaligned(tmp++)); | 1059 | MsgLength = get_unaligned_le32(tmp++); |
1069 | 1060 | ||
1070 | if (configNr >= RNDIS_MAX_CONFIGS) | 1061 | if (configNr >= RNDIS_MAX_CONFIGS) |
1071 | return -ENOTSUPP; | 1062 | return -ENOTSUPP; |
@@ -1296,10 +1287,9 @@ int rndis_rm_hdr(struct sk_buff *skb) | |||
1296 | tmp++; | 1287 | tmp++; |
1297 | 1288 | ||
1298 | /* DataOffset, DataLength */ | 1289 | /* DataOffset, DataLength */ |
1299 | if (!skb_pull(skb, le32_to_cpu(get_unaligned(tmp++)) | 1290 | if (!skb_pull(skb, get_unaligned_le32(tmp++) + 8)) |
1300 | + 8 /* offset of DataOffset */)) | ||
1301 | return -EOVERFLOW; | 1291 | return -EOVERFLOW; |
1302 | skb_trim(skb, le32_to_cpu(get_unaligned(tmp++))); | 1292 | skb_trim(skb, get_unaligned_le32(tmp++)); |
1303 | 1293 | ||
1304 | return 0; | 1294 | return 0; |
1305 | } | 1295 | } |
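
The rndis.c hunks above replace the open-coded le32_to_cpu(get_unaligned((__le32 *)p)) idiom with get_unaligned_le32(), which reads a possibly unaligned little-endian 32-bit word and returns it in host byte order. A minimal sketch of the equivalence, with illustrative function names that are not part of the driver:

#include <asm/byteorder.h>
#include <asm/unaligned.h>

/* Both return the same host-order value from an unaligned LE buffer;
 * the helper simply folds the cast and the byte swap into one call. */
static u32 read_le32_old(const u8 *buf)
{
	return le32_to_cpu(get_unaligned((__le32 *)buf));
}

static u32 read_le32_new(const u8 *buf)
{
	return get_unaligned_le32(buf);
}
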
diff --git a/drivers/usb/gadget/usbstring.c b/drivers/usb/gadget/usbstring.c index 878e428a0ec1..4154be375c7a 100644 --- a/drivers/usb/gadget/usbstring.c +++ b/drivers/usb/gadget/usbstring.c | |||
@@ -74,7 +74,7 @@ static int utf8_to_utf16le(const char *s, __le16 *cp, unsigned len) | |||
74 | goto fail; | 74 | goto fail; |
75 | } else | 75 | } else |
76 | uchar = c; | 76 | uchar = c; |
77 | put_unaligned (cpu_to_le16 (uchar), cp++); | 77 | put_unaligned_le16(uchar, cp++); |
78 | count++; | 78 | count++; |
79 | len--; | 79 | len--; |
80 | } | 80 | } |
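
usbstring.c shows the same conversion in the write direction: put_unaligned_le16() stores a host-order 16-bit value little-endian at a possibly unaligned address, replacing put_unaligned(cpu_to_le16(v), p). A hedged sketch, with an illustrative helper name:

#include <asm/unaligned.h>

/* Emit one UTF-16LE code unit at an unaligned output position. */
static void emit_utf16le(__le16 *cp, u16 uchar)
{
	/* old: put_unaligned(cpu_to_le16(uchar), cp); */
	put_unaligned_le16(uchar, cp);
}
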
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c index f13d1029aeb2..382587c4457c 100644 --- a/drivers/usb/host/ehci-hub.c +++ b/drivers/usb/host/ehci-hub.c | |||
@@ -770,7 +770,7 @@ static int ehci_hub_control ( | |||
770 | if (status & ~0xffff) /* only if wPortChange is interesting */ | 770 | if (status & ~0xffff) /* only if wPortChange is interesting */ |
771 | #endif | 771 | #endif |
772 | dbg_port (ehci, "GetStatus", wIndex + 1, temp); | 772 | dbg_port (ehci, "GetStatus", wIndex + 1, temp); |
773 | put_unaligned(cpu_to_le32 (status), (__le32 *) buf); | 773 | put_unaligned_le32(status, buf); |
774 | break; | 774 | break; |
775 | case SetHubFeature: | 775 | case SetHubFeature: |
776 | switch (wValue) { | 776 | switch (wValue) { |
diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c index 5be3bb3e6a9d..17dc2eccda83 100644 --- a/drivers/usb/host/ohci-hub.c +++ b/drivers/usb/host/ohci-hub.c | |||
@@ -736,14 +736,14 @@ static int ohci_hub_control ( | |||
736 | break; | 736 | break; |
737 | case GetHubStatus: | 737 | case GetHubStatus: |
738 | temp = roothub_status (ohci) & ~(RH_HS_CRWE | RH_HS_DRWE); | 738 | temp = roothub_status (ohci) & ~(RH_HS_CRWE | RH_HS_DRWE); |
739 | put_unaligned(cpu_to_le32 (temp), (__le32 *) buf); | 739 | put_unaligned_le32(temp, buf); |
740 | break; | 740 | break; |
741 | case GetPortStatus: | 741 | case GetPortStatus: |
742 | if (!wIndex || wIndex > ports) | 742 | if (!wIndex || wIndex > ports) |
743 | goto error; | 743 | goto error; |
744 | wIndex--; | 744 | wIndex--; |
745 | temp = roothub_portstatus (ohci, wIndex); | 745 | temp = roothub_portstatus (ohci, wIndex); |
746 | put_unaligned(cpu_to_le32 (temp), (__le32 *) buf); | 746 | put_unaligned_le32(temp, buf); |
747 | 747 | ||
748 | #ifndef OHCI_VERBOSE_DEBUG | 748 | #ifndef OHCI_VERBOSE_DEBUG |
749 | if (*(u16*)(buf+2)) /* only if wPortChange is interesting */ | 749 | if (*(u16*)(buf+2)) /* only if wPortChange is interesting */ |
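
The ehci-hub.c and ohci-hub.c hunks above apply the same helper on the store side of hub control requests: put_unaligned_le32() takes a plain void * destination, so the (__le32 *) cast disappears along with the explicit cpu_to_le32(). Roughly, under an invented function name:

#include <asm/unaligned.h>

static void report_roothub_status(u8 *buf, u32 status)
{
	/* old: put_unaligned(cpu_to_le32(status), (__le32 *)buf); */
	put_unaligned_le32(status, buf);
}
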
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c index 3fd7a0c12078..426575247b23 100644 --- a/drivers/usb/host/sl811-hcd.c +++ b/drivers/usb/host/sl811-hcd.c | |||
@@ -1506,15 +1506,7 @@ static const char proc_filename[] = "driver/sl811h"; | |||
1506 | 1506 | ||
1507 | static void create_debug_file(struct sl811 *sl811) | 1507 | static void create_debug_file(struct sl811 *sl811) |
1508 | { | 1508 | { |
1509 | struct proc_dir_entry *pde; | 1509 | sl811->pde = proc_create_data(proc_filename, 0, NULL, &proc_ops, sl811); |
1510 | |||
1511 | pde = create_proc_entry(proc_filename, 0, NULL); | ||
1512 | if (pde == NULL) | ||
1513 | return; | ||
1514 | |||
1515 | pde->proc_fops = &proc_ops; | ||
1516 | pde->data = sl811; | ||
1517 | sl811->pde = pde; | ||
1518 | } | 1510 | } |
1519 | 1511 | ||
1520 | static void remove_debug_file(struct sl811 *sl811) | 1512 | static void remove_debug_file(struct sl811 *sl811) |
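
sl811-hcd now sets up its debug file in a single call: proc_create_data() registers the entry, attaches the file_operations and the driver-private data pointer in one step, and returns NULL on failure, so the old create-then-fill-fields sequence (and its local variable) can go. A sketch with a hypothetical driver structure, not the real sl811 types:

#include <linux/proc_fs.h>

struct my_dev {
	struct proc_dir_entry *pde;
};

static const struct file_operations my_proc_ops;	/* defined elsewhere */

static void my_create_debug_file(struct my_dev *dev)
{
	/* A NULL return simply leaves dev->pde unset; nothing else to undo. */
	dev->pde = proc_create_data("driver/mydev", 0, NULL,
				    &my_proc_ops, dev);
}
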
diff --git a/drivers/video/clps711xfb.c b/drivers/video/clps711xfb.c index 17b5267f44d7..9f8a389dc7ae 100644 --- a/drivers/video/clps711xfb.c +++ b/drivers/video/clps711xfb.c | |||
@@ -381,7 +381,7 @@ int __init clps711xfb_init(void) | |||
381 | 381 | ||
382 | /* Register the /proc entries. */ | 382 | /* Register the /proc entries. */ |
383 | clps7111fb_backlight_proc_entry = create_proc_entry("backlight", 0444, | 383 | clps7111fb_backlight_proc_entry = create_proc_entry("backlight", 0444, |
384 | &proc_root); | 384 | NULL); |
385 | if (clps7111fb_backlight_proc_entry == NULL) { | 385 | if (clps7111fb_backlight_proc_entry == NULL) { |
386 | printk("Couldn't create the /proc entry for the backlight.\n"); | 386 | printk("Couldn't create the /proc entry for the backlight.\n"); |
387 | return -EINVAL; | 387 | return -EINVAL; |
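
The clps711xfb change is only about the parent argument: with proc_root no longer exported, passing NULL to create_proc_entry() places the entry at the top level of /proc. For instance:

#include <linux/proc_fs.h>

static struct proc_dir_entry *backlight_entry;

static int register_backlight_entry(void)
{
	/* NULL parent now means "directly under /proc", as &proc_root did. */
	backlight_entry = create_proc_entry("backlight", 0444, NULL);
	return backlight_entry ? 0 : -EINVAL;
}
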
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c index 8eda7b60df8f..ad31983b43eb 100644 --- a/drivers/video/console/fbcon.c +++ b/drivers/video/console/fbcon.c | |||
@@ -1881,7 +1881,7 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir, | |||
1881 | scr_memsetw((unsigned short *) (vc->vc_origin + | 1881 | scr_memsetw((unsigned short *) (vc->vc_origin + |
1882 | vc->vc_size_row * | 1882 | vc->vc_size_row * |
1883 | (b - count)), | 1883 | (b - count)), |
1884 | vc->vc_video_erase_char, | 1884 | vc->vc_scrl_erase_char, |
1885 | vc->vc_size_row * count); | 1885 | vc->vc_size_row * count); |
1886 | return 1; | 1886 | return 1; |
1887 | break; | 1887 | break; |
@@ -1953,7 +1953,7 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir, | |||
1953 | scr_memsetw((unsigned short *) (vc->vc_origin + | 1953 | scr_memsetw((unsigned short *) (vc->vc_origin + |
1954 | vc->vc_size_row * | 1954 | vc->vc_size_row * |
1955 | (b - count)), | 1955 | (b - count)), |
1956 | vc->vc_video_erase_char, | 1956 | vc->vc_scrl_erase_char, |
1957 | vc->vc_size_row * count); | 1957 | vc->vc_size_row * count); |
1958 | return 1; | 1958 | return 1; |
1959 | } | 1959 | } |
@@ -1972,7 +1972,7 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir, | |||
1972 | scr_memsetw((unsigned short *) (vc->vc_origin + | 1972 | scr_memsetw((unsigned short *) (vc->vc_origin + |
1973 | vc->vc_size_row * | 1973 | vc->vc_size_row * |
1974 | t), | 1974 | t), |
1975 | vc->vc_video_erase_char, | 1975 | vc->vc_scrl_erase_char, |
1976 | vc->vc_size_row * count); | 1976 | vc->vc_size_row * count); |
1977 | return 1; | 1977 | return 1; |
1978 | break; | 1978 | break; |
@@ -2042,7 +2042,7 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir, | |||
2042 | scr_memsetw((unsigned short *) (vc->vc_origin + | 2042 | scr_memsetw((unsigned short *) (vc->vc_origin + |
2043 | vc->vc_size_row * | 2043 | vc->vc_size_row * |
2044 | t), | 2044 | t), |
2045 | vc->vc_video_erase_char, | 2045 | vc->vc_scrl_erase_char, |
2046 | vc->vc_size_row * count); | 2046 | vc->vc_size_row * count); |
2047 | return 1; | 2047 | return 1; |
2048 | } | 2048 | } |
diff --git a/drivers/video/console/mdacon.c b/drivers/video/console/mdacon.c index bd8d995fe25d..38a296bbdfc9 100644 --- a/drivers/video/console/mdacon.c +++ b/drivers/video/console/mdacon.c | |||
@@ -531,7 +531,7 @@ static void mdacon_cursor(struct vc_data *c, int mode) | |||
531 | 531 | ||
532 | static int mdacon_scroll(struct vc_data *c, int t, int b, int dir, int lines) | 532 | static int mdacon_scroll(struct vc_data *c, int t, int b, int dir, int lines) |
533 | { | 533 | { |
534 | u16 eattr = mda_convert_attr(c->vc_video_erase_char); | 534 | u16 eattr = mda_convert_attr(c->vc_scrl_erase_char); |
535 | 535 | ||
536 | if (!lines) | 536 | if (!lines) |
537 | return 0; | 537 | return 0; |
diff --git a/drivers/video/console/sticon.c b/drivers/video/console/sticon.c index 67a682d6cc7b..a11cc2fdd4cd 100644 --- a/drivers/video/console/sticon.c +++ b/drivers/video/console/sticon.c | |||
@@ -170,12 +170,12 @@ static int sticon_scroll(struct vc_data *conp, int t, int b, int dir, int count) | |||
170 | switch (dir) { | 170 | switch (dir) { |
171 | case SM_UP: | 171 | case SM_UP: |
172 | sti_bmove(sti, t + count, 0, t, 0, b - t - count, conp->vc_cols); | 172 | sti_bmove(sti, t + count, 0, t, 0, b - t - count, conp->vc_cols); |
173 | sti_clear(sti, b - count, 0, count, conp->vc_cols, conp->vc_video_erase_char); | 173 | sti_clear(sti, b - count, 0, count, conp->vc_cols, conp->vc_scrl_erase_char); |
174 | break; | 174 | break; |
175 | 175 | ||
176 | case SM_DOWN: | 176 | case SM_DOWN: |
177 | sti_bmove(sti, t, 0, t + count, 0, b - t - count, conp->vc_cols); | 177 | sti_bmove(sti, t, 0, t + count, 0, b - t - count, conp->vc_cols); |
178 | sti_clear(sti, t, 0, count, conp->vc_cols, conp->vc_video_erase_char); | 178 | sti_clear(sti, t, 0, count, conp->vc_cols, conp->vc_scrl_erase_char); |
179 | break; | 179 | break; |
180 | } | 180 | } |
181 | 181 | ||
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c index 6df29a62d720..bd1f57b259d9 100644 --- a/drivers/video/console/vgacon.c +++ b/drivers/video/console/vgacon.c | |||
@@ -1350,7 +1350,7 @@ static int vgacon_scroll(struct vc_data *c, int t, int b, int dir, | |||
1350 | } else | 1350 | } else |
1351 | c->vc_origin += delta; | 1351 | c->vc_origin += delta; |
1352 | scr_memsetw((u16 *) (c->vc_origin + c->vc_screenbuf_size - | 1352 | scr_memsetw((u16 *) (c->vc_origin + c->vc_screenbuf_size - |
1353 | delta), c->vc_video_erase_char, | 1353 | delta), c->vc_scrl_erase_char, |
1354 | delta); | 1354 | delta); |
1355 | } else { | 1355 | } else { |
1356 | if (oldo - delta < vga_vram_base) { | 1356 | if (oldo - delta < vga_vram_base) { |
@@ -1363,7 +1363,7 @@ static int vgacon_scroll(struct vc_data *c, int t, int b, int dir, | |||
1363 | } else | 1363 | } else |
1364 | c->vc_origin -= delta; | 1364 | c->vc_origin -= delta; |
1365 | c->vc_scr_end = c->vc_origin + c->vc_screenbuf_size; | 1365 | c->vc_scr_end = c->vc_origin + c->vc_screenbuf_size; |
1366 | scr_memsetw((u16 *) (c->vc_origin), c->vc_video_erase_char, | 1366 | scr_memsetw((u16 *) (c->vc_origin), c->vc_scrl_erase_char, |
1367 | delta); | 1367 | delta); |
1368 | } | 1368 | } |
1369 | c->vc_scr_end = c->vc_origin + c->vc_screenbuf_size; | 1369 | c->vc_scr_end = c->vc_origin + c->vc_screenbuf_size; |
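
The fbcon, mdacon, sticon and vgacon hunks above all make one substitution: rows uncovered by scrolling are now filled with vc_scrl_erase_char rather than vc_video_erase_char, letting the scroll-time erase attribute differ from the general screen-erase attribute. A minimal sketch of the shared pattern (row bounds are illustrative):

#include <linux/console_struct.h>
#include <linux/vt_buffer.h>

/* Blank the 'count' freshly exposed rows using the scroll erase char. */
static void blank_scrolled_rows(struct vc_data *vc, unsigned int first_row,
				unsigned int count)
{
	scr_memsetw((unsigned short *)(vc->vc_origin +
				       vc->vc_size_row * first_row),
		    vc->vc_scrl_erase_char,
		    vc->vc_size_row * count);
}
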
diff --git a/drivers/video/matrox/matroxfb_misc.c b/drivers/video/matrox/matroxfb_misc.c index aaa3e538e5da..5b5f072fc1a8 100644 --- a/drivers/video/matrox/matroxfb_misc.c +++ b/drivers/video/matrox/matroxfb_misc.c | |||
@@ -522,8 +522,6 @@ static void parse_bios(unsigned char __iomem* vbios, struct matrox_bios* bd) { | |||
522 | #endif | 522 | #endif |
523 | } | 523 | } |
524 | 524 | ||
525 | #define get_u16(x) (le16_to_cpu(get_unaligned((__u16*)(x)))) | ||
526 | #define get_u32(x) (le32_to_cpu(get_unaligned((__u32*)(x)))) | ||
527 | static int parse_pins1(WPMINFO const struct matrox_bios* bd) { | 525 | static int parse_pins1(WPMINFO const struct matrox_bios* bd) { |
528 | unsigned int maxdac; | 526 | unsigned int maxdac; |
529 | 527 | ||
@@ -532,11 +530,12 @@ static int parse_pins1(WPMINFO const struct matrox_bios* bd) { | |||
532 | case 1: maxdac = 220000; break; | 530 | case 1: maxdac = 220000; break; |
533 | default: maxdac = 240000; break; | 531 | default: maxdac = 240000; break; |
534 | } | 532 | } |
535 | if (get_u16(bd->pins + 24)) { | 533 | if (get_unaligned_le16(bd->pins + 24)) { |
536 | maxdac = get_u16(bd->pins + 24) * 10; | 534 | maxdac = get_unaligned_le16(bd->pins + 24) * 10; |
537 | } | 535 | } |
538 | MINFO->limits.pixel.vcomax = maxdac; | 536 | MINFO->limits.pixel.vcomax = maxdac; |
539 | MINFO->values.pll.system = get_u16(bd->pins + 28) ? get_u16(bd->pins + 28) * 10 : 50000; | 537 | MINFO->values.pll.system = get_unaligned_le16(bd->pins + 28) ? |
538 | get_unaligned_le16(bd->pins + 28) * 10 : 50000; | ||
540 | /* ignore 4MB, 8MB, module clocks */ | 539 | /* ignore 4MB, 8MB, module clocks */ |
541 | MINFO->features.pll.ref_freq = 14318; | 540 | MINFO->features.pll.ref_freq = 14318; |
542 | MINFO->values.reg.mctlwtst = 0x00030101; | 541 | MINFO->values.reg.mctlwtst = 0x00030101; |
@@ -575,7 +574,8 @@ static void default_pins2(WPMINFO2) { | |||
575 | static int parse_pins3(WPMINFO const struct matrox_bios* bd) { | 574 | static int parse_pins3(WPMINFO const struct matrox_bios* bd) { |
576 | MINFO->limits.pixel.vcomax = | 575 | MINFO->limits.pixel.vcomax = |
577 | MINFO->limits.system.vcomax = (bd->pins[36] == 0xFF) ? 230000 : ((bd->pins[36] + 100) * 1000); | 576 | MINFO->limits.system.vcomax = (bd->pins[36] == 0xFF) ? 230000 : ((bd->pins[36] + 100) * 1000); |
578 | MINFO->values.reg.mctlwtst = get_u32(bd->pins + 48) == 0xFFFFFFFF ? 0x01250A21 : get_u32(bd->pins + 48); | 577 | MINFO->values.reg.mctlwtst = get_unaligned_le32(bd->pins + 48) == 0xFFFFFFFF ? |
578 | 0x01250A21 : get_unaligned_le32(bd->pins + 48); | ||
579 | /* memory config */ | 579 | /* memory config */ |
580 | MINFO->values.reg.memrdbk = ((bd->pins[57] << 21) & 0x1E000000) | | 580 | MINFO->values.reg.memrdbk = ((bd->pins[57] << 21) & 0x1E000000) | |
581 | ((bd->pins[57] << 22) & 0x00C00000) | | 581 | ((bd->pins[57] << 22) & 0x00C00000) | |
@@ -601,7 +601,7 @@ static void default_pins3(WPMINFO2) { | |||
601 | static int parse_pins4(WPMINFO const struct matrox_bios* bd) { | 601 | static int parse_pins4(WPMINFO const struct matrox_bios* bd) { |
602 | MINFO->limits.pixel.vcomax = (bd->pins[ 39] == 0xFF) ? 230000 : bd->pins[ 39] * 4000; | 602 | MINFO->limits.pixel.vcomax = (bd->pins[ 39] == 0xFF) ? 230000 : bd->pins[ 39] * 4000; |
603 | MINFO->limits.system.vcomax = (bd->pins[ 38] == 0xFF) ? MINFO->limits.pixel.vcomax : bd->pins[ 38] * 4000; | 603 | MINFO->limits.system.vcomax = (bd->pins[ 38] == 0xFF) ? MINFO->limits.pixel.vcomax : bd->pins[ 38] * 4000; |
604 | MINFO->values.reg.mctlwtst = get_u32(bd->pins + 71); | 604 | MINFO->values.reg.mctlwtst = get_unaligned_le32(bd->pins + 71); |
605 | MINFO->values.reg.memrdbk = ((bd->pins[87] << 21) & 0x1E000000) | | 605 | MINFO->values.reg.memrdbk = ((bd->pins[87] << 21) & 0x1E000000) | |
606 | ((bd->pins[87] << 22) & 0x00C00000) | | 606 | ((bd->pins[87] << 22) & 0x00C00000) | |
607 | ((bd->pins[86] << 1) & 0x000001E0) | | 607 | ((bd->pins[86] << 1) & 0x000001E0) | |
@@ -609,7 +609,7 @@ static int parse_pins4(WPMINFO const struct matrox_bios* bd) { | |||
609 | MINFO->values.reg.opt = ((bd->pins[53] << 15) & 0x00400000) | | 609 | MINFO->values.reg.opt = ((bd->pins[53] << 15) & 0x00400000) | |
610 | ((bd->pins[53] << 22) & 0x10000000) | | 610 | ((bd->pins[53] << 22) & 0x10000000) | |
611 | ((bd->pins[53] << 7) & 0x00001C00); | 611 | ((bd->pins[53] << 7) & 0x00001C00); |
612 | MINFO->values.reg.opt3 = get_u32(bd->pins + 67); | 612 | MINFO->values.reg.opt3 = get_unaligned_le32(bd->pins + 67); |
613 | MINFO->values.pll.system = (bd->pins[ 65] == 0xFF) ? 200000 : bd->pins[ 65] * 4000; | 613 | MINFO->values.pll.system = (bd->pins[ 65] == 0xFF) ? 200000 : bd->pins[ 65] * 4000; |
614 | MINFO->features.pll.ref_freq = (bd->pins[ 92] & 0x01) ? 14318 : 27000; | 614 | MINFO->features.pll.ref_freq = (bd->pins[ 92] & 0x01) ? 14318 : 27000; |
615 | return 0; | 615 | return 0; |
@@ -640,12 +640,12 @@ static int parse_pins5(WPMINFO const struct matrox_bios* bd) { | |||
640 | MINFO->limits.video.vcomin = (bd->pins[122] == 0xFF) ? MINFO->limits.system.vcomin : bd->pins[122] * mult; | 640 | MINFO->limits.video.vcomin = (bd->pins[122] == 0xFF) ? MINFO->limits.system.vcomin : bd->pins[122] * mult; |
641 | MINFO->values.pll.system = | 641 | MINFO->values.pll.system = |
642 | MINFO->values.pll.video = (bd->pins[ 92] == 0xFF) ? 284000 : bd->pins[ 92] * 4000; | 642 | MINFO->values.pll.video = (bd->pins[ 92] == 0xFF) ? 284000 : bd->pins[ 92] * 4000; |
643 | MINFO->values.reg.opt = get_u32(bd->pins+ 48); | 643 | MINFO->values.reg.opt = get_unaligned_le32(bd->pins + 48); |
644 | MINFO->values.reg.opt2 = get_u32(bd->pins+ 52); | 644 | MINFO->values.reg.opt2 = get_unaligned_le32(bd->pins + 52); |
645 | MINFO->values.reg.opt3 = get_u32(bd->pins+ 94); | 645 | MINFO->values.reg.opt3 = get_unaligned_le32(bd->pins + 94); |
646 | MINFO->values.reg.mctlwtst = get_u32(bd->pins+ 98); | 646 | MINFO->values.reg.mctlwtst = get_unaligned_le32(bd->pins + 98); |
647 | MINFO->values.reg.memmisc = get_u32(bd->pins+102); | 647 | MINFO->values.reg.memmisc = get_unaligned_le32(bd->pins + 102); |
648 | MINFO->values.reg.memrdbk = get_u32(bd->pins+106); | 648 | MINFO->values.reg.memrdbk = get_unaligned_le32(bd->pins + 106); |
649 | MINFO->features.pll.ref_freq = (bd->pins[110] & 0x01) ? 14318 : 27000; | 649 | MINFO->features.pll.ref_freq = (bd->pins[110] & 0x01) ? 14318 : 27000; |
650 | MINFO->values.memory.ddr = (bd->pins[114] & 0x60) == 0x20; | 650 | MINFO->values.memory.ddr = (bd->pins[114] & 0x60) == 0x20; |
651 | MINFO->values.memory.dll = (bd->pins[115] & 0x02) != 0; | 651 | MINFO->values.memory.dll = (bd->pins[115] & 0x02) != 0; |
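
matroxfb_misc.c drops its private get_u16()/get_u32() macros because the generic helpers do exactly the same job: read a little-endian 16- or 32-bit value from an arbitrary (possibly unaligned) offset in the BIOS PInS block. Sketch, with an invented wrapper name:

#include <asm/unaligned.h>

/* old: #define get_u32(x) (le32_to_cpu(get_unaligned((__u32 *)(x)))) */
static u32 read_pins_dword(const unsigned char *pins, unsigned int off)
{
	return get_unaligned_le32(pins + off);
}
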
diff --git a/drivers/video/metronomefb.c b/drivers/video/metronomefb.c index 249791286367..cc4c038a1b3f 100644 --- a/drivers/video/metronomefb.c +++ b/drivers/video/metronomefb.c | |||
@@ -206,8 +206,7 @@ static int load_waveform(u8 *mem, size_t size, u8 *metromem, int m, int t, | |||
206 | } | 206 | } |
207 | 207 | ||
208 | /* check waveform mode table address checksum */ | 208 | /* check waveform mode table address checksum */ |
209 | wmta = le32_to_cpu(get_unaligned((__le32 *) wfm_hdr->wmta)); | 209 | wmta = get_unaligned_le32(wfm_hdr->wmta) & 0x00FFFFFF; |
210 | wmta &= 0x00FFFFFF; | ||
211 | cksum_idx = wmta + m*4 + 3; | 210 | cksum_idx = wmta + m*4 + 3; |
212 | if (cksum_idx > size) | 211 | if (cksum_idx > size) |
213 | return -EINVAL; | 212 | return -EINVAL; |
@@ -219,8 +218,7 @@ static int load_waveform(u8 *mem, size_t size, u8 *metromem, int m, int t, | |||
219 | } | 218 | } |
220 | 219 | ||
221 | /* check waveform temperature table address checksum */ | 220 | /* check waveform temperature table address checksum */ |
222 | tta = le32_to_cpu(get_unaligned((int *) (mem + wmta + m*4))); | 221 | tta = get_unaligned_le32(mem + wmta + m * 4) & 0x00FFFFFF; |
223 | tta &= 0x00FFFFFF; | ||
224 | cksum_idx = tta + trn*4 + 3; | 222 | cksum_idx = tta + trn*4 + 3; |
225 | if (cksum_idx > size) | 223 | if (cksum_idx > size) |
226 | return -EINVAL; | 224 | return -EINVAL; |
@@ -233,8 +231,7 @@ static int load_waveform(u8 *mem, size_t size, u8 *metromem, int m, int t, | |||
233 | 231 | ||
234 | /* here we do the real work of putting the waveform into the | 232 | /* here we do the real work of putting the waveform into the |
235 | metromem buffer. this does runlength decoding of the waveform */ | 233 | metromem buffer. this does runlength decoding of the waveform */ |
236 | wfm_idx = le32_to_cpu(get_unaligned((__le32 *) (mem + tta + trn*4))); | 234 | wfm_idx = get_unaligned_le32(mem + tta + trn * 4) & 0x00FFFFFF; |
237 | wfm_idx &= 0x00FFFFFF; | ||
238 | owfm_idx = wfm_idx; | 235 | owfm_idx = wfm_idx; |
239 | if (wfm_idx > size) | 236 | if (wfm_idx > size) |
240 | return -EINVAL; | 237 | return -EINVAL; |
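
In metronomefb the separate read-then-mask steps collapse into one expression, and the stray (int *) cast in the temperature-table read goes away. The waveform header stores 24-bit little-endian offsets, so the pattern is roughly:

#include <asm/unaligned.h>

/* Waveform table addresses are 24-bit LE offsets into the firmware blob. */
static u32 read_wfm_offset(const u8 *p)
{
	return get_unaligned_le32(p) & 0x00FFFFFF;
}
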
diff --git a/drivers/zorro/proc.c b/drivers/zorro/proc.c index 2ce4cebc31d9..099b6fb5b5cb 100644 --- a/drivers/zorro/proc.c +++ b/drivers/zorro/proc.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/types.h> | 13 | #include <linux/types.h> |
14 | #include <linux/zorro.h> | 14 | #include <linux/zorro.h> |
15 | #include <linux/proc_fs.h> | 15 | #include <linux/proc_fs.h> |
16 | #include <linux/seq_file.h> | ||
16 | #include <linux/init.h> | 17 | #include <linux/init.h> |
17 | #include <linux/smp_lock.h> | 18 | #include <linux/smp_lock.h> |
18 | #include <asm/uaccess.h> | 19 | #include <asm/uaccess.h> |
@@ -76,36 +77,58 @@ proc_bus_zorro_read(struct file *file, char __user *buf, size_t nbytes, loff_t * | |||
76 | } | 77 | } |
77 | 78 | ||
78 | static const struct file_operations proc_bus_zorro_operations = { | 79 | static const struct file_operations proc_bus_zorro_operations = { |
80 | .owner = THIS_MODULE, | ||
79 | .llseek = proc_bus_zorro_lseek, | 81 | .llseek = proc_bus_zorro_lseek, |
80 | .read = proc_bus_zorro_read, | 82 | .read = proc_bus_zorro_read, |
81 | }; | 83 | }; |
82 | 84 | ||
83 | static int | 85 | static void * zorro_seq_start(struct seq_file *m, loff_t *pos) |
84 | get_zorro_dev_info(char *buf, char **start, off_t pos, int count) | ||
85 | { | 86 | { |
86 | u_int slot; | 87 | return (*pos < zorro_num_autocon) ? pos : NULL; |
87 | off_t at = 0; | 88 | } |
88 | int len, cnt; | 89 | |
89 | 90 | static void * zorro_seq_next(struct seq_file *m, void *v, loff_t *pos) | |
90 | for (slot = cnt = 0; slot < zorro_num_autocon && count > cnt; slot++) { | 91 | { |
91 | struct zorro_dev *z = &zorro_autocon[slot]; | 92 | (*pos)++; |
92 | len = sprintf(buf, "%02x\t%08x\t%08lx\t%08lx\t%02x\n", slot, | 93 | return (*pos < zorro_num_autocon) ? pos : NULL; |
93 | z->id, (unsigned long)zorro_resource_start(z), | 94 | } |
94 | (unsigned long)zorro_resource_len(z), | 95 | |
95 | z->rom.er_Type); | 96 | static void zorro_seq_stop(struct seq_file *m, void *v) |
96 | at += len; | 97 | { |
97 | if (at >= pos) { | 98 | } |
98 | if (!*start) { | 99 | |
99 | *start = buf + (pos - (at - len)); | 100 | static int zorro_seq_show(struct seq_file *m, void *v) |
100 | cnt = at - pos; | 101 | { |
101 | } else | 102 | u_int slot = *(loff_t *)v; |
102 | cnt += len; | 103 | struct zorro_dev *z = &zorro_autocon[slot]; |
103 | buf += len; | 104 | |
104 | } | 105 | seq_printf(m, "%02x\t%08x\t%08lx\t%08lx\t%02x\n", slot, z->id, |
105 | } | 106 | (unsigned long)zorro_resource_start(z), |
106 | return (count > cnt) ? cnt : count; | 107 | (unsigned long)zorro_resource_len(z), |
108 | z->rom.er_Type); | ||
109 | return 0; | ||
110 | } | ||
111 | |||
112 | static const struct seq_operations zorro_devices_seq_ops = { | ||
113 | .start = zorro_seq_start, | ||
114 | .next = zorro_seq_next, | ||
115 | .stop = zorro_seq_stop, | ||
116 | .show = zorro_seq_show, | ||
117 | }; | ||
118 | |||
119 | static int zorro_devices_proc_open(struct inode *inode, struct file *file) | ||
120 | { | ||
121 | return seq_open(file, &zorro_devices_seq_ops); | ||
107 | } | 122 | } |
108 | 123 | ||
124 | static const struct file_operations zorro_devices_proc_fops = { | ||
125 | .owner = THIS_MODULE, | ||
126 | .open = zorro_devices_proc_open, | ||
127 | .read = seq_read, | ||
128 | .llseek = seq_lseek, | ||
129 | .release = seq_release, | ||
130 | }; | ||
131 | |||
109 | static struct proc_dir_entry *proc_bus_zorro_dir; | 132 | static struct proc_dir_entry *proc_bus_zorro_dir; |
110 | 133 | ||
111 | static int __init zorro_proc_attach_device(u_int slot) | 134 | static int __init zorro_proc_attach_device(u_int slot) |
@@ -114,11 +137,11 @@ static int __init zorro_proc_attach_device(u_int slot) | |||
114 | char name[4]; | 137 | char name[4]; |
115 | 138 | ||
116 | sprintf(name, "%02x", slot); | 139 | sprintf(name, "%02x", slot); |
117 | entry = create_proc_entry(name, 0, proc_bus_zorro_dir); | 140 | entry = proc_create_data(name, 0, proc_bus_zorro_dir, |
141 | &proc_bus_zorro_operations, | ||
142 | &zorro_autocon[slot]); | ||
118 | if (!entry) | 143 | if (!entry) |
119 | return -ENOMEM; | 144 | return -ENOMEM; |
120 | entry->proc_fops = &proc_bus_zorro_operations; | ||
121 | entry->data = &zorro_autocon[slot]; | ||
122 | entry->size = sizeof(struct zorro_dev); | 145 | entry->size = sizeof(struct zorro_dev); |
123 | return 0; | 146 | return 0; |
124 | } | 147 | } |
@@ -128,9 +151,9 @@ static int __init zorro_proc_init(void) | |||
128 | u_int slot; | 151 | u_int slot; |
129 | 152 | ||
130 | if (MACH_IS_AMIGA && AMIGAHW_PRESENT(ZORRO)) { | 153 | if (MACH_IS_AMIGA && AMIGAHW_PRESENT(ZORRO)) { |
131 | proc_bus_zorro_dir = proc_mkdir("zorro", proc_bus); | 154 | proc_bus_zorro_dir = proc_mkdir("bus/zorro", NULL); |
132 | create_proc_info_entry("devices", 0, proc_bus_zorro_dir, | 155 | proc_create("devices", 0, proc_bus_zorro_dir, |
133 | get_zorro_dev_info); | 156 | &zorro_devices_proc_fops); |
134 | for (slot = 0; slot < zorro_num_autocon; slot++) | 157 | for (slot = 0; slot < zorro_num_autocon; slot++) |
135 | zorro_proc_attach_device(slot); | 158 | zorro_proc_attach_device(slot); |
136 | } | 159 | } |
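
drivers/zorro/proc.c combines both themes of this series: the "devices" listing moves from the old get_info-style callback (create_proc_info_entry) to the seq_file iterator API registered with proc_create(), and the per-slot entries use proc_create_data() so the proc_fops/data fields no longer need to be poked by hand. The general shape of such a seq_file conversion, shown here with a hypothetical item count standing in for zorro_num_autocon, is:

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

extern unsigned int nitems;	/* number of records to list (hypothetical) */

static void *my_seq_start(struct seq_file *m, loff_t *pos)
{
	return (*pos < nitems) ? pos : NULL;
}

static void *my_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return (*pos < nitems) ? pos : NULL;
}

static void my_seq_stop(struct seq_file *m, void *v)
{
}

static int my_seq_show(struct seq_file *m, void *v)
{
	/* One line of output per record; index recovered from the iterator. */
	seq_printf(m, "%02x\n", (unsigned int)*(loff_t *)v);
	return 0;
}

static const struct seq_operations my_seq_ops = {
	.start	= my_seq_start,
	.next	= my_seq_next,
	.stop	= my_seq_stop,
	.show	= my_seq_show,
};

static int my_proc_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &my_seq_ops);
}

static const struct file_operations my_proc_fops = {
	.owner	 = THIS_MODULE,
	.open	 = my_proc_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};
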