diff options
author | Trond Myklebust <Trond.Myklebust@netapp.com> | 2006-12-07 16:35:17 -0500 |
---|---|---|
committer | Trond Myklebust <Trond.Myklebust@netapp.com> | 2006-12-07 16:35:17 -0500 |
commit | 21b4e736922f546e0f1aa7b9d6c442f309a2444a (patch) | |
tree | e1be8645297f8ebe87445251743ebcc52081a20d /drivers/char | |
parent | 34161db6b14d984fb9b06c735b7b42f8803f6851 (diff) | |
parent | 68380b581383c028830f79ec2670f4a193854aa6 (diff) |
Merge branch 'master' of /home/trondmy/kernel/linux-2.6/ into merge_linus
Diffstat (limited to 'drivers/char')
33 files changed, 1791 insertions, 932 deletions
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c index 00b17ae39736..2f2c4efff8a3 100644 --- a/drivers/char/agp/amd64-agp.c +++ b/drivers/char/agp/amd64-agp.c | |||
@@ -459,7 +459,7 @@ static const struct aper_size_info_32 nforce3_sizes[5] = | |||
459 | 459 | ||
460 | /* Handle shadow device of the Nvidia NForce3 */ | 460 | /* Handle shadow device of the Nvidia NForce3 */ |
461 | /* CHECK-ME original 2.4 version set up some IORRs. Check if that is needed. */ | 461 | /* CHECK-ME original 2.4 version set up some IORRs. Check if that is needed. */ |
462 | static int __devinit nforce3_agp_init(struct pci_dev *pdev) | 462 | static int nforce3_agp_init(struct pci_dev *pdev) |
463 | { | 463 | { |
464 | u32 tmp, apbase, apbar, aplimit; | 464 | u32 tmp, apbase, apbar, aplimit; |
465 | struct pci_dev *dev1; | 465 | struct pci_dev *dev1; |
diff --git a/drivers/char/decserial.c b/drivers/char/decserial.c index 85f404e25c73..8ea2bea2b183 100644 --- a/drivers/char/decserial.c +++ b/drivers/char/decserial.c | |||
@@ -23,20 +23,12 @@ | |||
23 | extern int zs_init(void); | 23 | extern int zs_init(void); |
24 | #endif | 24 | #endif |
25 | 25 | ||
26 | #ifdef CONFIG_DZ | ||
27 | extern int dz_init(void); | ||
28 | #endif | ||
29 | |||
30 | #ifdef CONFIG_SERIAL_CONSOLE | 26 | #ifdef CONFIG_SERIAL_CONSOLE |
31 | 27 | ||
32 | #ifdef CONFIG_ZS | 28 | #ifdef CONFIG_ZS |
33 | extern void zs_serial_console_init(void); | 29 | extern void zs_serial_console_init(void); |
34 | #endif | 30 | #endif |
35 | 31 | ||
36 | #ifdef CONFIG_DZ | ||
37 | extern void dz_serial_console_init(void); | ||
38 | #endif | ||
39 | |||
40 | #endif | 32 | #endif |
41 | 33 | ||
42 | /* rs_init - starts up the serial interface - | 34 | /* rs_init - starts up the serial interface - |
@@ -46,23 +38,11 @@ extern void dz_serial_console_init(void); | |||
46 | 38 | ||
47 | int __init rs_init(void) | 39 | int __init rs_init(void) |
48 | { | 40 | { |
49 | 41 | #ifdef CONFIG_ZS | |
50 | #if defined(CONFIG_ZS) && defined(CONFIG_DZ) | ||
51 | if (IOASIC) | 42 | if (IOASIC) |
52 | return zs_init(); | 43 | return zs_init(); |
53 | else | ||
54 | return dz_init(); | ||
55 | #else | ||
56 | |||
57 | #ifdef CONFIG_ZS | ||
58 | return zs_init(); | ||
59 | #endif | ||
60 | |||
61 | #ifdef CONFIG_DZ | ||
62 | return dz_init(); | ||
63 | #endif | ||
64 | |||
65 | #endif | 44 | #endif |
45 | return -ENXIO; | ||
66 | } | 46 | } |
67 | 47 | ||
68 | __initcall(rs_init); | 48 | __initcall(rs_init); |
@@ -76,21 +56,9 @@ __initcall(rs_init); | |||
76 | */ | 56 | */ |
77 | static int __init decserial_console_init(void) | 57 | static int __init decserial_console_init(void) |
78 | { | 58 | { |
79 | #if defined(CONFIG_ZS) && defined(CONFIG_DZ) | 59 | #ifdef CONFIG_ZS |
80 | if (IOASIC) | 60 | if (IOASIC) |
81 | zs_serial_console_init(); | 61 | zs_serial_console_init(); |
82 | else | ||
83 | dz_serial_console_init(); | ||
84 | #else | ||
85 | |||
86 | #ifdef CONFIG_ZS | ||
87 | zs_serial_console_init(); | ||
88 | #endif | ||
89 | |||
90 | #ifdef CONFIG_DZ | ||
91 | dz_serial_console_init(); | ||
92 | #endif | ||
93 | |||
94 | #endif | 62 | #endif |
95 | return 0; | 63 | return 0; |
96 | } | 64 | } |
diff --git a/drivers/char/drm/drm_sman.c b/drivers/char/drm/drm_sman.c index 425c82336ee0..19c81d2e13d0 100644 --- a/drivers/char/drm/drm_sman.c +++ b/drivers/char/drm/drm_sman.c | |||
@@ -162,6 +162,7 @@ drm_sman_set_manager(drm_sman_t * sman, unsigned int manager, | |||
162 | 162 | ||
163 | return 0; | 163 | return 0; |
164 | } | 164 | } |
165 | EXPORT_SYMBOL(drm_sman_set_manager); | ||
165 | 166 | ||
166 | static drm_owner_item_t *drm_sman_get_owner_item(drm_sman_t * sman, | 167 | static drm_owner_item_t *drm_sman_get_owner_item(drm_sman_t * sman, |
167 | unsigned long owner) | 168 | unsigned long owner) |
diff --git a/drivers/char/drm/drm_vm.c b/drivers/char/drm/drm_vm.c index b40ae438f531..ae2691942ddb 100644 --- a/drivers/char/drm/drm_vm.c +++ b/drivers/char/drm/drm_vm.c | |||
@@ -147,14 +147,14 @@ static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma, | |||
147 | if (address > vma->vm_end) | 147 | if (address > vma->vm_end) |
148 | return NOPAGE_SIGBUS; /* Disallow mremap */ | 148 | return NOPAGE_SIGBUS; /* Disallow mremap */ |
149 | if (!map) | 149 | if (!map) |
150 | return NOPAGE_OOM; /* Nothing allocated */ | 150 | return NOPAGE_SIGBUS; /* Nothing allocated */ |
151 | 151 | ||
152 | offset = address - vma->vm_start; | 152 | offset = address - vma->vm_start; |
153 | i = (unsigned long)map->handle + offset; | 153 | i = (unsigned long)map->handle + offset; |
154 | page = (map->type == _DRM_CONSISTENT) ? | 154 | page = (map->type == _DRM_CONSISTENT) ? |
155 | virt_to_page((void *)i) : vmalloc_to_page((void *)i); | 155 | virt_to_page((void *)i) : vmalloc_to_page((void *)i); |
156 | if (!page) | 156 | if (!page) |
157 | return NOPAGE_OOM; | 157 | return NOPAGE_SIGBUS; |
158 | get_page(page); | 158 | get_page(page); |
159 | 159 | ||
160 | DRM_DEBUG("shm_nopage 0x%lx\n", address); | 160 | DRM_DEBUG("shm_nopage 0x%lx\n", address); |
@@ -272,7 +272,7 @@ static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma, | |||
272 | if (address > vma->vm_end) | 272 | if (address > vma->vm_end) |
273 | return NOPAGE_SIGBUS; /* Disallow mremap */ | 273 | return NOPAGE_SIGBUS; /* Disallow mremap */ |
274 | if (!dma->pagelist) | 274 | if (!dma->pagelist) |
275 | return NOPAGE_OOM; /* Nothing allocated */ | 275 | return NOPAGE_SIGBUS; /* Nothing allocated */ |
276 | 276 | ||
277 | offset = address - vma->vm_start; /* vm_[pg]off[set] should be 0 */ | 277 | offset = address - vma->vm_start; /* vm_[pg]off[set] should be 0 */ |
278 | page_nr = offset >> PAGE_SHIFT; | 278 | page_nr = offset >> PAGE_SHIFT; |
@@ -310,7 +310,7 @@ static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma, | |||
310 | if (address > vma->vm_end) | 310 | if (address > vma->vm_end) |
311 | return NOPAGE_SIGBUS; /* Disallow mremap */ | 311 | return NOPAGE_SIGBUS; /* Disallow mremap */ |
312 | if (!entry->pagelist) | 312 | if (!entry->pagelist) |
313 | return NOPAGE_OOM; /* Nothing allocated */ | 313 | return NOPAGE_SIGBUS; /* Nothing allocated */ |
314 | 314 | ||
315 | offset = address - vma->vm_start; | 315 | offset = address - vma->vm_start; |
316 | map_offset = map->offset - (unsigned long)dev->sg->virtual; | 316 | map_offset = map->offset - (unsigned long)dev->sg->virtual; |
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c index 9902ffad3b12..cc2cd46bedc6 100644 --- a/drivers/char/hvc_console.c +++ b/drivers/char/hvc_console.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include <linux/sched.h> | 38 | #include <linux/sched.h> |
39 | #include <linux/spinlock.h> | 39 | #include <linux/spinlock.h> |
40 | #include <linux/delay.h> | 40 | #include <linux/delay.h> |
41 | #include <linux/freezer.h> | ||
41 | 42 | ||
42 | #include <asm/uaccess.h> | 43 | #include <asm/uaccess.h> |
43 | 44 | ||
diff --git a/drivers/char/hvcs.c b/drivers/char/hvcs.c index 8728255c9463..d090622f1dea 100644 --- a/drivers/char/hvcs.c +++ b/drivers/char/hvcs.c | |||
@@ -337,11 +337,6 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp); | |||
337 | static void hvcs_close(struct tty_struct *tty, struct file *filp); | 337 | static void hvcs_close(struct tty_struct *tty, struct file *filp); |
338 | static void hvcs_hangup(struct tty_struct * tty); | 338 | static void hvcs_hangup(struct tty_struct * tty); |
339 | 339 | ||
340 | static void hvcs_create_device_attrs(struct hvcs_struct *hvcsd); | ||
341 | static void hvcs_remove_device_attrs(struct vio_dev *vdev); | ||
342 | static void hvcs_create_driver_attrs(void); | ||
343 | static void hvcs_remove_driver_attrs(void); | ||
344 | |||
345 | static int __devinit hvcs_probe(struct vio_dev *dev, | 340 | static int __devinit hvcs_probe(struct vio_dev *dev, |
346 | const struct vio_device_id *id); | 341 | const struct vio_device_id *id); |
347 | static int __devexit hvcs_remove(struct vio_dev *dev); | 342 | static int __devexit hvcs_remove(struct vio_dev *dev); |
@@ -353,6 +348,172 @@ static void __exit hvcs_module_exit(void); | |||
353 | #define HVCS_TRY_WRITE 0x00000004 | 348 | #define HVCS_TRY_WRITE 0x00000004 |
354 | #define HVCS_READ_MASK (HVCS_SCHED_READ | HVCS_QUICK_READ) | 349 | #define HVCS_READ_MASK (HVCS_SCHED_READ | HVCS_QUICK_READ) |
355 | 350 | ||
351 | static inline struct hvcs_struct *from_vio_dev(struct vio_dev *viod) | ||
352 | { | ||
353 | return viod->dev.driver_data; | ||
354 | } | ||
355 | /* The sysfs interface for the driver and devices */ | ||
356 | |||
357 | static ssize_t hvcs_partner_vtys_show(struct device *dev, struct device_attribute *attr, char *buf) | ||
358 | { | ||
359 | struct vio_dev *viod = to_vio_dev(dev); | ||
360 | struct hvcs_struct *hvcsd = from_vio_dev(viod); | ||
361 | unsigned long flags; | ||
362 | int retval; | ||
363 | |||
364 | spin_lock_irqsave(&hvcsd->lock, flags); | ||
365 | retval = sprintf(buf, "%X\n", hvcsd->p_unit_address); | ||
366 | spin_unlock_irqrestore(&hvcsd->lock, flags); | ||
367 | return retval; | ||
368 | } | ||
369 | static DEVICE_ATTR(partner_vtys, S_IRUGO, hvcs_partner_vtys_show, NULL); | ||
370 | |||
371 | static ssize_t hvcs_partner_clcs_show(struct device *dev, struct device_attribute *attr, char *buf) | ||
372 | { | ||
373 | struct vio_dev *viod = to_vio_dev(dev); | ||
374 | struct hvcs_struct *hvcsd = from_vio_dev(viod); | ||
375 | unsigned long flags; | ||
376 | int retval; | ||
377 | |||
378 | spin_lock_irqsave(&hvcsd->lock, flags); | ||
379 | retval = sprintf(buf, "%s\n", &hvcsd->p_location_code[0]); | ||
380 | spin_unlock_irqrestore(&hvcsd->lock, flags); | ||
381 | return retval; | ||
382 | } | ||
383 | static DEVICE_ATTR(partner_clcs, S_IRUGO, hvcs_partner_clcs_show, NULL); | ||
384 | |||
385 | static ssize_t hvcs_current_vty_store(struct device *dev, struct device_attribute *attr, const char * buf, | ||
386 | size_t count) | ||
387 | { | ||
388 | /* | ||
389 | * Don't need this feature at the present time because firmware doesn't | ||
390 | * yet support multiple partners. | ||
391 | */ | ||
392 | printk(KERN_INFO "HVCS: Denied current_vty change: -EPERM.\n"); | ||
393 | return -EPERM; | ||
394 | } | ||
395 | |||
396 | static ssize_t hvcs_current_vty_show(struct device *dev, struct device_attribute *attr, char *buf) | ||
397 | { | ||
398 | struct vio_dev *viod = to_vio_dev(dev); | ||
399 | struct hvcs_struct *hvcsd = from_vio_dev(viod); | ||
400 | unsigned long flags; | ||
401 | int retval; | ||
402 | |||
403 | spin_lock_irqsave(&hvcsd->lock, flags); | ||
404 | retval = sprintf(buf, "%s\n", &hvcsd->p_location_code[0]); | ||
405 | spin_unlock_irqrestore(&hvcsd->lock, flags); | ||
406 | return retval; | ||
407 | } | ||
408 | |||
409 | static DEVICE_ATTR(current_vty, | ||
410 | S_IRUGO | S_IWUSR, hvcs_current_vty_show, hvcs_current_vty_store); | ||
411 | |||
412 | static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribute *attr, const char *buf, | ||
413 | size_t count) | ||
414 | { | ||
415 | struct vio_dev *viod = to_vio_dev(dev); | ||
416 | struct hvcs_struct *hvcsd = from_vio_dev(viod); | ||
417 | unsigned long flags; | ||
418 | |||
419 | /* writing a '0' to this sysfs entry will result in the disconnect. */ | ||
420 | if (simple_strtol(buf, NULL, 0) != 0) | ||
421 | return -EINVAL; | ||
422 | |||
423 | spin_lock_irqsave(&hvcsd->lock, flags); | ||
424 | |||
425 | if (hvcsd->open_count > 0) { | ||
426 | spin_unlock_irqrestore(&hvcsd->lock, flags); | ||
427 | printk(KERN_INFO "HVCS: vterm state unchanged. " | ||
428 | "The hvcs device node is still in use.\n"); | ||
429 | return -EPERM; | ||
430 | } | ||
431 | |||
432 | if (hvcsd->connected == 0) { | ||
433 | spin_unlock_irqrestore(&hvcsd->lock, flags); | ||
434 | printk(KERN_INFO "HVCS: vterm state unchanged. The" | ||
435 | " vty-server is not connected to a vty.\n"); | ||
436 | return -EPERM; | ||
437 | } | ||
438 | |||
439 | hvcs_partner_free(hvcsd); | ||
440 | printk(KERN_INFO "HVCS: Closed vty-server@%X and" | ||
441 | " partner vty@%X:%d connection.\n", | ||
442 | hvcsd->vdev->unit_address, | ||
443 | hvcsd->p_unit_address, | ||
444 | (uint32_t)hvcsd->p_partition_ID); | ||
445 | |||
446 | spin_unlock_irqrestore(&hvcsd->lock, flags); | ||
447 | return count; | ||
448 | } | ||
449 | |||
450 | static ssize_t hvcs_vterm_state_show(struct device *dev, struct device_attribute *attr, char *buf) | ||
451 | { | ||
452 | struct vio_dev *viod = to_vio_dev(dev); | ||
453 | struct hvcs_struct *hvcsd = from_vio_dev(viod); | ||
454 | unsigned long flags; | ||
455 | int retval; | ||
456 | |||
457 | spin_lock_irqsave(&hvcsd->lock, flags); | ||
458 | retval = sprintf(buf, "%d\n", hvcsd->connected); | ||
459 | spin_unlock_irqrestore(&hvcsd->lock, flags); | ||
460 | return retval; | ||
461 | } | ||
462 | static DEVICE_ATTR(vterm_state, S_IRUGO | S_IWUSR, | ||
463 | hvcs_vterm_state_show, hvcs_vterm_state_store); | ||
464 | |||
465 | static ssize_t hvcs_index_show(struct device *dev, struct device_attribute *attr, char *buf) | ||
466 | { | ||
467 | struct vio_dev *viod = to_vio_dev(dev); | ||
468 | struct hvcs_struct *hvcsd = from_vio_dev(viod); | ||
469 | unsigned long flags; | ||
470 | int retval; | ||
471 | |||
472 | spin_lock_irqsave(&hvcsd->lock, flags); | ||
473 | retval = sprintf(buf, "%d\n", hvcsd->index); | ||
474 | spin_unlock_irqrestore(&hvcsd->lock, flags); | ||
475 | return retval; | ||
476 | } | ||
477 | |||
478 | static DEVICE_ATTR(index, S_IRUGO, hvcs_index_show, NULL); | ||
479 | |||
480 | static struct attribute *hvcs_attrs[] = { | ||
481 | &dev_attr_partner_vtys.attr, | ||
482 | &dev_attr_partner_clcs.attr, | ||
483 | &dev_attr_current_vty.attr, | ||
484 | &dev_attr_vterm_state.attr, | ||
485 | &dev_attr_index.attr, | ||
486 | NULL, | ||
487 | }; | ||
488 | |||
489 | static struct attribute_group hvcs_attr_group = { | ||
490 | .attrs = hvcs_attrs, | ||
491 | }; | ||
492 | |||
493 | static ssize_t hvcs_rescan_show(struct device_driver *ddp, char *buf) | ||
494 | { | ||
495 | /* A 1 means it is updating, a 0 means it is done updating */ | ||
496 | return snprintf(buf, PAGE_SIZE, "%d\n", hvcs_rescan_status); | ||
497 | } | ||
498 | |||
499 | static ssize_t hvcs_rescan_store(struct device_driver *ddp, const char * buf, | ||
500 | size_t count) | ||
501 | { | ||
502 | if ((simple_strtol(buf, NULL, 0) != 1) | ||
503 | && (hvcs_rescan_status != 0)) | ||
504 | return -EINVAL; | ||
505 | |||
506 | hvcs_rescan_status = 1; | ||
507 | printk(KERN_INFO "HVCS: rescanning partner info for all" | ||
508 | " vty-servers.\n"); | ||
509 | hvcs_rescan_devices_list(); | ||
510 | hvcs_rescan_status = 0; | ||
511 | return count; | ||
512 | } | ||
513 | |||
514 | static DRIVER_ATTR(rescan, | ||
515 | S_IRUGO | S_IWUSR, hvcs_rescan_show, hvcs_rescan_store); | ||
516 | |||
356 | static void hvcs_kick(void) | 517 | static void hvcs_kick(void) |
357 | { | 518 | { |
358 | hvcs_kicked = 1; | 519 | hvcs_kicked = 1; |
@@ -575,7 +736,7 @@ static void destroy_hvcs_struct(struct kobject *kobj) | |||
575 | spin_unlock_irqrestore(&hvcsd->lock, flags); | 736 | spin_unlock_irqrestore(&hvcsd->lock, flags); |
576 | spin_unlock(&hvcs_structs_lock); | 737 | spin_unlock(&hvcs_structs_lock); |
577 | 738 | ||
578 | hvcs_remove_device_attrs(vdev); | 739 | sysfs_remove_group(&vdev->dev.kobj, &hvcs_attr_group); |
579 | 740 | ||
580 | kfree(hvcsd); | 741 | kfree(hvcsd); |
581 | } | 742 | } |
@@ -608,6 +769,7 @@ static int __devinit hvcs_probe( | |||
608 | { | 769 | { |
609 | struct hvcs_struct *hvcsd; | 770 | struct hvcs_struct *hvcsd; |
610 | int index; | 771 | int index; |
772 | int retval; | ||
611 | 773 | ||
612 | if (!dev || !id) { | 774 | if (!dev || !id) { |
613 | printk(KERN_ERR "HVCS: probed with invalid parameter.\n"); | 775 | printk(KERN_ERR "HVCS: probed with invalid parameter.\n"); |
@@ -658,14 +820,16 @@ static int __devinit hvcs_probe( | |||
658 | * the hvcs_struct has been added to the devices list then the user app | 820 | * the hvcs_struct has been added to the devices list then the user app |
659 | * will get -ENODEV. | 821 | * will get -ENODEV. |
660 | */ | 822 | */ |
661 | |||
662 | spin_lock(&hvcs_structs_lock); | 823 | spin_lock(&hvcs_structs_lock); |
663 | |||
664 | list_add_tail(&(hvcsd->next), &hvcs_structs); | 824 | list_add_tail(&(hvcsd->next), &hvcs_structs); |
665 | |||
666 | spin_unlock(&hvcs_structs_lock); | 825 | spin_unlock(&hvcs_structs_lock); |
667 | 826 | ||
668 | hvcs_create_device_attrs(hvcsd); | 827 | retval = sysfs_create_group(&dev->dev.kobj, &hvcs_attr_group); |
828 | if (retval) { | ||
829 | printk(KERN_ERR "HVCS: Can't create sysfs attrs for vty-server@%X\n", | ||
830 | hvcsd->vdev->unit_address); | ||
831 | return retval; | ||
832 | } | ||
669 | 833 | ||
670 | printk(KERN_INFO "HVCS: vty-server@%X added to the vio bus.\n", dev->unit_address); | 834 | printk(KERN_INFO "HVCS: vty-server@%X added to the vio bus.\n", dev->unit_address); |
671 | 835 | ||
@@ -1354,8 +1518,10 @@ static int __init hvcs_module_init(void) | |||
1354 | if (!hvcs_tty_driver) | 1518 | if (!hvcs_tty_driver) |
1355 | return -ENOMEM; | 1519 | return -ENOMEM; |
1356 | 1520 | ||
1357 | if (hvcs_alloc_index_list(num_ttys_to_alloc)) | 1521 | if (hvcs_alloc_index_list(num_ttys_to_alloc)) { |
1358 | return -ENOMEM; | 1522 | rc = -ENOMEM; |
1523 | goto index_fail; | ||
1524 | } | ||
1359 | 1525 | ||
1360 | hvcs_tty_driver->owner = THIS_MODULE; | 1526 | hvcs_tty_driver->owner = THIS_MODULE; |
1361 | 1527 | ||
@@ -1385,41 +1551,57 @@ static int __init hvcs_module_init(void) | |||
1385 | * dynamically assigned major and minor numbers for our devices. | 1551 | * dynamically assigned major and minor numbers for our devices. |
1386 | */ | 1552 | */ |
1387 | if (tty_register_driver(hvcs_tty_driver)) { | 1553 | if (tty_register_driver(hvcs_tty_driver)) { |
1388 | printk(KERN_ERR "HVCS: registration " | 1554 | printk(KERN_ERR "HVCS: registration as a tty driver failed.\n"); |
1389 | " as a tty driver failed.\n"); | 1555 | rc = -EIO; |
1390 | hvcs_free_index_list(); | 1556 | goto register_fail; |
1391 | put_tty_driver(hvcs_tty_driver); | ||
1392 | return -EIO; | ||
1393 | } | 1557 | } |
1394 | 1558 | ||
1395 | hvcs_pi_buff = kmalloc(PAGE_SIZE, GFP_KERNEL); | 1559 | hvcs_pi_buff = kmalloc(PAGE_SIZE, GFP_KERNEL); |
1396 | if (!hvcs_pi_buff) { | 1560 | if (!hvcs_pi_buff) { |
1397 | tty_unregister_driver(hvcs_tty_driver); | 1561 | rc = -ENOMEM; |
1398 | hvcs_free_index_list(); | 1562 | goto buff_alloc_fail; |
1399 | put_tty_driver(hvcs_tty_driver); | ||
1400 | return -ENOMEM; | ||
1401 | } | 1563 | } |
1402 | 1564 | ||
1403 | hvcs_task = kthread_run(khvcsd, NULL, "khvcsd"); | 1565 | hvcs_task = kthread_run(khvcsd, NULL, "khvcsd"); |
1404 | if (IS_ERR(hvcs_task)) { | 1566 | if (IS_ERR(hvcs_task)) { |
1405 | printk(KERN_ERR "HVCS: khvcsd creation failed. Driver not loaded.\n"); | 1567 | printk(KERN_ERR "HVCS: khvcsd creation failed. Driver not loaded.\n"); |
1406 | kfree(hvcs_pi_buff); | 1568 | rc = -EIO; |
1407 | tty_unregister_driver(hvcs_tty_driver); | 1569 | goto kthread_fail; |
1408 | hvcs_free_index_list(); | ||
1409 | put_tty_driver(hvcs_tty_driver); | ||
1410 | return -EIO; | ||
1411 | } | 1570 | } |
1412 | 1571 | ||
1413 | rc = vio_register_driver(&hvcs_vio_driver); | 1572 | rc = vio_register_driver(&hvcs_vio_driver); |
1573 | if (rc) { | ||
1574 | printk(KERN_ERR "HVCS: can't register vio driver\n"); | ||
1575 | goto vio_fail; | ||
1576 | } | ||
1414 | 1577 | ||
1415 | /* | 1578 | /* |
1416 | * This needs to be done AFTER the vio_register_driver() call or else | 1579 | * This needs to be done AFTER the vio_register_driver() call or else |
1417 | * the kobjects won't be initialized properly. | 1580 | * the kobjects won't be initialized properly. |
1418 | */ | 1581 | */ |
1419 | hvcs_create_driver_attrs(); | 1582 | rc = driver_create_file(&(hvcs_vio_driver.driver), &driver_attr_rescan); |
1583 | if (rc) { | ||
1584 | printk(KERN_ERR "HVCS: sysfs attr create failed\n"); | ||
1585 | goto attr_fail; | ||
1586 | } | ||
1420 | 1587 | ||
1421 | printk(KERN_INFO "HVCS: driver module inserted.\n"); | 1588 | printk(KERN_INFO "HVCS: driver module inserted.\n"); |
1422 | 1589 | ||
1590 | return 0; | ||
1591 | |||
1592 | attr_fail: | ||
1593 | vio_unregister_driver(&hvcs_vio_driver); | ||
1594 | vio_fail: | ||
1595 | kthread_stop(hvcs_task); | ||
1596 | kthread_fail: | ||
1597 | kfree(hvcs_pi_buff); | ||
1598 | buff_alloc_fail: | ||
1599 | tty_unregister_driver(hvcs_tty_driver); | ||
1600 | register_fail: | ||
1601 | hvcs_free_index_list(); | ||
1602 | index_fail: | ||
1603 | put_tty_driver(hvcs_tty_driver); | ||
1604 | hvcs_tty_driver = NULL; | ||
1423 | return rc; | 1605 | return rc; |
1424 | } | 1606 | } |
1425 | 1607 | ||
@@ -1441,7 +1623,7 @@ static void __exit hvcs_module_exit(void) | |||
1441 | hvcs_pi_buff = NULL; | 1623 | hvcs_pi_buff = NULL; |
1442 | spin_unlock(&hvcs_pi_lock); | 1624 | spin_unlock(&hvcs_pi_lock); |
1443 | 1625 | ||
1444 | hvcs_remove_driver_attrs(); | 1626 | driver_remove_file(&hvcs_vio_driver.driver, &driver_attr_rescan); |
1445 | 1627 | ||
1446 | vio_unregister_driver(&hvcs_vio_driver); | 1628 | vio_unregister_driver(&hvcs_vio_driver); |
1447 | 1629 | ||
@@ -1456,191 +1638,3 @@ static void __exit hvcs_module_exit(void) | |||
1456 | 1638 | ||
1457 | module_init(hvcs_module_init); | 1639 | module_init(hvcs_module_init); |
1458 | module_exit(hvcs_module_exit); | 1640 | module_exit(hvcs_module_exit); |
1459 | |||
1460 | static inline struct hvcs_struct *from_vio_dev(struct vio_dev *viod) | ||
1461 | { | ||
1462 | return viod->dev.driver_data; | ||
1463 | } | ||
1464 | /* The sysfs interface for the driver and devices */ | ||
1465 | |||
1466 | static ssize_t hvcs_partner_vtys_show(struct device *dev, struct device_attribute *attr, char *buf) | ||
1467 | { | ||
1468 | struct vio_dev *viod = to_vio_dev(dev); | ||
1469 | struct hvcs_struct *hvcsd = from_vio_dev(viod); | ||
1470 | unsigned long flags; | ||
1471 | int retval; | ||
1472 | |||
1473 | spin_lock_irqsave(&hvcsd->lock, flags); | ||
1474 | retval = sprintf(buf, "%X\n", hvcsd->p_unit_address); | ||
1475 | spin_unlock_irqrestore(&hvcsd->lock, flags); | ||
1476 | return retval; | ||
1477 | } | ||
1478 | static DEVICE_ATTR(partner_vtys, S_IRUGO, hvcs_partner_vtys_show, NULL); | ||
1479 | |||
1480 | static ssize_t hvcs_partner_clcs_show(struct device *dev, struct device_attribute *attr, char *buf) | ||
1481 | { | ||
1482 | struct vio_dev *viod = to_vio_dev(dev); | ||
1483 | struct hvcs_struct *hvcsd = from_vio_dev(viod); | ||
1484 | unsigned long flags; | ||
1485 | int retval; | ||
1486 | |||
1487 | spin_lock_irqsave(&hvcsd->lock, flags); | ||
1488 | retval = sprintf(buf, "%s\n", &hvcsd->p_location_code[0]); | ||
1489 | spin_unlock_irqrestore(&hvcsd->lock, flags); | ||
1490 | return retval; | ||
1491 | } | ||
1492 | static DEVICE_ATTR(partner_clcs, S_IRUGO, hvcs_partner_clcs_show, NULL); | ||
1493 | |||
1494 | static ssize_t hvcs_current_vty_store(struct device *dev, struct device_attribute *attr, const char * buf, | ||
1495 | size_t count) | ||
1496 | { | ||
1497 | /* | ||
1498 | * Don't need this feature at the present time because firmware doesn't | ||
1499 | * yet support multiple partners. | ||
1500 | */ | ||
1501 | printk(KERN_INFO "HVCS: Denied current_vty change: -EPERM.\n"); | ||
1502 | return -EPERM; | ||
1503 | } | ||
1504 | |||
1505 | static ssize_t hvcs_current_vty_show(struct device *dev, struct device_attribute *attr, char *buf) | ||
1506 | { | ||
1507 | struct vio_dev *viod = to_vio_dev(dev); | ||
1508 | struct hvcs_struct *hvcsd = from_vio_dev(viod); | ||
1509 | unsigned long flags; | ||
1510 | int retval; | ||
1511 | |||
1512 | spin_lock_irqsave(&hvcsd->lock, flags); | ||
1513 | retval = sprintf(buf, "%s\n", &hvcsd->p_location_code[0]); | ||
1514 | spin_unlock_irqrestore(&hvcsd->lock, flags); | ||
1515 | return retval; | ||
1516 | } | ||
1517 | |||
1518 | static DEVICE_ATTR(current_vty, | ||
1519 | S_IRUGO | S_IWUSR, hvcs_current_vty_show, hvcs_current_vty_store); | ||
1520 | |||
1521 | static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribute *attr, const char *buf, | ||
1522 | size_t count) | ||
1523 | { | ||
1524 | struct vio_dev *viod = to_vio_dev(dev); | ||
1525 | struct hvcs_struct *hvcsd = from_vio_dev(viod); | ||
1526 | unsigned long flags; | ||
1527 | |||
1528 | /* writing a '0' to this sysfs entry will result in the disconnect. */ | ||
1529 | if (simple_strtol(buf, NULL, 0) != 0) | ||
1530 | return -EINVAL; | ||
1531 | |||
1532 | spin_lock_irqsave(&hvcsd->lock, flags); | ||
1533 | |||
1534 | if (hvcsd->open_count > 0) { | ||
1535 | spin_unlock_irqrestore(&hvcsd->lock, flags); | ||
1536 | printk(KERN_INFO "HVCS: vterm state unchanged. " | ||
1537 | "The hvcs device node is still in use.\n"); | ||
1538 | return -EPERM; | ||
1539 | } | ||
1540 | |||
1541 | if (hvcsd->connected == 0) { | ||
1542 | spin_unlock_irqrestore(&hvcsd->lock, flags); | ||
1543 | printk(KERN_INFO "HVCS: vterm state unchanged. The" | ||
1544 | " vty-server is not connected to a vty.\n"); | ||
1545 | return -EPERM; | ||
1546 | } | ||
1547 | |||
1548 | hvcs_partner_free(hvcsd); | ||
1549 | printk(KERN_INFO "HVCS: Closed vty-server@%X and" | ||
1550 | " partner vty@%X:%d connection.\n", | ||
1551 | hvcsd->vdev->unit_address, | ||
1552 | hvcsd->p_unit_address, | ||
1553 | (uint32_t)hvcsd->p_partition_ID); | ||
1554 | |||
1555 | spin_unlock_irqrestore(&hvcsd->lock, flags); | ||
1556 | return count; | ||
1557 | } | ||
1558 | |||
1559 | static ssize_t hvcs_vterm_state_show(struct device *dev, struct device_attribute *attr, char *buf) | ||
1560 | { | ||
1561 | struct vio_dev *viod = to_vio_dev(dev); | ||
1562 | struct hvcs_struct *hvcsd = from_vio_dev(viod); | ||
1563 | unsigned long flags; | ||
1564 | int retval; | ||
1565 | |||
1566 | spin_lock_irqsave(&hvcsd->lock, flags); | ||
1567 | retval = sprintf(buf, "%d\n", hvcsd->connected); | ||
1568 | spin_unlock_irqrestore(&hvcsd->lock, flags); | ||
1569 | return retval; | ||
1570 | } | ||
1571 | static DEVICE_ATTR(vterm_state, S_IRUGO | S_IWUSR, | ||
1572 | hvcs_vterm_state_show, hvcs_vterm_state_store); | ||
1573 | |||
1574 | static ssize_t hvcs_index_show(struct device *dev, struct device_attribute *attr, char *buf) | ||
1575 | { | ||
1576 | struct vio_dev *viod = to_vio_dev(dev); | ||
1577 | struct hvcs_struct *hvcsd = from_vio_dev(viod); | ||
1578 | unsigned long flags; | ||
1579 | int retval; | ||
1580 | |||
1581 | spin_lock_irqsave(&hvcsd->lock, flags); | ||
1582 | retval = sprintf(buf, "%d\n", hvcsd->index); | ||
1583 | spin_unlock_irqrestore(&hvcsd->lock, flags); | ||
1584 | return retval; | ||
1585 | } | ||
1586 | |||
1587 | static DEVICE_ATTR(index, S_IRUGO, hvcs_index_show, NULL); | ||
1588 | |||
1589 | static struct attribute *hvcs_attrs[] = { | ||
1590 | &dev_attr_partner_vtys.attr, | ||
1591 | &dev_attr_partner_clcs.attr, | ||
1592 | &dev_attr_current_vty.attr, | ||
1593 | &dev_attr_vterm_state.attr, | ||
1594 | &dev_attr_index.attr, | ||
1595 | NULL, | ||
1596 | }; | ||
1597 | |||
1598 | static struct attribute_group hvcs_attr_group = { | ||
1599 | .attrs = hvcs_attrs, | ||
1600 | }; | ||
1601 | |||
1602 | static void hvcs_create_device_attrs(struct hvcs_struct *hvcsd) | ||
1603 | { | ||
1604 | struct vio_dev *vdev = hvcsd->vdev; | ||
1605 | sysfs_create_group(&vdev->dev.kobj, &hvcs_attr_group); | ||
1606 | } | ||
1607 | |||
1608 | static void hvcs_remove_device_attrs(struct vio_dev *vdev) | ||
1609 | { | ||
1610 | sysfs_remove_group(&vdev->dev.kobj, &hvcs_attr_group); | ||
1611 | } | ||
1612 | |||
1613 | static ssize_t hvcs_rescan_show(struct device_driver *ddp, char *buf) | ||
1614 | { | ||
1615 | /* A 1 means it is updating, a 0 means it is done updating */ | ||
1616 | return snprintf(buf, PAGE_SIZE, "%d\n", hvcs_rescan_status); | ||
1617 | } | ||
1618 | |||
1619 | static ssize_t hvcs_rescan_store(struct device_driver *ddp, const char * buf, | ||
1620 | size_t count) | ||
1621 | { | ||
1622 | if ((simple_strtol(buf, NULL, 0) != 1) | ||
1623 | && (hvcs_rescan_status != 0)) | ||
1624 | return -EINVAL; | ||
1625 | |||
1626 | hvcs_rescan_status = 1; | ||
1627 | printk(KERN_INFO "HVCS: rescanning partner info for all" | ||
1628 | " vty-servers.\n"); | ||
1629 | hvcs_rescan_devices_list(); | ||
1630 | hvcs_rescan_status = 0; | ||
1631 | return count; | ||
1632 | } | ||
1633 | static DRIVER_ATTR(rescan, | ||
1634 | S_IRUGO | S_IWUSR, hvcs_rescan_show, hvcs_rescan_store); | ||
1635 | |||
1636 | static void hvcs_create_driver_attrs(void) | ||
1637 | { | ||
1638 | struct device_driver *driverfs = &(hvcs_vio_driver.driver); | ||
1639 | driver_create_file(driverfs, &driver_attr_rescan); | ||
1640 | } | ||
1641 | |||
1642 | static void hvcs_remove_driver_attrs(void) | ||
1643 | { | ||
1644 | struct device_driver *driverfs = &(hvcs_vio_driver.driver); | ||
1645 | driver_remove_file(driverfs, &driver_attr_rescan); | ||
1646 | } | ||
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index 9f7635f75178..5f3acd8e64b8 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig | |||
@@ -3,17 +3,20 @@ | |||
3 | # | 3 | # |
4 | 4 | ||
5 | config HW_RANDOM | 5 | config HW_RANDOM |
6 | bool "Hardware Random Number Generator Core support" | 6 | tristate "Hardware Random Number Generator Core support" |
7 | default y | 7 | default m |
8 | ---help--- | 8 | ---help--- |
9 | Hardware Random Number Generator Core infrastructure. | 9 | Hardware Random Number Generator Core infrastructure. |
10 | 10 | ||
11 | To compile this driver as a module, choose M here: the | ||
12 | module will be called rng-core. | ||
13 | |||
11 | If unsure, say Y. | 14 | If unsure, say Y. |
12 | 15 | ||
13 | config HW_RANDOM_INTEL | 16 | config HW_RANDOM_INTEL |
14 | tristate "Intel HW Random Number Generator support" | 17 | tristate "Intel HW Random Number Generator support" |
15 | depends on HW_RANDOM && (X86 || IA64) && PCI | 18 | depends on HW_RANDOM && (X86 || IA64) && PCI |
16 | default y | 19 | default HW_RANDOM |
17 | ---help--- | 20 | ---help--- |
18 | This driver provides kernel-side support for the Random Number | 21 | This driver provides kernel-side support for the Random Number |
19 | Generator hardware found on Intel i8xx-based motherboards. | 22 | Generator hardware found on Intel i8xx-based motherboards. |
@@ -26,7 +29,7 @@ config HW_RANDOM_INTEL | |||
26 | config HW_RANDOM_AMD | 29 | config HW_RANDOM_AMD |
27 | tristate "AMD HW Random Number Generator support" | 30 | tristate "AMD HW Random Number Generator support" |
28 | depends on HW_RANDOM && X86 && PCI | 31 | depends on HW_RANDOM && X86 && PCI |
29 | default y | 32 | default HW_RANDOM |
30 | ---help--- | 33 | ---help--- |
31 | This driver provides kernel-side support for the Random Number | 34 | This driver provides kernel-side support for the Random Number |
32 | Generator hardware found on AMD 76x-based motherboards. | 35 | Generator hardware found on AMD 76x-based motherboards. |
@@ -39,7 +42,7 @@ config HW_RANDOM_AMD | |||
39 | config HW_RANDOM_GEODE | 42 | config HW_RANDOM_GEODE |
40 | tristate "AMD Geode HW Random Number Generator support" | 43 | tristate "AMD Geode HW Random Number Generator support" |
41 | depends on HW_RANDOM && X86 && PCI | 44 | depends on HW_RANDOM && X86 && PCI |
42 | default y | 45 | default HW_RANDOM |
43 | ---help--- | 46 | ---help--- |
44 | This driver provides kernel-side support for the Random Number | 47 | This driver provides kernel-side support for the Random Number |
45 | Generator hardware found on the AMD Geode LX. | 48 | Generator hardware found on the AMD Geode LX. |
@@ -52,7 +55,7 @@ config HW_RANDOM_GEODE | |||
52 | config HW_RANDOM_VIA | 55 | config HW_RANDOM_VIA |
53 | tristate "VIA HW Random Number Generator support" | 56 | tristate "VIA HW Random Number Generator support" |
54 | depends on HW_RANDOM && X86_32 | 57 | depends on HW_RANDOM && X86_32 |
55 | default y | 58 | default HW_RANDOM |
56 | ---help--- | 59 | ---help--- |
57 | This driver provides kernel-side support for the Random Number | 60 | This driver provides kernel-side support for the Random Number |
58 | Generator hardware found on VIA based motherboards. | 61 | Generator hardware found on VIA based motherboards. |
@@ -65,7 +68,7 @@ config HW_RANDOM_VIA | |||
65 | config HW_RANDOM_IXP4XX | 68 | config HW_RANDOM_IXP4XX |
66 | tristate "Intel IXP4xx NPU HW Random Number Generator support" | 69 | tristate "Intel IXP4xx NPU HW Random Number Generator support" |
67 | depends on HW_RANDOM && ARCH_IXP4XX | 70 | depends on HW_RANDOM && ARCH_IXP4XX |
68 | default y | 71 | default HW_RANDOM |
69 | ---help--- | 72 | ---help--- |
70 | This driver provides kernel-side support for the Random | 73 | This driver provides kernel-side support for the Random |
71 | Number Generator hardware found on the Intel IXP4xx NPU. | 74 | Number Generator hardware found on the Intel IXP4xx NPU. |
@@ -78,7 +81,7 @@ config HW_RANDOM_IXP4XX | |||
78 | config HW_RANDOM_OMAP | 81 | config HW_RANDOM_OMAP |
79 | tristate "OMAP Random Number Generator support" | 82 | tristate "OMAP Random Number Generator support" |
80 | depends on HW_RANDOM && (ARCH_OMAP16XX || ARCH_OMAP24XX) | 83 | depends on HW_RANDOM && (ARCH_OMAP16XX || ARCH_OMAP24XX) |
81 | default y | 84 | default HW_RANDOM |
82 | ---help--- | 85 | ---help--- |
83 | This driver provides kernel-side support for the Random Number | 86 | This driver provides kernel-side support for the Random Number |
84 | Generator hardware found on OMAP16xx and OMAP24xx multimedia | 87 | Generator hardware found on OMAP16xx and OMAP24xx multimedia |
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile index e263ae96f940..c41fa19454e3 100644 --- a/drivers/char/hw_random/Makefile +++ b/drivers/char/hw_random/Makefile | |||
@@ -2,7 +2,8 @@ | |||
2 | # Makefile for HW Random Number Generator (RNG) device drivers. | 2 | # Makefile for HW Random Number Generator (RNG) device drivers. |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-$(CONFIG_HW_RANDOM) += core.o | 5 | obj-$(CONFIG_HW_RANDOM) += rng-core.o |
6 | rng-core-y := core.o | ||
6 | obj-$(CONFIG_HW_RANDOM_INTEL) += intel-rng.o | 7 | obj-$(CONFIG_HW_RANDOM_INTEL) += intel-rng.o |
7 | obj-$(CONFIG_HW_RANDOM_AMD) += amd-rng.o | 8 | obj-$(CONFIG_HW_RANDOM_AMD) += amd-rng.o |
8 | obj-$(CONFIG_HW_RANDOM_GEODE) += geode-rng.o | 9 | obj-$(CONFIG_HW_RANDOM_GEODE) += geode-rng.o |
diff --git a/drivers/char/ip2/i2cmd.h b/drivers/char/ip2/i2cmd.h index baa4e721b758..29277ec6b8ed 100644 --- a/drivers/char/ip2/i2cmd.h +++ b/drivers/char/ip2/i2cmd.h | |||
@@ -367,11 +367,6 @@ static UCHAR cc02[]; | |||
367 | #define CSE_NULL 3 // Replace with a null | 367 | #define CSE_NULL 3 // Replace with a null |
368 | #define CSE_MARK 4 // Replace with a 3-character sequence (as Unix) | 368 | #define CSE_MARK 4 // Replace with a 3-character sequence (as Unix) |
369 | 369 | ||
370 | #define CMD_SET_REPLACEMENT(arg,ch) \ | ||
371 | (((cmdSyntaxPtr)(ct36a))->cmd[1] = (arg), \ | ||
372 | (((cmdSyntaxPtr)(ct36a))->cmd[2] = (ch), \ | ||
373 | (cmdSyntaxPtr)(ct36a)) | ||
374 | |||
375 | #define CSE_REPLACE 0x8 // Replace the errored character with the | 370 | #define CSE_REPLACE 0x8 // Replace the errored character with the |
376 | // replacement character defined here | 371 | // replacement character defined here |
377 | 372 | ||
diff --git a/drivers/char/ip2/i2lib.c b/drivers/char/ip2/i2lib.c index c213fdbdb2b0..78045767ec33 100644 --- a/drivers/char/ip2/i2lib.c +++ b/drivers/char/ip2/i2lib.c | |||
@@ -1016,7 +1016,6 @@ i2Output(i2ChanStrPtr pCh, const char *pSource, int count) | |||
1016 | unsigned short channel; | 1016 | unsigned short channel; |
1017 | unsigned short stuffIndex; | 1017 | unsigned short stuffIndex; |
1018 | unsigned long flags; | 1018 | unsigned long flags; |
1019 | int rc = 0; | ||
1020 | 1019 | ||
1021 | int bailout = 10; | 1020 | int bailout = 10; |
1022 | 1021 | ||
diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c index 0030cd8e2e95..6c59baa887a8 100644 --- a/drivers/char/ipmi/ipmi_bt_sm.c +++ b/drivers/char/ipmi/ipmi_bt_sm.c | |||
@@ -33,11 +33,13 @@ | |||
33 | #include <linux/ipmi_msgdefs.h> /* for completion codes */ | 33 | #include <linux/ipmi_msgdefs.h> /* for completion codes */ |
34 | #include "ipmi_si_sm.h" | 34 | #include "ipmi_si_sm.h" |
35 | 35 | ||
36 | static int bt_debug = 0x00; /* Production value 0, see following flags */ | 36 | #define BT_DEBUG_OFF 0 /* Used in production */ |
37 | #define BT_DEBUG_ENABLE 1 /* Generic messages */ | ||
38 | #define BT_DEBUG_MSG 2 /* Prints all request/response buffers */ | ||
39 | #define BT_DEBUG_STATES 4 /* Verbose look at state changes */ | ||
40 | |||
41 | static int bt_debug = BT_DEBUG_OFF; | ||
37 | 42 | ||
38 | #define BT_DEBUG_ENABLE 1 | ||
39 | #define BT_DEBUG_MSG 2 | ||
40 | #define BT_DEBUG_STATES 4 | ||
41 | module_param(bt_debug, int, 0644); | 43 | module_param(bt_debug, int, 0644); |
42 | MODULE_PARM_DESC(bt_debug, "debug bitmask, 1=enable, 2=messages, 4=states"); | 44 | MODULE_PARM_DESC(bt_debug, "debug bitmask, 1=enable, 2=messages, 4=states"); |
43 | 45 | ||
@@ -47,38 +49,54 @@ MODULE_PARM_DESC(bt_debug, "debug bitmask, 1=enable, 2=messages, 4=states"); | |||
47 | Since the Open IPMI architecture is single-message oriented at this | 49 | Since the Open IPMI architecture is single-message oriented at this |
48 | stage, the queue depth of BT is of no concern. */ | 50 | stage, the queue depth of BT is of no concern. */ |
49 | 51 | ||
50 | #define BT_NORMAL_TIMEOUT 5000000 /* seconds in microseconds */ | 52 | #define BT_NORMAL_TIMEOUT 5 /* seconds */ |
51 | #define BT_RETRY_LIMIT 2 | 53 | #define BT_NORMAL_RETRY_LIMIT 2 |
52 | #define BT_RESET_DELAY 6000000 /* 6 seconds after warm reset */ | 54 | #define BT_RESET_DELAY 6 /* seconds after warm reset */ |
55 | |||
56 | /* States are written in chronological order and usually cover | ||
57 | multiple rows of the state table discussion in the IPMI spec. */ | ||
53 | 58 | ||
54 | enum bt_states { | 59 | enum bt_states { |
55 | BT_STATE_IDLE, | 60 | BT_STATE_IDLE = 0, /* Order is critical in this list */ |
56 | BT_STATE_XACTION_START, | 61 | BT_STATE_XACTION_START, |
57 | BT_STATE_WRITE_BYTES, | 62 | BT_STATE_WRITE_BYTES, |
58 | BT_STATE_WRITE_END, | ||
59 | BT_STATE_WRITE_CONSUME, | 63 | BT_STATE_WRITE_CONSUME, |
60 | BT_STATE_B2H_WAIT, | 64 | BT_STATE_READ_WAIT, |
61 | BT_STATE_READ_END, | 65 | BT_STATE_CLEAR_B2H, |
62 | BT_STATE_RESET1, /* These must come last */ | 66 | BT_STATE_READ_BYTES, |
67 | BT_STATE_RESET1, /* These must come last */ | ||
63 | BT_STATE_RESET2, | 68 | BT_STATE_RESET2, |
64 | BT_STATE_RESET3, | 69 | BT_STATE_RESET3, |
65 | BT_STATE_RESTART, | 70 | BT_STATE_RESTART, |
66 | BT_STATE_HOSED | 71 | BT_STATE_PRINTME, |
72 | BT_STATE_CAPABILITIES_BEGIN, | ||
73 | BT_STATE_CAPABILITIES_END, | ||
74 | BT_STATE_LONG_BUSY /* BT doesn't get hosed :-) */ | ||
67 | }; | 75 | }; |
68 | 76 | ||
77 | /* Macros seen at the end of state "case" blocks. They help with legibility | ||
78 | and debugging. */ | ||
79 | |||
80 | #define BT_STATE_CHANGE(X,Y) { bt->state = X; return Y; } | ||
81 | |||
82 | #define BT_SI_SM_RETURN(Y) { last_printed = BT_STATE_PRINTME; return Y; } | ||
83 | |||
69 | struct si_sm_data { | 84 | struct si_sm_data { |
70 | enum bt_states state; | 85 | enum bt_states state; |
71 | enum bt_states last_state; /* assist printing and resets */ | ||
72 | unsigned char seq; /* BT sequence number */ | 86 | unsigned char seq; /* BT sequence number */ |
73 | struct si_sm_io *io; | 87 | struct si_sm_io *io; |
74 | unsigned char write_data[IPMI_MAX_MSG_LENGTH]; | 88 | unsigned char write_data[IPMI_MAX_MSG_LENGTH]; |
75 | int write_count; | 89 | int write_count; |
76 | unsigned char read_data[IPMI_MAX_MSG_LENGTH]; | 90 | unsigned char read_data[IPMI_MAX_MSG_LENGTH]; |
77 | int read_count; | 91 | int read_count; |
78 | int truncated; | 92 | int truncated; |
79 | long timeout; | 93 | long timeout; /* microseconds countdown */ |
80 | unsigned int error_retries; /* end of "common" fields */ | 94 | int error_retries; /* end of "common" fields */ |
81 | int nonzero_status; /* hung BMCs stay all 0 */ | 95 | int nonzero_status; /* hung BMCs stay all 0 */ |
96 | enum bt_states complete; /* to divert the state machine */ | ||
97 | int BT_CAP_outreqs; | ||
98 | long BT_CAP_req2rsp; | ||
99 | int BT_CAP_retries; /* Recommended retries */ | ||
82 | }; | 100 | }; |
83 | 101 | ||
84 | #define BT_CLR_WR_PTR 0x01 /* See IPMI 1.5 table 11.6.4 */ | 102 | #define BT_CLR_WR_PTR 0x01 /* See IPMI 1.5 table 11.6.4 */ |
@@ -111,86 +129,118 @@ struct si_sm_data { | |||
111 | static char *state2txt(unsigned char state) | 129 | static char *state2txt(unsigned char state) |
112 | { | 130 | { |
113 | switch (state) { | 131 | switch (state) { |
114 | case BT_STATE_IDLE: return("IDLE"); | 132 | case BT_STATE_IDLE: return("IDLE"); |
115 | case BT_STATE_XACTION_START: return("XACTION"); | 133 | case BT_STATE_XACTION_START: return("XACTION"); |
116 | case BT_STATE_WRITE_BYTES: return("WR_BYTES"); | 134 | case BT_STATE_WRITE_BYTES: return("WR_BYTES"); |
117 | case BT_STATE_WRITE_END: return("WR_END"); | 135 | case BT_STATE_WRITE_CONSUME: return("WR_CONSUME"); |
118 | case BT_STATE_WRITE_CONSUME: return("WR_CONSUME"); | 136 | case BT_STATE_READ_WAIT: return("RD_WAIT"); |
119 | case BT_STATE_B2H_WAIT: return("B2H_WAIT"); | 137 | case BT_STATE_CLEAR_B2H: return("CLEAR_B2H"); |
120 | case BT_STATE_READ_END: return("RD_END"); | 138 | case BT_STATE_READ_BYTES: return("RD_BYTES"); |
121 | case BT_STATE_RESET1: return("RESET1"); | 139 | case BT_STATE_RESET1: return("RESET1"); |
122 | case BT_STATE_RESET2: return("RESET2"); | 140 | case BT_STATE_RESET2: return("RESET2"); |
123 | case BT_STATE_RESET3: return("RESET3"); | 141 | case BT_STATE_RESET3: return("RESET3"); |
124 | case BT_STATE_RESTART: return("RESTART"); | 142 | case BT_STATE_RESTART: return("RESTART"); |
125 | case BT_STATE_HOSED: return("HOSED"); | 143 | case BT_STATE_LONG_BUSY: return("LONG_BUSY"); |
144 | case BT_STATE_CAPABILITIES_BEGIN: return("CAP_BEGIN"); | ||
145 | case BT_STATE_CAPABILITIES_END: return("CAP_END"); | ||
126 | } | 146 | } |
127 | return("BAD STATE"); | 147 | return("BAD STATE"); |
128 | } | 148 | } |
129 | #define STATE2TXT state2txt(bt->state) | 149 | #define STATE2TXT state2txt(bt->state) |
130 | 150 | ||
131 | static char *status2txt(unsigned char status, char *buf) | 151 | static char *status2txt(unsigned char status) |
132 | { | 152 | { |
153 | /* | ||
154 | * This cannot be called by two threads at the same time and | ||
155 | * the buffer is always consumed immediately, so the static is | ||
156 | * safe to use. | ||
157 | */ | ||
158 | static char buf[40]; | ||
159 | |||
133 | strcpy(buf, "[ "); | 160 | strcpy(buf, "[ "); |
134 | if (status & BT_B_BUSY) strcat(buf, "B_BUSY "); | 161 | if (status & BT_B_BUSY) |
135 | if (status & BT_H_BUSY) strcat(buf, "H_BUSY "); | 162 | strcat(buf, "B_BUSY "); |
136 | if (status & BT_OEM0) strcat(buf, "OEM0 "); | 163 | if (status & BT_H_BUSY) |
137 | if (status & BT_SMS_ATN) strcat(buf, "SMS "); | 164 | strcat(buf, "H_BUSY "); |
138 | if (status & BT_B2H_ATN) strcat(buf, "B2H "); | 165 | if (status & BT_OEM0) |
139 | if (status & BT_H2B_ATN) strcat(buf, "H2B "); | 166 | strcat(buf, "OEM0 "); |
167 | if (status & BT_SMS_ATN) | ||
168 | strcat(buf, "SMS "); | ||
169 | if (status & BT_B2H_ATN) | ||
170 | strcat(buf, "B2H "); | ||
171 | if (status & BT_H2B_ATN) | ||
172 | strcat(buf, "H2B "); | ||
140 | strcat(buf, "]"); | 173 | strcat(buf, "]"); |
141 | return buf; | 174 | return buf; |
142 | } | 175 | } |
143 | #define STATUS2TXT(buf) status2txt(status, buf) | 176 | #define STATUS2TXT status2txt(status) |
177 | |||
178 | /* called externally at insmod time, and internally on cleanup */ | ||
144 | 179 | ||
145 | /* This will be called from within this module on a hosed condition */ | ||
146 | #define FIRST_SEQ 0 | ||
147 | static unsigned int bt_init_data(struct si_sm_data *bt, struct si_sm_io *io) | 180 | static unsigned int bt_init_data(struct si_sm_data *bt, struct si_sm_io *io) |
148 | { | 181 | { |
149 | bt->state = BT_STATE_IDLE; | 182 | memset(bt, 0, sizeof(struct si_sm_data)); |
150 | bt->last_state = BT_STATE_IDLE; | 183 | if (bt->io != io) { /* external: one-time only things */ |
151 | bt->seq = FIRST_SEQ; | 184 | bt->io = io; |
152 | bt->io = io; | 185 | bt->seq = 0; |
153 | bt->write_count = 0; | 186 | } |
154 | bt->read_count = 0; | 187 | bt->state = BT_STATE_IDLE; /* start here */ |
155 | bt->error_retries = 0; | 188 | bt->complete = BT_STATE_IDLE; /* end here */ |
156 | bt->nonzero_status = 0; | 189 | bt->BT_CAP_req2rsp = BT_NORMAL_TIMEOUT * 1000000; |
157 | bt->truncated = 0; | 190 | bt->BT_CAP_retries = BT_NORMAL_RETRY_LIMIT; |
158 | bt->timeout = BT_NORMAL_TIMEOUT; | 191 | /* BT_CAP_outreqs == zero is a flag to read BT Capabilities */ |
159 | return 3; /* We claim 3 bytes of space; ought to check SPMI table */ | 192 | return 3; /* We claim 3 bytes of space; ought to check SPMI table */ |
160 | } | 193 | } |
161 | 194 | ||
195 | /* Jam a completion code (probably an error) into a response */ | ||
196 | |||
197 | static void force_result(struct si_sm_data *bt, unsigned char completion_code) | ||
198 | { | ||
199 | bt->read_data[0] = 4; /* # following bytes */ | ||
200 | bt->read_data[1] = bt->write_data[1] | 4; /* Odd NetFn/LUN */ | ||
201 | bt->read_data[2] = bt->write_data[2]; /* seq (ignored) */ | ||
202 | bt->read_data[3] = bt->write_data[3]; /* Command */ | ||
203 | bt->read_data[4] = completion_code; | ||
204 | bt->read_count = 5; | ||
205 | } | ||
206 | |||
207 | /* The upper state machine starts here */ | ||
208 | |||
162 | static int bt_start_transaction(struct si_sm_data *bt, | 209 | static int bt_start_transaction(struct si_sm_data *bt, |
163 | unsigned char *data, | 210 | unsigned char *data, |
164 | unsigned int size) | 211 | unsigned int size) |
165 | { | 212 | { |
166 | unsigned int i; | 213 | unsigned int i; |
167 | 214 | ||
168 | if ((size < 2) || (size > (IPMI_MAX_MSG_LENGTH - 2))) | 215 | if (size < 2) |
169 | return -1; | 216 | return IPMI_REQ_LEN_INVALID_ERR; |
217 | if (size > IPMI_MAX_MSG_LENGTH) | ||
218 | return IPMI_REQ_LEN_EXCEEDED_ERR; | ||
170 | 219 | ||
171 | if ((bt->state != BT_STATE_IDLE) && (bt->state != BT_STATE_HOSED)) | 220 | if (bt->state == BT_STATE_LONG_BUSY) |
172 | return -2; | 221 | return IPMI_NODE_BUSY_ERR; |
222 | |||
223 | if (bt->state != BT_STATE_IDLE) | ||
224 | return IPMI_NOT_IN_MY_STATE_ERR; | ||
173 | 225 | ||
174 | if (bt_debug & BT_DEBUG_MSG) { | 226 | if (bt_debug & BT_DEBUG_MSG) { |
175 | printk(KERN_WARNING "+++++++++++++++++++++++++++++++++++++\n"); | 227 | printk(KERN_WARNING "BT: +++++++++++++++++ New command\n"); |
176 | printk(KERN_WARNING "BT: write seq=0x%02X:", bt->seq); | 228 | printk(KERN_WARNING "BT: NetFn/LUN CMD [%d data]:", size - 2); |
177 | for (i = 0; i < size; i ++) | 229 | for (i = 0; i < size; i ++) |
178 | printk (" %02x", data[i]); | 230 | printk (" %02x", data[i]); |
179 | printk("\n"); | 231 | printk("\n"); |
180 | } | 232 | } |
181 | bt->write_data[0] = size + 1; /* all data plus seq byte */ | 233 | bt->write_data[0] = size + 1; /* all data plus seq byte */ |
182 | bt->write_data[1] = *data; /* NetFn/LUN */ | 234 | bt->write_data[1] = *data; /* NetFn/LUN */ |
183 | bt->write_data[2] = bt->seq; | 235 | bt->write_data[2] = bt->seq++; |
184 | memcpy(bt->write_data + 3, data + 1, size - 1); | 236 | memcpy(bt->write_data + 3, data + 1, size - 1); |
185 | bt->write_count = size + 2; | 237 | bt->write_count = size + 2; |
186 | |||
187 | bt->error_retries = 0; | 238 | bt->error_retries = 0; |
188 | bt->nonzero_status = 0; | 239 | bt->nonzero_status = 0; |
189 | bt->read_count = 0; | ||
190 | bt->truncated = 0; | 240 | bt->truncated = 0; |
191 | bt->state = BT_STATE_XACTION_START; | 241 | bt->state = BT_STATE_XACTION_START; |
192 | bt->last_state = BT_STATE_IDLE; | 242 | bt->timeout = bt->BT_CAP_req2rsp; |
193 | bt->timeout = BT_NORMAL_TIMEOUT; | 243 | force_result(bt, IPMI_ERR_UNSPECIFIED); |
194 | return 0; | 244 | return 0; |
195 | } | 245 | } |
196 | 246 | ||
@@ -198,38 +248,30 @@ static int bt_start_transaction(struct si_sm_data *bt, | |||
198 | it calls this. Strip out the length and seq bytes. */ | 248 | it calls this. Strip out the length and seq bytes. */ |
199 | 249 | ||
200 | static int bt_get_result(struct si_sm_data *bt, | 250 | static int bt_get_result(struct si_sm_data *bt, |
201 | unsigned char *data, | 251 | unsigned char *data, |
202 | unsigned int length) | 252 | unsigned int length) |
203 | { | 253 | { |
204 | int i, msg_len; | 254 | int i, msg_len; |
205 | 255 | ||
206 | msg_len = bt->read_count - 2; /* account for length & seq */ | 256 | msg_len = bt->read_count - 2; /* account for length & seq */ |
207 | /* Always NetFn, Cmd, cCode */ | ||
208 | if (msg_len < 3 || msg_len > IPMI_MAX_MSG_LENGTH) { | 257 | if (msg_len < 3 || msg_len > IPMI_MAX_MSG_LENGTH) { |
209 | printk(KERN_DEBUG "BT results: bad msg_len = %d\n", msg_len); | 258 | force_result(bt, IPMI_ERR_UNSPECIFIED); |
210 | data[0] = bt->write_data[1] | 0x4; /* Kludge a response */ | ||
211 | data[1] = bt->write_data[3]; | ||
212 | data[2] = IPMI_ERR_UNSPECIFIED; | ||
213 | msg_len = 3; | 259 | msg_len = 3; |
214 | } else { | 260 | } |
215 | data[0] = bt->read_data[1]; | 261 | data[0] = bt->read_data[1]; |
216 | data[1] = bt->read_data[3]; | 262 | data[1] = bt->read_data[3]; |
217 | if (length < msg_len) | 263 | if (length < msg_len || bt->truncated) { |
218 | bt->truncated = 1; | 264 | data[2] = IPMI_ERR_MSG_TRUNCATED; |
219 | if (bt->truncated) { /* can be set in read_all_bytes() */ | 265 | msg_len = 3; |
220 | data[2] = IPMI_ERR_MSG_TRUNCATED; | 266 | } else |
221 | msg_len = 3; | 267 | memcpy(data + 2, bt->read_data + 4, msg_len - 2); |
222 | } else | ||
223 | memcpy(data + 2, bt->read_data + 4, msg_len - 2); | ||
224 | 268 | ||
225 | if (bt_debug & BT_DEBUG_MSG) { | 269 | if (bt_debug & BT_DEBUG_MSG) { |
226 | printk (KERN_WARNING "BT: res (raw)"); | 270 | printk (KERN_WARNING "BT: result %d bytes:", msg_len); |
227 | for (i = 0; i < msg_len; i++) | 271 | for (i = 0; i < msg_len; i++) |
228 | printk(" %02x", data[i]); | 272 | printk(" %02x", data[i]); |
229 | printk ("\n"); | 273 | printk ("\n"); |
230 | } | ||
231 | } | 274 | } |
232 | bt->read_count = 0; /* paranoia */ | ||
233 | return msg_len; | 275 | return msg_len; |
234 | } | 276 | } |
235 | 277 | ||
@@ -238,22 +280,40 @@ static int bt_get_result(struct si_sm_data *bt, | |||
238 | 280 | ||
239 | static void reset_flags(struct si_sm_data *bt) | 281 | static void reset_flags(struct si_sm_data *bt) |
240 | { | 282 | { |
283 | if (bt_debug) | ||
284 | printk(KERN_WARNING "IPMI BT: flag reset %s\n", | ||
285 | status2txt(BT_STATUS)); | ||
241 | if (BT_STATUS & BT_H_BUSY) | 286 | if (BT_STATUS & BT_H_BUSY) |
242 | BT_CONTROL(BT_H_BUSY); | 287 | BT_CONTROL(BT_H_BUSY); /* force clear */ |
243 | if (BT_STATUS & BT_B_BUSY) | 288 | BT_CONTROL(BT_CLR_WR_PTR); /* always reset */ |
244 | BT_CONTROL(BT_B_BUSY); | 289 | BT_CONTROL(BT_SMS_ATN); /* always clear */ |
245 | BT_CONTROL(BT_CLR_WR_PTR); | 290 | BT_INTMASK_W(BT_BMC_HWRST); |
246 | BT_CONTROL(BT_SMS_ATN); | 291 | } |
247 | 292 | ||
248 | if (BT_STATUS & BT_B2H_ATN) { | 293 | /* Get rid of an unwanted/stale response. This should only be needed for |
249 | int i; | 294 | BMCs that support multiple outstanding requests. */ |
250 | BT_CONTROL(BT_H_BUSY); | 295 | |
251 | BT_CONTROL(BT_B2H_ATN); | 296 | static void drain_BMC2HOST(struct si_sm_data *bt) |
252 | BT_CONTROL(BT_CLR_RD_PTR); | 297 | { |
253 | for (i = 0; i < IPMI_MAX_MSG_LENGTH + 2; i++) | 298 | int i, size; |
254 | BMC2HOST; | 299 | |
255 | BT_CONTROL(BT_H_BUSY); | 300 | if (!(BT_STATUS & BT_B2H_ATN)) /* Not signalling a response */ |
256 | } | 301 | return; |
302 | |||
303 | BT_CONTROL(BT_H_BUSY); /* now set */ | ||
304 | BT_CONTROL(BT_B2H_ATN); /* always clear */ | ||
305 | BT_STATUS; /* pause */ | ||
306 | BT_CONTROL(BT_B2H_ATN); /* some BMCs are stubborn */ | ||
307 | BT_CONTROL(BT_CLR_RD_PTR); /* always reset */ | ||
308 | if (bt_debug) | ||
309 | printk(KERN_WARNING "IPMI BT: stale response %s; ", | ||
310 | status2txt(BT_STATUS)); | ||
311 | size = BMC2HOST; | ||
312 | for (i = 0; i < size ; i++) | ||
313 | BMC2HOST; | ||
314 | BT_CONTROL(BT_H_BUSY); /* now clear */ | ||
315 | if (bt_debug) | ||
316 | printk("drained %d bytes\n", size + 1); | ||
257 | } | 317 | } |
258 | 318 | ||
259 | static inline void write_all_bytes(struct si_sm_data *bt) | 319 | static inline void write_all_bytes(struct si_sm_data *bt) |
@@ -261,201 +321,256 @@ static inline void write_all_bytes(struct si_sm_data *bt) | |||
261 | int i; | 321 | int i; |
262 | 322 | ||
263 | if (bt_debug & BT_DEBUG_MSG) { | 323 | if (bt_debug & BT_DEBUG_MSG) { |
264 | printk(KERN_WARNING "BT: write %d bytes seq=0x%02X", | 324 | printk(KERN_WARNING "BT: write %d bytes seq=0x%02X", |
265 | bt->write_count, bt->seq); | 325 | bt->write_count, bt->seq); |
266 | for (i = 0; i < bt->write_count; i++) | 326 | for (i = 0; i < bt->write_count; i++) |
267 | printk (" %02x", bt->write_data[i]); | 327 | printk (" %02x", bt->write_data[i]); |
268 | printk ("\n"); | 328 | printk ("\n"); |
269 | } | 329 | } |
270 | for (i = 0; i < bt->write_count; i++) | 330 | for (i = 0; i < bt->write_count; i++) |
271 | HOST2BMC(bt->write_data[i]); | 331 | HOST2BMC(bt->write_data[i]); |
272 | } | 332 | } |
273 | 333 | ||
274 | static inline int read_all_bytes(struct si_sm_data *bt) | 334 | static inline int read_all_bytes(struct si_sm_data *bt) |
275 | { | 335 | { |
276 | unsigned char i; | 336 | unsigned char i; |
277 | 337 | ||
338 | /* length is "framing info", minimum = 4: NetFn, Seq, Cmd, cCode. | ||
339 | Keep layout of first four bytes aligned with write_data[] */ | ||
340 | |||
278 | bt->read_data[0] = BMC2HOST; | 341 | bt->read_data[0] = BMC2HOST; |
279 | bt->read_count = bt->read_data[0]; | 342 | bt->read_count = bt->read_data[0]; |
280 | if (bt_debug & BT_DEBUG_MSG) | ||
281 | printk(KERN_WARNING "BT: read %d bytes:", bt->read_count); | ||
282 | 343 | ||
283 | /* minimum: length, NetFn, Seq, Cmd, cCode == 5 total, or 4 more | ||
284 | following the length byte. */ | ||
285 | if (bt->read_count < 4 || bt->read_count >= IPMI_MAX_MSG_LENGTH) { | 344 | if (bt->read_count < 4 || bt->read_count >= IPMI_MAX_MSG_LENGTH) { |
286 | if (bt_debug & BT_DEBUG_MSG) | 345 | if (bt_debug & BT_DEBUG_MSG) |
287 | printk("bad length %d\n", bt->read_count); | 346 | printk(KERN_WARNING "BT: bad raw rsp len=%d\n", |
347 | bt->read_count); | ||
288 | bt->truncated = 1; | 348 | bt->truncated = 1; |
289 | return 1; /* let next XACTION START clean it up */ | 349 | return 1; /* let next XACTION START clean it up */ |
290 | } | 350 | } |
291 | for (i = 1; i <= bt->read_count; i++) | 351 | for (i = 1; i <= bt->read_count; i++) |
292 | bt->read_data[i] = BMC2HOST; | 352 | bt->read_data[i] = BMC2HOST; |
293 | bt->read_count++; /* account for the length byte */ | 353 | bt->read_count++; /* Account internally for length byte */ |
294 | 354 | ||
295 | if (bt_debug & BT_DEBUG_MSG) { | 355 | if (bt_debug & BT_DEBUG_MSG) { |
296 | for (i = 0; i < bt->read_count; i++) | 356 | int max = bt->read_count; |
357 | |||
358 | printk(KERN_WARNING "BT: got %d bytes seq=0x%02X", | ||
359 | max, bt->read_data[2]); | ||
360 | if (max > 16) | ||
361 | max = 16; | ||
362 | for (i = 0; i < max; i++) | ||
297 | printk (" %02x", bt->read_data[i]); | 363 | printk (" %02x", bt->read_data[i]); |
298 | printk ("\n"); | 364 | printk ("%s\n", bt->read_count == max ? "" : " ..."); |
299 | } | 365 | } |
300 | if (bt->seq != bt->write_data[2]) /* idiot check */ | ||
301 | printk(KERN_DEBUG "BT: internal error: sequence mismatch\n"); | ||
302 | 366 | ||
303 | /* per the spec, the (NetFn, Seq, Cmd) tuples should match */ | 367 | /* per the spec, the (NetFn[1], Seq[2], Cmd[3]) tuples must match */ |
304 | if ((bt->read_data[3] == bt->write_data[3]) && /* Cmd */ | 368 | if ((bt->read_data[3] == bt->write_data[3]) && |
305 | (bt->read_data[2] == bt->write_data[2]) && /* Sequence */ | 369 | (bt->read_data[2] == bt->write_data[2]) && |
306 | ((bt->read_data[1] & 0xF8) == (bt->write_data[1] & 0xF8))) | 370 | ((bt->read_data[1] & 0xF8) == (bt->write_data[1] & 0xF8))) |
307 | return 1; | 371 | return 1; |
308 | 372 | ||
309 | if (bt_debug & BT_DEBUG_MSG) | 373 | if (bt_debug & BT_DEBUG_MSG) |
310 | printk(KERN_WARNING "BT: bad packet: " | 374 | printk(KERN_WARNING "IPMI BT: bad packet: " |
311 | "want 0x(%02X, %02X, %02X) got (%02X, %02X, %02X)\n", | 375 | "want 0x(%02X, %02X, %02X) got (%02X, %02X, %02X)\n", |
312 | bt->write_data[1], bt->write_data[2], bt->write_data[3], | 376 | bt->write_data[1] | 0x04, bt->write_data[2], bt->write_data[3], |
313 | bt->read_data[1], bt->read_data[2], bt->read_data[3]); | 377 | bt->read_data[1], bt->read_data[2], bt->read_data[3]); |
314 | return 0; | 378 | return 0; |
315 | } | 379 | } |
316 | 380 | ||
317 | /* Modifies bt->state appropriately, need to get into the bt_event() switch */ | 381 | /* Restart if retries are left, or return an error completion code */ |
318 | 382 | ||
319 | static void error_recovery(struct si_sm_data *bt, char *reason) | 383 | static enum si_sm_result error_recovery(struct si_sm_data *bt, |
384 | unsigned char status, | ||
385 | unsigned char cCode) | ||
320 | { | 386 | { |
321 | unsigned char status; | 387 | char *reason; |
322 | char buf[40]; /* For getting status */ | ||
323 | 388 | ||
324 | bt->timeout = BT_NORMAL_TIMEOUT; /* various places want to retry */ | 389 | bt->timeout = bt->BT_CAP_req2rsp; |
325 | 390 | ||
326 | status = BT_STATUS; | 391 | switch (cCode) { |
327 | printk(KERN_DEBUG "BT: %s in %s %s\n", reason, STATE2TXT, | 392 | case IPMI_TIMEOUT_ERR: |
328 | STATUS2TXT(buf)); | 393 | reason = "timeout"; |
394 | break; | ||
395 | default: | ||
396 | reason = "internal error"; | ||
397 | break; | ||
398 | } | ||
399 | |||
400 | printk(KERN_WARNING "IPMI BT: %s in %s %s ", /* open-ended line */ | ||
401 | reason, STATE2TXT, STATUS2TXT); | ||
329 | 402 | ||
403 | /* Per the IPMI spec, retries are based on the sequence number | ||
404 | known only to this module, so manage a restart here. */ | ||
330 | (bt->error_retries)++; | 405 | (bt->error_retries)++; |
331 | if (bt->error_retries > BT_RETRY_LIMIT) { | 406 | if (bt->error_retries < bt->BT_CAP_retries) { |
332 | printk(KERN_DEBUG "retry limit (%d) exceeded\n", BT_RETRY_LIMIT); | 407 | printk("%d retries left\n", |
333 | bt->state = BT_STATE_HOSED; | 408 | bt->BT_CAP_retries - bt->error_retries); |
334 | if (!bt->nonzero_status) | 409 | bt->state = BT_STATE_RESTART; |
335 | printk(KERN_ERR "IPMI: BT stuck, try power cycle\n"); | 410 | return SI_SM_CALL_WITHOUT_DELAY; |
336 | else if (bt->error_retries <= BT_RETRY_LIMIT + 1) { | ||
337 | printk(KERN_DEBUG "IPMI: BT reset (takes 5 secs)\n"); | ||
338 | bt->state = BT_STATE_RESET1; | ||
339 | } | ||
340 | return; | ||
341 | } | 411 | } |
342 | 412 | ||
343 | /* Sometimes the BMC queues get in an "off-by-one" state...*/ | 413 | printk("failed %d retries, sending error response\n", |
344 | if ((bt->state == BT_STATE_B2H_WAIT) && (status & BT_B2H_ATN)) { | 414 | bt->BT_CAP_retries); |
345 | printk(KERN_DEBUG "retry B2H_WAIT\n"); | 415 | if (!bt->nonzero_status) |
346 | return; | 416 | printk(KERN_ERR "IPMI BT: stuck, try power cycle\n"); |
417 | |||
418 | /* this is most likely during insmod */ | ||
419 | else if (bt->seq <= (unsigned char)(bt->BT_CAP_retries & 0xFF)) { | ||
420 | printk(KERN_WARNING "IPMI: BT reset (takes 5 secs)\n"); | ||
421 | bt->state = BT_STATE_RESET1; | ||
422 | return SI_SM_CALL_WITHOUT_DELAY; | ||
347 | } | 423 | } |
348 | 424 | ||
349 | printk(KERN_DEBUG "restart command\n"); | 425 | /* Concoct a useful error message, set up the next state, and |
350 | bt->state = BT_STATE_RESTART; | 426 | be done with this sequence. */ |
427 | |||
428 | bt->state = BT_STATE_IDLE; | ||
429 | switch (cCode) { | ||
430 | case IPMI_TIMEOUT_ERR: | ||
431 | if (status & BT_B_BUSY) { | ||
432 | cCode = IPMI_NODE_BUSY_ERR; | ||
433 | bt->state = BT_STATE_LONG_BUSY; | ||
434 | } | ||
435 | break; | ||
436 | default: | ||
437 | break; | ||
438 | } | ||
439 | force_result(bt, cCode); | ||
440 | return SI_SM_TRANSACTION_COMPLETE; | ||
351 | } | 441 | } |
352 | 442 | ||
353 | /* Check the status and (possibly) advance the BT state machine. The | 443 | /* Check status and (usually) take action and change this state machine. */ |
354 | default return is SI_SM_CALL_WITH_DELAY. */ | ||
355 | 444 | ||
356 | static enum si_sm_result bt_event(struct si_sm_data *bt, long time) | 445 | static enum si_sm_result bt_event(struct si_sm_data *bt, long time) |
357 | { | 446 | { |
358 | unsigned char status; | 447 | unsigned char status, BT_CAP[8]; |
359 | char buf[40]; /* For getting status */ | 448 | static enum bt_states last_printed = BT_STATE_PRINTME; |
360 | int i; | 449 | int i; |
361 | 450 | ||
362 | status = BT_STATUS; | 451 | status = BT_STATUS; |
363 | bt->nonzero_status |= status; | 452 | bt->nonzero_status |= status; |
364 | 453 | if ((bt_debug & BT_DEBUG_STATES) && (bt->state != last_printed)) { | |
365 | if ((bt_debug & BT_DEBUG_STATES) && (bt->state != bt->last_state)) | ||
366 | printk(KERN_WARNING "BT: %s %s TO=%ld - %ld \n", | 454 | printk(KERN_WARNING "BT: %s %s TO=%ld - %ld \n", |
367 | STATE2TXT, | 455 | STATE2TXT, |
368 | STATUS2TXT(buf), | 456 | STATUS2TXT, |
369 | bt->timeout, | 457 | bt->timeout, |
370 | time); | 458 | time); |
371 | bt->last_state = bt->state; | 459 | last_printed = bt->state; |
460 | } | ||
372 | 461 | ||
373 | if (bt->state == BT_STATE_HOSED) | 462 | /* Commands that time out may still (eventually) provide a response. |
374 | return SI_SM_HOSED; | 463 | This stale response will get in the way of a new response so remove |
464 | it if possible (hopefully during IDLE). Even if it comes up later | ||
465 | it will be rejected by its (now-forgotten) seq number. */ | ||
466 | |||
467 | if ((bt->state < BT_STATE_WRITE_BYTES) && (status & BT_B2H_ATN)) { | ||
468 | drain_BMC2HOST(bt); | ||
469 | BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY); | ||
470 | } | ||
375 | 471 | ||
376 | if (bt->state != BT_STATE_IDLE) { /* do timeout test */ | 472 | if ((bt->state != BT_STATE_IDLE) && |
473 | (bt->state < BT_STATE_PRINTME)) { /* check timeout */ | ||
377 | bt->timeout -= time; | 474 | bt->timeout -= time; |
378 | if ((bt->timeout < 0) && (bt->state < BT_STATE_RESET1)) { | 475 | if ((bt->timeout < 0) && (bt->state < BT_STATE_RESET1)) |
379 | error_recovery(bt, "timed out"); | 476 | return error_recovery(bt, |
380 | return SI_SM_CALL_WITHOUT_DELAY; | 477 | status, |
381 | } | 478 | IPMI_TIMEOUT_ERR); |
382 | } | 479 | } |
383 | 480 | ||
384 | switch (bt->state) { | 481 | switch (bt->state) { |
385 | 482 | ||
386 | case BT_STATE_IDLE: /* check for asynchronous messages */ | 483 | /* Idle state first checks for asynchronous messages from another |
484 | channel, then does some opportunistic housekeeping. */ | ||
485 | |||
486 | case BT_STATE_IDLE: | ||
387 | if (status & BT_SMS_ATN) { | 487 | if (status & BT_SMS_ATN) { |
388 | BT_CONTROL(BT_SMS_ATN); /* clear it */ | 488 | BT_CONTROL(BT_SMS_ATN); /* clear it */ |
389 | return SI_SM_ATTN; | 489 | return SI_SM_ATTN; |
390 | } | 490 | } |
391 | return SI_SM_IDLE; | ||
392 | 491 | ||
393 | case BT_STATE_XACTION_START: | 492 | if (status & BT_H_BUSY) /* clear a leftover H_BUSY */ |
394 | if (status & BT_H_BUSY) { | ||
395 | BT_CONTROL(BT_H_BUSY); | 493 | BT_CONTROL(BT_H_BUSY); |
396 | break; | ||
397 | } | ||
398 | if (status & BT_B2H_ATN) | ||
399 | break; | ||
400 | bt->state = BT_STATE_WRITE_BYTES; | ||
401 | return SI_SM_CALL_WITHOUT_DELAY; /* for logging */ | ||
402 | 494 | ||
403 | case BT_STATE_WRITE_BYTES: | 495 | /* Read BT capabilities if it hasn't been done yet */ |
496 | if (!bt->BT_CAP_outreqs) | ||
497 | BT_STATE_CHANGE(BT_STATE_CAPABILITIES_BEGIN, | ||
498 | SI_SM_CALL_WITHOUT_DELAY); | ||
499 | bt->timeout = bt->BT_CAP_req2rsp; | ||
500 | BT_SI_SM_RETURN(SI_SM_IDLE); | ||
501 | |||
502 | case BT_STATE_XACTION_START: | ||
404 | if (status & (BT_B_BUSY | BT_H2B_ATN)) | 503 | if (status & (BT_B_BUSY | BT_H2B_ATN)) |
405 | break; | 504 | BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY); |
505 | if (BT_STATUS & BT_H_BUSY) | ||
506 | BT_CONTROL(BT_H_BUSY); /* force clear */ | ||
507 | BT_STATE_CHANGE(BT_STATE_WRITE_BYTES, | ||
508 | SI_SM_CALL_WITHOUT_DELAY); | ||
509 | |||
510 | case BT_STATE_WRITE_BYTES: | ||
511 | if (status & BT_H_BUSY) | ||
512 | BT_CONTROL(BT_H_BUSY); /* clear */ | ||
406 | BT_CONTROL(BT_CLR_WR_PTR); | 513 | BT_CONTROL(BT_CLR_WR_PTR); |
407 | write_all_bytes(bt); | 514 | write_all_bytes(bt); |
408 | BT_CONTROL(BT_H2B_ATN); /* clears too fast to catch? */ | 515 | BT_CONTROL(BT_H2B_ATN); /* can clear too fast to catch */ |
409 | bt->state = BT_STATE_WRITE_CONSUME; | 516 | BT_STATE_CHANGE(BT_STATE_WRITE_CONSUME, |
410 | return SI_SM_CALL_WITHOUT_DELAY; /* it MIGHT sail through */ | 517 | SI_SM_CALL_WITHOUT_DELAY); |
411 | |||
412 | case BT_STATE_WRITE_CONSUME: /* BMCs usually blow right thru here */ | ||
413 | if (status & (BT_H2B_ATN | BT_B_BUSY)) | ||
414 | break; | ||
415 | bt->state = BT_STATE_B2H_WAIT; | ||
416 | /* fall through with status */ | ||
417 | |||
418 | /* Stay in BT_STATE_B2H_WAIT until a packet matches. However, spinning | ||
419 | hard here, constantly reading status, seems to hold off the | ||
420 | generation of B2H_ATN so ALWAYS return CALL_WITH_DELAY. */ | ||
421 | |||
422 | case BT_STATE_B2H_WAIT: | ||
423 | if (!(status & BT_B2H_ATN)) | ||
424 | break; | ||
425 | |||
426 | /* Assume ordered, uncached writes: no need to wait */ | ||
427 | if (!(status & BT_H_BUSY)) | ||
428 | BT_CONTROL(BT_H_BUSY); /* set */ | ||
429 | BT_CONTROL(BT_B2H_ATN); /* clear it, ACK to the BMC */ | ||
430 | BT_CONTROL(BT_CLR_RD_PTR); /* reset the queue */ | ||
431 | i = read_all_bytes(bt); | ||
432 | BT_CONTROL(BT_H_BUSY); /* clear */ | ||
433 | if (!i) /* Try this state again */ | ||
434 | break; | ||
435 | bt->state = BT_STATE_READ_END; | ||
436 | return SI_SM_CALL_WITHOUT_DELAY; /* for logging */ | ||
437 | |||
438 | case BT_STATE_READ_END: | ||
439 | |||
440 | /* I could wait on BT_H_BUSY to go clear for a truly clean | ||
441 | exit. However, this is already done in XACTION_START | ||
442 | and the (possible) extra loop/status/possible wait affects | ||
443 | performance. So, as long as it works, just ignore H_BUSY */ | ||
444 | |||
445 | #ifdef MAKE_THIS_TRUE_IF_NECESSARY | ||
446 | 518 | ||
447 | if (status & BT_H_BUSY) | 519 | case BT_STATE_WRITE_CONSUME: |
448 | break; | 520 | if (status & (BT_B_BUSY | BT_H2B_ATN)) |
449 | #endif | 521 | BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY); |
450 | bt->seq++; | 522 | BT_STATE_CHANGE(BT_STATE_READ_WAIT, |
451 | bt->state = BT_STATE_IDLE; | 523 | SI_SM_CALL_WITHOUT_DELAY); |
452 | return SI_SM_TRANSACTION_COMPLETE; | 524 | |
525 | /* Spinning hard can suppress B2H_ATN and force a timeout */ | ||
526 | |||
527 | case BT_STATE_READ_WAIT: | ||
528 | if (!(status & BT_B2H_ATN)) | ||
529 | BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY); | ||
530 | BT_CONTROL(BT_H_BUSY); /* set */ | ||
531 | |||
532 | /* Uncached, ordered writes should just proceeed serially but | ||
533 | some BMCs don't clear B2H_ATN with one hit. Fast-path a | ||
534 | workaround without too much penalty to the general case. */ | ||
535 | |||
536 | BT_CONTROL(BT_B2H_ATN); /* clear it to ACK the BMC */ | ||
537 | BT_STATE_CHANGE(BT_STATE_CLEAR_B2H, | ||
538 | SI_SM_CALL_WITHOUT_DELAY); | ||
539 | |||
540 | case BT_STATE_CLEAR_B2H: | ||
541 | if (status & BT_B2H_ATN) { /* keep hitting it */ | ||
542 | BT_CONTROL(BT_B2H_ATN); | ||
543 | BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY); | ||
544 | } | ||
545 | BT_STATE_CHANGE(BT_STATE_READ_BYTES, | ||
546 | SI_SM_CALL_WITHOUT_DELAY); | ||
547 | |||
548 | case BT_STATE_READ_BYTES: | ||
549 | if (!(status & BT_H_BUSY)) /* check in case of retry */ | ||
550 | BT_CONTROL(BT_H_BUSY); | ||
551 | BT_CONTROL(BT_CLR_RD_PTR); /* start of BMC2HOST buffer */ | ||
552 | i = read_all_bytes(bt); /* true == packet seq match */ | ||
553 | BT_CONTROL(BT_H_BUSY); /* NOW clear */ | ||
554 | if (!i) /* Not my message */ | ||
555 | BT_STATE_CHANGE(BT_STATE_READ_WAIT, | ||
556 | SI_SM_CALL_WITHOUT_DELAY); | ||
557 | bt->state = bt->complete; | ||
558 | return bt->state == BT_STATE_IDLE ? /* where to next? */ | ||
559 | SI_SM_TRANSACTION_COMPLETE : /* normal */ | ||
560 | SI_SM_CALL_WITHOUT_DELAY; /* Startup magic */ | ||
561 | |||
562 | case BT_STATE_LONG_BUSY: /* For example: after FW update */ | ||
563 | if (!(status & BT_B_BUSY)) { | ||
564 | reset_flags(bt); /* next state is now IDLE */ | ||
565 | bt_init_data(bt, bt->io); | ||
566 | } | ||
567 | return SI_SM_CALL_WITH_DELAY; /* No repeat printing */ | ||
453 | 568 | ||
454 | case BT_STATE_RESET1: | 569 | case BT_STATE_RESET1: |
455 | reset_flags(bt); | 570 | reset_flags(bt); |
456 | bt->timeout = BT_RESET_DELAY; | 571 | drain_BMC2HOST(bt); |
457 | bt->state = BT_STATE_RESET2; | 572 | BT_STATE_CHANGE(BT_STATE_RESET2, |
458 | break; | 573 | SI_SM_CALL_WITH_DELAY); |
459 | 574 | ||
460 | case BT_STATE_RESET2: /* Send a soft reset */ | 575 | case BT_STATE_RESET2: /* Send a soft reset */ |
461 | BT_CONTROL(BT_CLR_WR_PTR); | 576 | BT_CONTROL(BT_CLR_WR_PTR); |
@@ -464,29 +579,59 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time) | |||
464 | HOST2BMC(42); /* Sequence number */ | 579 | HOST2BMC(42); /* Sequence number */ |
465 | HOST2BMC(3); /* Cmd == Soft reset */ | 580 | HOST2BMC(3); /* Cmd == Soft reset */ |
466 | BT_CONTROL(BT_H2B_ATN); | 581 | BT_CONTROL(BT_H2B_ATN); |
467 | bt->state = BT_STATE_RESET3; | 582 | bt->timeout = BT_RESET_DELAY * 1000000; |
468 | break; | 583 | BT_STATE_CHANGE(BT_STATE_RESET3, |
584 | SI_SM_CALL_WITH_DELAY); | ||
469 | 585 | ||
470 | case BT_STATE_RESET3: | 586 | case BT_STATE_RESET3: /* Hold off everything for a bit */ |
471 | if (bt->timeout > 0) | 587 | if (bt->timeout > 0) |
472 | return SI_SM_CALL_WITH_DELAY; | 588 | return SI_SM_CALL_WITH_DELAY; |
473 | bt->state = BT_STATE_RESTART; /* printk in debug modes */ | 589 | drain_BMC2HOST(bt); |
474 | break; | 590 | BT_STATE_CHANGE(BT_STATE_RESTART, |
591 | SI_SM_CALL_WITH_DELAY); | ||
475 | 592 | ||
476 | case BT_STATE_RESTART: /* don't reset retries! */ | 593 | case BT_STATE_RESTART: /* don't reset retries or seq! */ |
477 | reset_flags(bt); | ||
478 | bt->write_data[2] = ++bt->seq; | ||
479 | bt->read_count = 0; | 594 | bt->read_count = 0; |
480 | bt->nonzero_status = 0; | 595 | bt->nonzero_status = 0; |
481 | bt->timeout = BT_NORMAL_TIMEOUT; | 596 | bt->timeout = bt->BT_CAP_req2rsp; |
482 | bt->state = BT_STATE_XACTION_START; | 597 | BT_STATE_CHANGE(BT_STATE_XACTION_START, |
483 | break; | 598 | SI_SM_CALL_WITH_DELAY); |
484 | 599 | ||
485 | default: /* HOSED is supposed to be caught much earlier */ | 600 | /* Get BT Capabilities, using timing of upper level state machine. |
486 | error_recovery(bt, "internal logic error"); | 601 | Set outreqs to prevent infinite loop on timeout. */ |
487 | break; | 602 | case BT_STATE_CAPABILITIES_BEGIN: |
488 | } | 603 | bt->BT_CAP_outreqs = 1; |
489 | return SI_SM_CALL_WITH_DELAY; | 604 | { |
605 | unsigned char GetBT_CAP[] = { 0x18, 0x36 }; | ||
606 | bt->state = BT_STATE_IDLE; | ||
607 | bt_start_transaction(bt, GetBT_CAP, sizeof(GetBT_CAP)); | ||
608 | } | ||
609 | bt->complete = BT_STATE_CAPABILITIES_END; | ||
610 | BT_STATE_CHANGE(BT_STATE_XACTION_START, | ||
611 | SI_SM_CALL_WITH_DELAY); | ||
612 | |||
613 | case BT_STATE_CAPABILITIES_END: | ||
614 | i = bt_get_result(bt, BT_CAP, sizeof(BT_CAP)); | ||
615 | bt_init_data(bt, bt->io); | ||
616 | if ((i == 8) && !BT_CAP[2]) { | ||
617 | bt->BT_CAP_outreqs = BT_CAP[3]; | ||
618 | bt->BT_CAP_req2rsp = BT_CAP[6] * 1000000; | ||
619 | bt->BT_CAP_retries = BT_CAP[7]; | ||
620 | } else | ||
621 | printk(KERN_WARNING "IPMI BT: using default values\n"); | ||
622 | if (!bt->BT_CAP_outreqs) | ||
623 | bt->BT_CAP_outreqs = 1; | ||
624 | printk(KERN_WARNING "IPMI BT: req2rsp=%ld secs retries=%d\n", | ||
625 | bt->BT_CAP_req2rsp / 1000000L, bt->BT_CAP_retries); | ||
626 | bt->timeout = bt->BT_CAP_req2rsp; | ||
627 | return SI_SM_CALL_WITHOUT_DELAY; | ||
628 | |||
629 | default: /* should never occur */ | ||
630 | return error_recovery(bt, | ||
631 | status, | ||
632 | IPMI_ERR_UNSPECIFIED); | ||
633 | } | ||
634 | return SI_SM_CALL_WITH_DELAY; | ||
490 | } | 635 | } |
491 | 636 | ||
492 | static int bt_detect(struct si_sm_data *bt) | 637 | static int bt_detect(struct si_sm_data *bt) |
@@ -497,7 +642,7 @@ static int bt_detect(struct si_sm_data *bt) | |||
497 | test that first. The calling routine uses negative logic. */ | 642 | test that first. The calling routine uses negative logic. */ |
498 | 643 | ||
499 | if ((BT_STATUS == 0xFF) && (BT_INTMASK_R == 0xFF)) | 644 | if ((BT_STATUS == 0xFF) && (BT_INTMASK_R == 0xFF)) |
500 | return 1; | 645 | return 1; |
501 | reset_flags(bt); | 646 | reset_flags(bt); |
502 | return 0; | 647 | return 0; |
503 | } | 648 | } |
@@ -513,11 +658,11 @@ static int bt_size(void) | |||
513 | 658 | ||
514 | struct si_sm_handlers bt_smi_handlers = | 659 | struct si_sm_handlers bt_smi_handlers = |
515 | { | 660 | { |
516 | .init_data = bt_init_data, | 661 | .init_data = bt_init_data, |
517 | .start_transaction = bt_start_transaction, | 662 | .start_transaction = bt_start_transaction, |
518 | .get_result = bt_get_result, | 663 | .get_result = bt_get_result, |
519 | .event = bt_event, | 664 | .event = bt_event, |
520 | .detect = bt_detect, | 665 | .detect = bt_detect, |
521 | .cleanup = bt_cleanup, | 666 | .cleanup = bt_cleanup, |
522 | .size = bt_size, | 667 | .size = bt_size, |
523 | }; | 668 | }; |
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c index 81fcf0ce21d1..375d3378eecd 100644 --- a/drivers/char/ipmi/ipmi_devintf.c +++ b/drivers/char/ipmi/ipmi_devintf.c | |||
@@ -596,6 +596,31 @@ static int ipmi_ioctl(struct inode *inode, | |||
596 | rv = 0; | 596 | rv = 0; |
597 | break; | 597 | break; |
598 | } | 598 | } |
599 | |||
600 | case IPMICTL_GET_MAINTENANCE_MODE_CMD: | ||
601 | { | ||
602 | int mode; | ||
603 | |||
604 | mode = ipmi_get_maintenance_mode(priv->user); | ||
605 | if (copy_to_user(arg, &mode, sizeof(mode))) { | ||
606 | rv = -EFAULT; | ||
607 | break; | ||
608 | } | ||
609 | rv = 0; | ||
610 | break; | ||
611 | } | ||
612 | |||
613 | case IPMICTL_SET_MAINTENANCE_MODE_CMD: | ||
614 | { | ||
615 | int mode; | ||
616 | |||
617 | if (copy_from_user(&mode, arg, sizeof(mode))) { | ||
618 | rv = -EFAULT; | ||
619 | break; | ||
620 | } | ||
621 | rv = ipmi_set_maintenance_mode(priv->user, mode); | ||
622 | break; | ||
623 | } | ||
599 | } | 624 | } |
600 | 625 | ||
601 | return rv; | 626 | return rv; |
diff --git a/drivers/char/ipmi/ipmi_kcs_sm.c b/drivers/char/ipmi/ipmi_kcs_sm.c index 2062675f9e99..c1b8228cb7b6 100644 --- a/drivers/char/ipmi/ipmi_kcs_sm.c +++ b/drivers/char/ipmi/ipmi_kcs_sm.c | |||
@@ -93,8 +93,8 @@ enum kcs_states { | |||
93 | state machine. */ | 93 | state machine. */ |
94 | }; | 94 | }; |
95 | 95 | ||
96 | #define MAX_KCS_READ_SIZE 80 | 96 | #define MAX_KCS_READ_SIZE IPMI_MAX_MSG_LENGTH |
97 | #define MAX_KCS_WRITE_SIZE 80 | 97 | #define MAX_KCS_WRITE_SIZE IPMI_MAX_MSG_LENGTH |
98 | 98 | ||
99 | /* Timeouts in microseconds. */ | 99 | /* Timeouts in microseconds. */ |
100 | #define IBF_RETRY_TIMEOUT 1000000 | 100 | #define IBF_RETRY_TIMEOUT 1000000 |
@@ -261,12 +261,14 @@ static int start_kcs_transaction(struct si_sm_data *kcs, unsigned char *data, | |||
261 | { | 261 | { |
262 | unsigned int i; | 262 | unsigned int i; |
263 | 263 | ||
264 | if ((size < 2) || (size > MAX_KCS_WRITE_SIZE)) { | 264 | if (size < 2) |
265 | return -1; | 265 | return IPMI_REQ_LEN_INVALID_ERR; |
266 | } | 266 | if (size > MAX_KCS_WRITE_SIZE) |
267 | if ((kcs->state != KCS_IDLE) && (kcs->state != KCS_HOSED)) { | 267 | return IPMI_REQ_LEN_EXCEEDED_ERR; |
268 | return -2; | 268 | |
269 | } | 269 | if ((kcs->state != KCS_IDLE) && (kcs->state != KCS_HOSED)) |
270 | return IPMI_NOT_IN_MY_STATE_ERR; | ||
271 | |||
270 | if (kcs_debug & KCS_DEBUG_MSG) { | 272 | if (kcs_debug & KCS_DEBUG_MSG) { |
271 | printk(KERN_DEBUG "start_kcs_transaction -"); | 273 | printk(KERN_DEBUG "start_kcs_transaction -"); |
272 | for (i = 0; i < size; i ++) { | 274 | for (i = 0; i < size; i ++) { |
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index c47add8e47df..5703ee28e1cc 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c | |||
@@ -48,7 +48,7 @@ | |||
48 | 48 | ||
49 | #define PFX "IPMI message handler: " | 49 | #define PFX "IPMI message handler: " |
50 | 50 | ||
51 | #define IPMI_DRIVER_VERSION "39.0" | 51 | #define IPMI_DRIVER_VERSION "39.1" |
52 | 52 | ||
53 | static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void); | 53 | static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void); |
54 | static int ipmi_init_msghandler(void); | 54 | static int ipmi_init_msghandler(void); |
@@ -59,6 +59,9 @@ static int initialized = 0; | |||
59 | static struct proc_dir_entry *proc_ipmi_root = NULL; | 59 | static struct proc_dir_entry *proc_ipmi_root = NULL; |
60 | #endif /* CONFIG_PROC_FS */ | 60 | #endif /* CONFIG_PROC_FS */ |
61 | 61 | ||
62 | /* Remain in auto-maintenance mode for this amount of time (in ms). */ | ||
63 | #define IPMI_MAINTENANCE_MODE_TIMEOUT 30000 | ||
64 | |||
62 | #define MAX_EVENTS_IN_QUEUE 25 | 65 | #define MAX_EVENTS_IN_QUEUE 25 |
63 | 66 | ||
64 | /* Don't let a message sit in a queue forever, always time it with at lest | 67 | /* Don't let a message sit in a queue forever, always time it with at lest |
@@ -193,17 +196,28 @@ struct ipmi_smi | |||
193 | 196 | ||
194 | struct kref refcount; | 197 | struct kref refcount; |
195 | 198 | ||
199 | /* Used for a list of interfaces. */ | ||
200 | struct list_head link; | ||
201 | |||
196 | /* The list of upper layers that are using me. seq_lock | 202 | /* The list of upper layers that are using me. seq_lock |
197 | * protects this. */ | 203 | * protects this. */ |
198 | struct list_head users; | 204 | struct list_head users; |
199 | 205 | ||
206 | /* Information to supply to users. */ | ||
207 | unsigned char ipmi_version_major; | ||
208 | unsigned char ipmi_version_minor; | ||
209 | |||
200 | /* Used for wake ups at startup. */ | 210 | /* Used for wake ups at startup. */ |
201 | wait_queue_head_t waitq; | 211 | wait_queue_head_t waitq; |
202 | 212 | ||
203 | struct bmc_device *bmc; | 213 | struct bmc_device *bmc; |
204 | char *my_dev_name; | 214 | char *my_dev_name; |
215 | char *sysfs_name; | ||
205 | 216 | ||
206 | /* This is the lower-layer's sender routine. */ | 217 | /* This is the lower-layer's sender routine. Note that you |
218 | * must either be holding the ipmi_interfaces_mutex or be in | ||
219 | * an umpreemptible region to use this. You must fetch the | ||
220 | * value into a local variable and make sure it is not NULL. */ | ||
207 | struct ipmi_smi_handlers *handlers; | 221 | struct ipmi_smi_handlers *handlers; |
208 | void *send_info; | 222 | void *send_info; |
209 | 223 | ||
@@ -242,6 +256,7 @@ struct ipmi_smi | |||
242 | spinlock_t events_lock; /* For dealing with event stuff. */ | 256 | spinlock_t events_lock; /* For dealing with event stuff. */ |
243 | struct list_head waiting_events; | 257 | struct list_head waiting_events; |
244 | unsigned int waiting_events_count; /* How many events in queue? */ | 258 | unsigned int waiting_events_count; /* How many events in queue? */ |
259 | int delivering_events; | ||
245 | 260 | ||
246 | /* The event receiver for my BMC, only really used at panic | 261 | /* The event receiver for my BMC, only really used at panic |
247 | shutdown as a place to store this. */ | 262 | shutdown as a place to store this. */ |
@@ -250,6 +265,12 @@ struct ipmi_smi | |||
250 | unsigned char local_sel_device; | 265 | unsigned char local_sel_device; |
251 | unsigned char local_event_generator; | 266 | unsigned char local_event_generator; |
252 | 267 | ||
268 | /* For handling of maintenance mode. */ | ||
269 | int maintenance_mode; | ||
270 | int maintenance_mode_enable; | ||
271 | int auto_maintenance_timeout; | ||
272 | spinlock_t maintenance_mode_lock; /* Used in a timer... */ | ||
273 | |||
253 | /* A cheap hack, if this is non-null and a message to an | 274 | /* A cheap hack, if this is non-null and a message to an |
254 | interface comes in with a NULL user, call this routine with | 275 | interface comes in with a NULL user, call this routine with |
255 | it. Note that the message will still be freed by the | 276 | it. Note that the message will still be freed by the |
@@ -338,13 +359,6 @@ struct ipmi_smi | |||
338 | }; | 359 | }; |
339 | #define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev) | 360 | #define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev) |
340 | 361 | ||
341 | /* Used to mark an interface entry that cannot be used but is not a | ||
342 | * free entry, either, primarily used at creation and deletion time so | ||
343 | * a slot doesn't get reused too quickly. */ | ||
344 | #define IPMI_INVALID_INTERFACE_ENTRY ((ipmi_smi_t) ((long) 1)) | ||
345 | #define IPMI_INVALID_INTERFACE(i) (((i) == NULL) \ | ||
346 | || (i == IPMI_INVALID_INTERFACE_ENTRY)) | ||
347 | |||
348 | /** | 362 | /** |
349 | * The driver model view of the IPMI messaging driver. | 363 | * The driver model view of the IPMI messaging driver. |
350 | */ | 364 | */ |
@@ -354,16 +368,13 @@ static struct device_driver ipmidriver = { | |||
354 | }; | 368 | }; |
355 | static DEFINE_MUTEX(ipmidriver_mutex); | 369 | static DEFINE_MUTEX(ipmidriver_mutex); |
356 | 370 | ||
357 | #define MAX_IPMI_INTERFACES 4 | 371 | static struct list_head ipmi_interfaces = LIST_HEAD_INIT(ipmi_interfaces); |
358 | static ipmi_smi_t ipmi_interfaces[MAX_IPMI_INTERFACES]; | 372 | static DEFINE_MUTEX(ipmi_interfaces_mutex); |
359 | |||
360 | /* Directly protects the ipmi_interfaces data structure. */ | ||
361 | static DEFINE_SPINLOCK(interfaces_lock); | ||
362 | 373 | ||
363 | /* List of watchers that want to know when smi's are added and | 374 | /* List of watchers that want to know when smi's are added and |
364 | deleted. */ | 375 | deleted. */ |
365 | static struct list_head smi_watchers = LIST_HEAD_INIT(smi_watchers); | 376 | static struct list_head smi_watchers = LIST_HEAD_INIT(smi_watchers); |
366 | static DECLARE_RWSEM(smi_watchers_sem); | 377 | static DEFINE_MUTEX(smi_watchers_mutex); |
367 | 378 | ||
368 | 379 | ||
369 | static void free_recv_msg_list(struct list_head *q) | 380 | static void free_recv_msg_list(struct list_head *q) |
@@ -423,48 +434,84 @@ static void intf_free(struct kref *ref) | |||
423 | kfree(intf); | 434 | kfree(intf); |
424 | } | 435 | } |
425 | 436 | ||
437 | struct watcher_entry { | ||
438 | int intf_num; | ||
439 | ipmi_smi_t intf; | ||
440 | struct list_head link; | ||
441 | }; | ||
442 | |||
426 | int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher) | 443 | int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher) |
427 | { | 444 | { |
428 | int i; | 445 | ipmi_smi_t intf; |
429 | unsigned long flags; | 446 | struct list_head to_deliver = LIST_HEAD_INIT(to_deliver); |
447 | struct watcher_entry *e, *e2; | ||
448 | |||
449 | mutex_lock(&smi_watchers_mutex); | ||
450 | |||
451 | mutex_lock(&ipmi_interfaces_mutex); | ||
430 | 452 | ||
431 | down_write(&smi_watchers_sem); | 453 | /* Build a list of things to deliver. */ |
432 | list_add(&(watcher->link), &smi_watchers); | 454 | list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { |
433 | up_write(&smi_watchers_sem); | 455 | if (intf->intf_num == -1) |
434 | spin_lock_irqsave(&interfaces_lock, flags); | ||
435 | for (i = 0; i < MAX_IPMI_INTERFACES; i++) { | ||
436 | ipmi_smi_t intf = ipmi_interfaces[i]; | ||
437 | if (IPMI_INVALID_INTERFACE(intf)) | ||
438 | continue; | 456 | continue; |
439 | spin_unlock_irqrestore(&interfaces_lock, flags); | 457 | e = kmalloc(sizeof(*e), GFP_KERNEL); |
440 | watcher->new_smi(i, intf->si_dev); | 458 | if (!e) |
441 | spin_lock_irqsave(&interfaces_lock, flags); | 459 | goto out_err; |
460 | kref_get(&intf->refcount); | ||
461 | e->intf = intf; | ||
462 | e->intf_num = intf->intf_num; | ||
463 | list_add_tail(&e->link, &to_deliver); | ||
442 | } | 464 | } |
443 | spin_unlock_irqrestore(&interfaces_lock, flags); | 465 | |
466 | /* We will succeed, so add it to the list. */ | ||
467 | list_add(&watcher->link, &smi_watchers); | ||
468 | |||
469 | mutex_unlock(&ipmi_interfaces_mutex); | ||
470 | |||
471 | list_for_each_entry_safe(e, e2, &to_deliver, link) { | ||
472 | list_del(&e->link); | ||
473 | watcher->new_smi(e->intf_num, e->intf->si_dev); | ||
474 | kref_put(&e->intf->refcount, intf_free); | ||
475 | kfree(e); | ||
476 | } | ||
477 | |||
478 | mutex_unlock(&smi_watchers_mutex); | ||
479 | |||
444 | return 0; | 480 | return 0; |
481 | |||
482 | out_err: | ||
483 | mutex_unlock(&ipmi_interfaces_mutex); | ||
484 | mutex_unlock(&smi_watchers_mutex); | ||
485 | list_for_each_entry_safe(e, e2, &to_deliver, link) { | ||
486 | list_del(&e->link); | ||
487 | kref_put(&e->intf->refcount, intf_free); | ||
488 | kfree(e); | ||
489 | } | ||
490 | return -ENOMEM; | ||
445 | } | 491 | } |
446 | 492 | ||
447 | int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher) | 493 | int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher) |
448 | { | 494 | { |
449 | down_write(&smi_watchers_sem); | 495 | mutex_lock(&smi_watchers_mutex); |
450 | list_del(&(watcher->link)); | 496 | list_del(&(watcher->link)); |
451 | up_write(&smi_watchers_sem); | 497 | mutex_unlock(&smi_watchers_mutex); |
452 | return 0; | 498 | return 0; |
453 | } | 499 | } |
454 | 500 | ||
501 | /* | ||
502 | * Must be called with smi_watchers_mutex held. | ||
503 | */ | ||
455 | static void | 504 | static void |
456 | call_smi_watchers(int i, struct device *dev) | 505 | call_smi_watchers(int i, struct device *dev) |
457 | { | 506 | { |
458 | struct ipmi_smi_watcher *w; | 507 | struct ipmi_smi_watcher *w; |
459 | 508 | ||
460 | down_read(&smi_watchers_sem); | ||
461 | list_for_each_entry(w, &smi_watchers, link) { | 509 | list_for_each_entry(w, &smi_watchers, link) { |
462 | if (try_module_get(w->owner)) { | 510 | if (try_module_get(w->owner)) { |
463 | w->new_smi(i, dev); | 511 | w->new_smi(i, dev); |
464 | module_put(w->owner); | 512 | module_put(w->owner); |
465 | } | 513 | } |
466 | } | 514 | } |
467 | up_read(&smi_watchers_sem); | ||
468 | } | 515 | } |
469 | 516 | ||
470 | static int | 517 | static int |
@@ -590,6 +637,17 @@ static void deliver_response(struct ipmi_recv_msg *msg) | |||
590 | } | 637 | } |
591 | } | 638 | } |
592 | 639 | ||
640 | static void | ||
641 | deliver_err_response(struct ipmi_recv_msg *msg, int err) | ||
642 | { | ||
643 | msg->recv_type = IPMI_RESPONSE_RECV_TYPE; | ||
644 | msg->msg_data[0] = err; | ||
645 | msg->msg.netfn |= 1; /* Convert to a response. */ | ||
646 | msg->msg.data_len = 1; | ||
647 | msg->msg.data = msg->msg_data; | ||
648 | deliver_response(msg); | ||
649 | } | ||
650 | |||
593 | /* Find the next sequence number not being used and add the given | 651 | /* Find the next sequence number not being used and add the given |
594 | message with the given timeout to the sequence table. This must be | 652 | message with the given timeout to the sequence table. This must be |
595 | called with the interface's seq_lock held. */ | 653 | called with the interface's seq_lock held. */ |
@@ -727,14 +785,8 @@ static int intf_err_seq(ipmi_smi_t intf, | |||
727 | } | 785 | } |
728 | spin_unlock_irqrestore(&(intf->seq_lock), flags); | 786 | spin_unlock_irqrestore(&(intf->seq_lock), flags); |
729 | 787 | ||
730 | if (msg) { | 788 | if (msg) |
731 | msg->recv_type = IPMI_RESPONSE_RECV_TYPE; | 789 | deliver_err_response(msg, err); |
732 | msg->msg_data[0] = err; | ||
733 | msg->msg.netfn |= 1; /* Convert to a response. */ | ||
734 | msg->msg.data_len = 1; | ||
735 | msg->msg.data = msg->msg_data; | ||
736 | deliver_response(msg); | ||
737 | } | ||
738 | 790 | ||
739 | return rv; | 791 | return rv; |
740 | } | 792 | } |
@@ -776,17 +828,18 @@ int ipmi_create_user(unsigned int if_num, | |||
776 | if (!new_user) | 828 | if (!new_user) |
777 | return -ENOMEM; | 829 | return -ENOMEM; |
778 | 830 | ||
779 | spin_lock_irqsave(&interfaces_lock, flags); | 831 | mutex_lock(&ipmi_interfaces_mutex); |
780 | intf = ipmi_interfaces[if_num]; | 832 | list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { |
781 | if ((if_num >= MAX_IPMI_INTERFACES) || IPMI_INVALID_INTERFACE(intf)) { | 833 | if (intf->intf_num == if_num) |
782 | spin_unlock_irqrestore(&interfaces_lock, flags); | 834 | goto found; |
783 | rv = -EINVAL; | ||
784 | goto out_kfree; | ||
785 | } | 835 | } |
836 | /* Not found, return an error */ | ||
837 | rv = -EINVAL; | ||
838 | goto out_kfree; | ||
786 | 839 | ||
840 | found: | ||
787 | /* Note that each existing user holds a refcount to the interface. */ | 841 | /* Note that each existing user holds a refcount to the interface. */ |
788 | kref_get(&intf->refcount); | 842 | kref_get(&intf->refcount); |
789 | spin_unlock_irqrestore(&interfaces_lock, flags); | ||
790 | 843 | ||
791 | kref_init(&new_user->refcount); | 844 | kref_init(&new_user->refcount); |
792 | new_user->handler = handler; | 845 | new_user->handler = handler; |
@@ -807,6 +860,10 @@ int ipmi_create_user(unsigned int if_num, | |||
807 | } | 860 | } |
808 | } | 861 | } |
809 | 862 | ||
863 | /* Hold the lock so intf->handlers is guaranteed to be good | ||
864 | * until now */ | ||
865 | mutex_unlock(&ipmi_interfaces_mutex); | ||
866 | |||
810 | new_user->valid = 1; | 867 | new_user->valid = 1; |
811 | spin_lock_irqsave(&intf->seq_lock, flags); | 868 | spin_lock_irqsave(&intf->seq_lock, flags); |
812 | list_add_rcu(&new_user->link, &intf->users); | 869 | list_add_rcu(&new_user->link, &intf->users); |
@@ -817,6 +874,7 @@ int ipmi_create_user(unsigned int if_num, | |||
817 | out_kref: | 874 | out_kref: |
818 | kref_put(&intf->refcount, intf_free); | 875 | kref_put(&intf->refcount, intf_free); |
819 | out_kfree: | 876 | out_kfree: |
877 | mutex_unlock(&ipmi_interfaces_mutex); | ||
820 | kfree(new_user); | 878 | kfree(new_user); |
821 | return rv; | 879 | return rv; |
822 | } | 880 | } |
@@ -846,6 +904,7 @@ int ipmi_destroy_user(ipmi_user_t user) | |||
846 | && (intf->seq_table[i].recv_msg->user == user)) | 904 | && (intf->seq_table[i].recv_msg->user == user)) |
847 | { | 905 | { |
848 | intf->seq_table[i].inuse = 0; | 906 | intf->seq_table[i].inuse = 0; |
907 | ipmi_free_recv_msg(intf->seq_table[i].recv_msg); | ||
849 | } | 908 | } |
850 | } | 909 | } |
851 | spin_unlock_irqrestore(&intf->seq_lock, flags); | 910 | spin_unlock_irqrestore(&intf->seq_lock, flags); |
@@ -872,9 +931,13 @@ int ipmi_destroy_user(ipmi_user_t user) | |||
872 | kfree(rcvr); | 931 | kfree(rcvr); |
873 | } | 932 | } |
874 | 933 | ||
875 | module_put(intf->handlers->owner); | 934 | mutex_lock(&ipmi_interfaces_mutex); |
876 | if (intf->handlers->dec_usecount) | 935 | if (intf->handlers) { |
877 | intf->handlers->dec_usecount(intf->send_info); | 936 | module_put(intf->handlers->owner); |
937 | if (intf->handlers->dec_usecount) | ||
938 | intf->handlers->dec_usecount(intf->send_info); | ||
939 | } | ||
940 | mutex_unlock(&ipmi_interfaces_mutex); | ||
878 | 941 | ||
879 | kref_put(&intf->refcount, intf_free); | 942 | kref_put(&intf->refcount, intf_free); |
880 | 943 | ||
@@ -887,8 +950,8 @@ void ipmi_get_version(ipmi_user_t user, | |||
887 | unsigned char *major, | 950 | unsigned char *major, |
888 | unsigned char *minor) | 951 | unsigned char *minor) |
889 | { | 952 | { |
890 | *major = ipmi_version_major(&user->intf->bmc->id); | 953 | *major = user->intf->ipmi_version_major; |
891 | *minor = ipmi_version_minor(&user->intf->bmc->id); | 954 | *minor = user->intf->ipmi_version_minor; |
892 | } | 955 | } |
893 | 956 | ||
894 | int ipmi_set_my_address(ipmi_user_t user, | 957 | int ipmi_set_my_address(ipmi_user_t user, |
@@ -931,6 +994,65 @@ int ipmi_get_my_LUN(ipmi_user_t user, | |||
931 | return 0; | 994 | return 0; |
932 | } | 995 | } |
933 | 996 | ||
997 | int ipmi_get_maintenance_mode(ipmi_user_t user) | ||
998 | { | ||
999 | int mode; | ||
1000 | unsigned long flags; | ||
1001 | |||
1002 | spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags); | ||
1003 | mode = user->intf->maintenance_mode; | ||
1004 | spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags); | ||
1005 | |||
1006 | return mode; | ||
1007 | } | ||
1008 | EXPORT_SYMBOL(ipmi_get_maintenance_mode); | ||
1009 | |||
1010 | static void maintenance_mode_update(ipmi_smi_t intf) | ||
1011 | { | ||
1012 | if (intf->handlers->set_maintenance_mode) | ||
1013 | intf->handlers->set_maintenance_mode( | ||
1014 | intf->send_info, intf->maintenance_mode_enable); | ||
1015 | } | ||
1016 | |||
1017 | int ipmi_set_maintenance_mode(ipmi_user_t user, int mode) | ||
1018 | { | ||
1019 | int rv = 0; | ||
1020 | unsigned long flags; | ||
1021 | ipmi_smi_t intf = user->intf; | ||
1022 | |||
1023 | spin_lock_irqsave(&intf->maintenance_mode_lock, flags); | ||
1024 | if (intf->maintenance_mode != mode) { | ||
1025 | switch (mode) { | ||
1026 | case IPMI_MAINTENANCE_MODE_AUTO: | ||
1027 | intf->maintenance_mode = mode; | ||
1028 | intf->maintenance_mode_enable | ||
1029 | = (intf->auto_maintenance_timeout > 0); | ||
1030 | break; | ||
1031 | |||
1032 | case IPMI_MAINTENANCE_MODE_OFF: | ||
1033 | intf->maintenance_mode = mode; | ||
1034 | intf->maintenance_mode_enable = 0; | ||
1035 | break; | ||
1036 | |||
1037 | case IPMI_MAINTENANCE_MODE_ON: | ||
1038 | intf->maintenance_mode = mode; | ||
1039 | intf->maintenance_mode_enable = 1; | ||
1040 | break; | ||
1041 | |||
1042 | default: | ||
1043 | rv = -EINVAL; | ||
1044 | goto out_unlock; | ||
1045 | } | ||
1046 | |||
1047 | maintenance_mode_update(intf); | ||
1048 | } | ||
1049 | out_unlock: | ||
1050 | spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags); | ||
1051 | |||
1052 | return rv; | ||
1053 | } | ||
1054 | EXPORT_SYMBOL(ipmi_set_maintenance_mode); | ||
1055 | |||
934 | int ipmi_set_gets_events(ipmi_user_t user, int val) | 1056 | int ipmi_set_gets_events(ipmi_user_t user, int val) |
935 | { | 1057 | { |
936 | unsigned long flags; | 1058 | unsigned long flags; |
@@ -943,20 +1065,33 @@ int ipmi_set_gets_events(ipmi_user_t user, int val) | |||
943 | spin_lock_irqsave(&intf->events_lock, flags); | 1065 | spin_lock_irqsave(&intf->events_lock, flags); |
944 | user->gets_events = val; | 1066 | user->gets_events = val; |
945 | 1067 | ||
946 | if (val) { | 1068 | if (intf->delivering_events) |
947 | /* Deliver any queued events. */ | 1069 | /* |
1070 | * Another thread is delivering events for this, so | ||
1071 | * let it handle any new events. | ||
1072 | */ | ||
1073 | goto out; | ||
1074 | |||
1075 | /* Deliver any queued events. */ | ||
1076 | while (user->gets_events && !list_empty(&intf->waiting_events)) { | ||
948 | list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link) | 1077 | list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link) |
949 | list_move_tail(&msg->link, &msgs); | 1078 | list_move_tail(&msg->link, &msgs); |
950 | intf->waiting_events_count = 0; | 1079 | intf->waiting_events_count = 0; |
951 | } | ||
952 | 1080 | ||
953 | /* Hold the events lock while doing this to preserve order. */ | 1081 | intf->delivering_events = 1; |
954 | list_for_each_entry_safe(msg, msg2, &msgs, link) { | 1082 | spin_unlock_irqrestore(&intf->events_lock, flags); |
955 | msg->user = user; | 1083 | |
956 | kref_get(&user->refcount); | 1084 | list_for_each_entry_safe(msg, msg2, &msgs, link) { |
957 | deliver_response(msg); | 1085 | msg->user = user; |
1086 | kref_get(&user->refcount); | ||
1087 | deliver_response(msg); | ||
1088 | } | ||
1089 | |||
1090 | spin_lock_irqsave(&intf->events_lock, flags); | ||
1091 | intf->delivering_events = 0; | ||
958 | } | 1092 | } |
959 | 1093 | ||
1094 | out: | ||
960 | spin_unlock_irqrestore(&intf->events_lock, flags); | 1095 | spin_unlock_irqrestore(&intf->events_lock, flags); |
961 | 1096 | ||
962 | return 0; | 1097 | return 0; |
@@ -1067,7 +1202,8 @@ int ipmi_unregister_for_cmd(ipmi_user_t user, | |||
1067 | void ipmi_user_set_run_to_completion(ipmi_user_t user, int val) | 1202 | void ipmi_user_set_run_to_completion(ipmi_user_t user, int val) |
1068 | { | 1203 | { |
1069 | ipmi_smi_t intf = user->intf; | 1204 | ipmi_smi_t intf = user->intf; |
1070 | intf->handlers->set_run_to_completion(intf->send_info, val); | 1205 | if (intf->handlers) |
1206 | intf->handlers->set_run_to_completion(intf->send_info, val); | ||
1071 | } | 1207 | } |
1072 | 1208 | ||
1073 | static unsigned char | 1209 | static unsigned char |
@@ -1178,10 +1314,11 @@ static int i_ipmi_request(ipmi_user_t user, | |||
1178 | int retries, | 1314 | int retries, |
1179 | unsigned int retry_time_ms) | 1315 | unsigned int retry_time_ms) |
1180 | { | 1316 | { |
1181 | int rv = 0; | 1317 | int rv = 0; |
1182 | struct ipmi_smi_msg *smi_msg; | 1318 | struct ipmi_smi_msg *smi_msg; |
1183 | struct ipmi_recv_msg *recv_msg; | 1319 | struct ipmi_recv_msg *recv_msg; |
1184 | unsigned long flags; | 1320 | unsigned long flags; |
1321 | struct ipmi_smi_handlers *handlers; | ||
1185 | 1322 | ||
1186 | 1323 | ||
1187 | if (supplied_recv) { | 1324 | if (supplied_recv) { |
@@ -1204,6 +1341,13 @@ static int i_ipmi_request(ipmi_user_t user, | |||
1204 | } | 1341 | } |
1205 | } | 1342 | } |
1206 | 1343 | ||
1344 | rcu_read_lock(); | ||
1345 | handlers = intf->handlers; | ||
1346 | if (!handlers) { | ||
1347 | rv = -ENODEV; | ||
1348 | goto out_err; | ||
1349 | } | ||
1350 | |||
1207 | recv_msg->user = user; | 1351 | recv_msg->user = user; |
1208 | if (user) | 1352 | if (user) |
1209 | kref_get(&user->refcount); | 1353 | kref_get(&user->refcount); |
@@ -1246,6 +1390,24 @@ static int i_ipmi_request(ipmi_user_t user, | |||
1246 | goto out_err; | 1390 | goto out_err; |
1247 | } | 1391 | } |
1248 | 1392 | ||
1393 | if (((msg->netfn == IPMI_NETFN_APP_REQUEST) | ||
1394 | && ((msg->cmd == IPMI_COLD_RESET_CMD) | ||
1395 | || (msg->cmd == IPMI_WARM_RESET_CMD))) | ||
1396 | || (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST)) | ||
1397 | { | ||
1398 | spin_lock_irqsave(&intf->maintenance_mode_lock, flags); | ||
1399 | intf->auto_maintenance_timeout | ||
1400 | = IPMI_MAINTENANCE_MODE_TIMEOUT; | ||
1401 | if (!intf->maintenance_mode | ||
1402 | && !intf->maintenance_mode_enable) | ||
1403 | { | ||
1404 | intf->maintenance_mode_enable = 1; | ||
1405 | maintenance_mode_update(intf); | ||
1406 | } | ||
1407 | spin_unlock_irqrestore(&intf->maintenance_mode_lock, | ||
1408 | flags); | ||
1409 | } | ||
1410 | |||
1249 | if ((msg->data_len + 2) > IPMI_MAX_MSG_LENGTH) { | 1411 | if ((msg->data_len + 2) > IPMI_MAX_MSG_LENGTH) { |
1250 | spin_lock_irqsave(&intf->counter_lock, flags); | 1412 | spin_lock_irqsave(&intf->counter_lock, flags); |
1251 | intf->sent_invalid_commands++; | 1413 | intf->sent_invalid_commands++; |
@@ -1520,11 +1682,14 @@ static int i_ipmi_request(ipmi_user_t user, | |||
1520 | printk("\n"); | 1682 | printk("\n"); |
1521 | } | 1683 | } |
1522 | #endif | 1684 | #endif |
1523 | intf->handlers->sender(intf->send_info, smi_msg, priority); | 1685 | |
1686 | handlers->sender(intf->send_info, smi_msg, priority); | ||
1687 | rcu_read_unlock(); | ||
1524 | 1688 | ||
1525 | return 0; | 1689 | return 0; |
1526 | 1690 | ||
1527 | out_err: | 1691 | out_err: |
1692 | rcu_read_unlock(); | ||
1528 | ipmi_free_smi_msg(smi_msg); | 1693 | ipmi_free_smi_msg(smi_msg); |
1529 | ipmi_free_recv_msg(recv_msg); | 1694 | ipmi_free_recv_msg(recv_msg); |
1530 | return rv; | 1695 | return rv; |
@@ -1604,6 +1769,7 @@ int ipmi_request_supply_msgs(ipmi_user_t user, | |||
1604 | -1, 0); | 1769 | -1, 0); |
1605 | } | 1770 | } |
1606 | 1771 | ||
1772 | #ifdef CONFIG_PROC_FS | ||
1607 | static int ipmb_file_read_proc(char *page, char **start, off_t off, | 1773 | static int ipmb_file_read_proc(char *page, char **start, off_t off, |
1608 | int count, int *eof, void *data) | 1774 | int count, int *eof, void *data) |
1609 | { | 1775 | { |
@@ -1692,6 +1858,7 @@ static int stat_file_read_proc(char *page, char **start, off_t off, | |||
1692 | 1858 | ||
1693 | return (out - ((char *) page)); | 1859 | return (out - ((char *) page)); |
1694 | } | 1860 | } |
1861 | #endif /* CONFIG_PROC_FS */ | ||
1695 | 1862 | ||
1696 | int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name, | 1863 | int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name, |
1697 | read_proc_t *read_proc, write_proc_t *write_proc, | 1864 | read_proc_t *read_proc, write_proc_t *write_proc, |
@@ -1817,13 +1984,12 @@ static int __find_bmc_prod_dev_id(struct device *dev, void *data) | |||
1817 | struct bmc_device *bmc = dev_get_drvdata(dev); | 1984 | struct bmc_device *bmc = dev_get_drvdata(dev); |
1818 | 1985 | ||
1819 | return (bmc->id.product_id == id->product_id | 1986 | return (bmc->id.product_id == id->product_id |
1820 | && bmc->id.product_id == id->product_id | ||
1821 | && bmc->id.device_id == id->device_id); | 1987 | && bmc->id.device_id == id->device_id); |
1822 | } | 1988 | } |
1823 | 1989 | ||
1824 | static struct bmc_device *ipmi_find_bmc_prod_dev_id( | 1990 | static struct bmc_device *ipmi_find_bmc_prod_dev_id( |
1825 | struct device_driver *drv, | 1991 | struct device_driver *drv, |
1826 | unsigned char product_id, unsigned char device_id) | 1992 | unsigned int product_id, unsigned char device_id) |
1827 | { | 1993 | { |
1828 | struct prod_dev_id id = { | 1994 | struct prod_dev_id id = { |
1829 | .product_id = product_id, | 1995 | .product_id = product_id, |
@@ -1940,6 +2106,9 @@ static ssize_t guid_show(struct device *dev, struct device_attribute *attr, | |||
1940 | 2106 | ||
1941 | static void remove_files(struct bmc_device *bmc) | 2107 | static void remove_files(struct bmc_device *bmc) |
1942 | { | 2108 | { |
2109 | if (!bmc->dev) | ||
2110 | return; | ||
2111 | |||
1943 | device_remove_file(&bmc->dev->dev, | 2112 | device_remove_file(&bmc->dev->dev, |
1944 | &bmc->device_id_attr); | 2113 | &bmc->device_id_attr); |
1945 | device_remove_file(&bmc->dev->dev, | 2114 | device_remove_file(&bmc->dev->dev, |
@@ -1973,7 +2142,8 @@ cleanup_bmc_device(struct kref *ref) | |||
1973 | bmc = container_of(ref, struct bmc_device, refcount); | 2142 | bmc = container_of(ref, struct bmc_device, refcount); |
1974 | 2143 | ||
1975 | remove_files(bmc); | 2144 | remove_files(bmc); |
1976 | platform_device_unregister(bmc->dev); | 2145 | if (bmc->dev) |
2146 | platform_device_unregister(bmc->dev); | ||
1977 | kfree(bmc); | 2147 | kfree(bmc); |
1978 | } | 2148 | } |
1979 | 2149 | ||
@@ -1981,7 +2151,11 @@ static void ipmi_bmc_unregister(ipmi_smi_t intf) | |||
1981 | { | 2151 | { |
1982 | struct bmc_device *bmc = intf->bmc; | 2152 | struct bmc_device *bmc = intf->bmc; |
1983 | 2153 | ||
1984 | sysfs_remove_link(&intf->si_dev->kobj, "bmc"); | 2154 | if (intf->sysfs_name) { |
2155 | sysfs_remove_link(&intf->si_dev->kobj, intf->sysfs_name); | ||
2156 | kfree(intf->sysfs_name); | ||
2157 | intf->sysfs_name = NULL; | ||
2158 | } | ||
1985 | if (intf->my_dev_name) { | 2159 | if (intf->my_dev_name) { |
1986 | sysfs_remove_link(&bmc->dev->dev.kobj, intf->my_dev_name); | 2160 | sysfs_remove_link(&bmc->dev->dev.kobj, intf->my_dev_name); |
1987 | kfree(intf->my_dev_name); | 2161 | kfree(intf->my_dev_name); |
@@ -1990,6 +2164,7 @@ static void ipmi_bmc_unregister(ipmi_smi_t intf) | |||
1990 | 2164 | ||
1991 | mutex_lock(&ipmidriver_mutex); | 2165 | mutex_lock(&ipmidriver_mutex); |
1992 | kref_put(&bmc->refcount, cleanup_bmc_device); | 2166 | kref_put(&bmc->refcount, cleanup_bmc_device); |
2167 | intf->bmc = NULL; | ||
1993 | mutex_unlock(&ipmidriver_mutex); | 2168 | mutex_unlock(&ipmidriver_mutex); |
1994 | } | 2169 | } |
1995 | 2170 | ||
@@ -1997,6 +2172,56 @@ static int create_files(struct bmc_device *bmc) | |||
1997 | { | 2172 | { |
1998 | int err; | 2173 | int err; |
1999 | 2174 | ||
2175 | bmc->device_id_attr.attr.name = "device_id"; | ||
2176 | bmc->device_id_attr.attr.owner = THIS_MODULE; | ||
2177 | bmc->device_id_attr.attr.mode = S_IRUGO; | ||
2178 | bmc->device_id_attr.show = device_id_show; | ||
2179 | |||
2180 | bmc->provides_dev_sdrs_attr.attr.name = "provides_device_sdrs"; | ||
2181 | bmc->provides_dev_sdrs_attr.attr.owner = THIS_MODULE; | ||
2182 | bmc->provides_dev_sdrs_attr.attr.mode = S_IRUGO; | ||
2183 | bmc->provides_dev_sdrs_attr.show = provides_dev_sdrs_show; | ||
2184 | |||
2185 | bmc->revision_attr.attr.name = "revision"; | ||
2186 | bmc->revision_attr.attr.owner = THIS_MODULE; | ||
2187 | bmc->revision_attr.attr.mode = S_IRUGO; | ||
2188 | bmc->revision_attr.show = revision_show; | ||
2189 | |||
2190 | bmc->firmware_rev_attr.attr.name = "firmware_revision"; | ||
2191 | bmc->firmware_rev_attr.attr.owner = THIS_MODULE; | ||
2192 | bmc->firmware_rev_attr.attr.mode = S_IRUGO; | ||
2193 | bmc->firmware_rev_attr.show = firmware_rev_show; | ||
2194 | |||
2195 | bmc->version_attr.attr.name = "ipmi_version"; | ||
2196 | bmc->version_attr.attr.owner = THIS_MODULE; | ||
2197 | bmc->version_attr.attr.mode = S_IRUGO; | ||
2198 | bmc->version_attr.show = ipmi_version_show; | ||
2199 | |||
2200 | bmc->add_dev_support_attr.attr.name = "additional_device_support"; | ||
2201 | bmc->add_dev_support_attr.attr.owner = THIS_MODULE; | ||
2202 | bmc->add_dev_support_attr.attr.mode = S_IRUGO; | ||
2203 | bmc->add_dev_support_attr.show = add_dev_support_show; | ||
2204 | |||
2205 | bmc->manufacturer_id_attr.attr.name = "manufacturer_id"; | ||
2206 | bmc->manufacturer_id_attr.attr.owner = THIS_MODULE; | ||
2207 | bmc->manufacturer_id_attr.attr.mode = S_IRUGO; | ||
2208 | bmc->manufacturer_id_attr.show = manufacturer_id_show; | ||
2209 | |||
2210 | bmc->product_id_attr.attr.name = "product_id"; | ||
2211 | bmc->product_id_attr.attr.owner = THIS_MODULE; | ||
2212 | bmc->product_id_attr.attr.mode = S_IRUGO; | ||
2213 | bmc->product_id_attr.show = product_id_show; | ||
2214 | |||
2215 | bmc->guid_attr.attr.name = "guid"; | ||
2216 | bmc->guid_attr.attr.owner = THIS_MODULE; | ||
2217 | bmc->guid_attr.attr.mode = S_IRUGO; | ||
2218 | bmc->guid_attr.show = guid_show; | ||
2219 | |||
2220 | bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision"; | ||
2221 | bmc->aux_firmware_rev_attr.attr.owner = THIS_MODULE; | ||
2222 | bmc->aux_firmware_rev_attr.attr.mode = S_IRUGO; | ||
2223 | bmc->aux_firmware_rev_attr.show = aux_firmware_rev_show; | ||
2224 | |||
2000 | err = device_create_file(&bmc->dev->dev, | 2225 | err = device_create_file(&bmc->dev->dev, |
2001 | &bmc->device_id_attr); | 2226 | &bmc->device_id_attr); |
2002 | if (err) goto out; | 2227 | if (err) goto out; |
@@ -2066,7 +2291,8 @@ out: | |||
2066 | return err; | 2291 | return err; |
2067 | } | 2292 | } |
2068 | 2293 | ||
2069 | static int ipmi_bmc_register(ipmi_smi_t intf) | 2294 | static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum, |
2295 | const char *sysfs_name) | ||
2070 | { | 2296 | { |
2071 | int rv; | 2297 | int rv; |
2072 | struct bmc_device *bmc = intf->bmc; | 2298 | struct bmc_device *bmc = intf->bmc; |
@@ -2106,9 +2332,39 @@ static int ipmi_bmc_register(ipmi_smi_t intf) | |||
2106 | bmc->id.product_id, | 2332 | bmc->id.product_id, |
2107 | bmc->id.device_id); | 2333 | bmc->id.device_id); |
2108 | } else { | 2334 | } else { |
2109 | bmc->dev = platform_device_alloc("ipmi_bmc", | 2335 | char name[14]; |
2110 | bmc->id.device_id); | 2336 | unsigned char orig_dev_id = bmc->id.device_id; |
2337 | int warn_printed = 0; | ||
2338 | |||
2339 | snprintf(name, sizeof(name), | ||
2340 | "ipmi_bmc.%4.4x", bmc->id.product_id); | ||
2341 | |||
2342 | while (ipmi_find_bmc_prod_dev_id(&ipmidriver, | ||
2343 | bmc->id.product_id, | ||
2344 | bmc->id.device_id)) | ||
2345 | { | ||
2346 | if (!warn_printed) { | ||
2347 | printk(KERN_WARNING PFX | ||
2348 | "This machine has two different BMCs" | ||
2349 | " with the same product id and device" | ||
2350 | " id. This is an error in the" | ||
2351 | " firmware, but incrementing the" | ||
2352 | " device id to work around the problem." | ||
2353 | " Prod ID = 0x%x, Dev ID = 0x%x\n", | ||
2354 | bmc->id.product_id, bmc->id.device_id); | ||
2355 | warn_printed = 1; | ||
2356 | } | ||
2357 | bmc->id.device_id++; /* Wraps at 255 */ | ||
2358 | if (bmc->id.device_id == orig_dev_id) { | ||
2359 | printk(KERN_ERR PFX | ||
2360 | "Out of device ids!\n"); | ||
2361 | break; | ||
2362 | } | ||
2363 | } | ||
2364 | |||
2365 | bmc->dev = platform_device_alloc(name, bmc->id.device_id); | ||
2111 | if (!bmc->dev) { | 2366 | if (!bmc->dev) { |
2367 | mutex_unlock(&ipmidriver_mutex); | ||
2112 | printk(KERN_ERR | 2368 | printk(KERN_ERR |
2113 | "ipmi_msghandler:" | 2369 | "ipmi_msghandler:" |
2114 | " Unable to allocate platform device\n"); | 2370 | " Unable to allocate platform device\n"); |
@@ -2121,6 +2377,8 @@ static int ipmi_bmc_register(ipmi_smi_t intf) | |||
2121 | rv = platform_device_add(bmc->dev); | 2377 | rv = platform_device_add(bmc->dev); |
2122 | mutex_unlock(&ipmidriver_mutex); | 2378 | mutex_unlock(&ipmidriver_mutex); |
2123 | if (rv) { | 2379 | if (rv) { |
2380 | platform_device_put(bmc->dev); | ||
2381 | bmc->dev = NULL; | ||
2124 | printk(KERN_ERR | 2382 | printk(KERN_ERR |
2125 | "ipmi_msghandler:" | 2383 | "ipmi_msghandler:" |
2126 | " Unable to register bmc device: %d\n", | 2384 | " Unable to register bmc device: %d\n", |
@@ -2130,57 +2388,6 @@ static int ipmi_bmc_register(ipmi_smi_t intf) | |||
2130 | return rv; | 2388 | return rv; |
2131 | } | 2389 | } |
2132 | 2390 | ||
2133 | bmc->device_id_attr.attr.name = "device_id"; | ||
2134 | bmc->device_id_attr.attr.owner = THIS_MODULE; | ||
2135 | bmc->device_id_attr.attr.mode = S_IRUGO; | ||
2136 | bmc->device_id_attr.show = device_id_show; | ||
2137 | |||
2138 | bmc->provides_dev_sdrs_attr.attr.name = "provides_device_sdrs"; | ||
2139 | bmc->provides_dev_sdrs_attr.attr.owner = THIS_MODULE; | ||
2140 | bmc->provides_dev_sdrs_attr.attr.mode = S_IRUGO; | ||
2141 | bmc->provides_dev_sdrs_attr.show = provides_dev_sdrs_show; | ||
2142 | |||
2143 | bmc->revision_attr.attr.name = "revision"; | ||
2144 | bmc->revision_attr.attr.owner = THIS_MODULE; | ||
2145 | bmc->revision_attr.attr.mode = S_IRUGO; | ||
2146 | bmc->revision_attr.show = revision_show; | ||
2147 | |||
2148 | bmc->firmware_rev_attr.attr.name = "firmware_revision"; | ||
2149 | bmc->firmware_rev_attr.attr.owner = THIS_MODULE; | ||
2150 | bmc->firmware_rev_attr.attr.mode = S_IRUGO; | ||
2151 | bmc->firmware_rev_attr.show = firmware_rev_show; | ||
2152 | |||
2153 | bmc->version_attr.attr.name = "ipmi_version"; | ||
2154 | bmc->version_attr.attr.owner = THIS_MODULE; | ||
2155 | bmc->version_attr.attr.mode = S_IRUGO; | ||
2156 | bmc->version_attr.show = ipmi_version_show; | ||
2157 | |||
2158 | bmc->add_dev_support_attr.attr.name | ||
2159 | = "additional_device_support"; | ||
2160 | bmc->add_dev_support_attr.attr.owner = THIS_MODULE; | ||
2161 | bmc->add_dev_support_attr.attr.mode = S_IRUGO; | ||
2162 | bmc->add_dev_support_attr.show = add_dev_support_show; | ||
2163 | |||
2164 | bmc->manufacturer_id_attr.attr.name = "manufacturer_id"; | ||
2165 | bmc->manufacturer_id_attr.attr.owner = THIS_MODULE; | ||
2166 | bmc->manufacturer_id_attr.attr.mode = S_IRUGO; | ||
2167 | bmc->manufacturer_id_attr.show = manufacturer_id_show; | ||
2168 | |||
2169 | bmc->product_id_attr.attr.name = "product_id"; | ||
2170 | bmc->product_id_attr.attr.owner = THIS_MODULE; | ||
2171 | bmc->product_id_attr.attr.mode = S_IRUGO; | ||
2172 | bmc->product_id_attr.show = product_id_show; | ||
2173 | |||
2174 | bmc->guid_attr.attr.name = "guid"; | ||
2175 | bmc->guid_attr.attr.owner = THIS_MODULE; | ||
2176 | bmc->guid_attr.attr.mode = S_IRUGO; | ||
2177 | bmc->guid_attr.show = guid_show; | ||
2178 | |||
2179 | bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision"; | ||
2180 | bmc->aux_firmware_rev_attr.attr.owner = THIS_MODULE; | ||
2181 | bmc->aux_firmware_rev_attr.attr.mode = S_IRUGO; | ||
2182 | bmc->aux_firmware_rev_attr.show = aux_firmware_rev_show; | ||
2183 | |||
2184 | rv = create_files(bmc); | 2391 | rv = create_files(bmc); |
2185 | if (rv) { | 2392 | if (rv) { |
2186 | mutex_lock(&ipmidriver_mutex); | 2393 | mutex_lock(&ipmidriver_mutex); |
@@ -2202,29 +2409,44 @@ static int ipmi_bmc_register(ipmi_smi_t intf) | |||
2202 | * create symlink from system interface device to bmc device | 2409 | * create symlink from system interface device to bmc device |
2203 | * and back. | 2410 | * and back. |
2204 | */ | 2411 | */ |
2412 | intf->sysfs_name = kstrdup(sysfs_name, GFP_KERNEL); | ||
2413 | if (!intf->sysfs_name) { | ||
2414 | rv = -ENOMEM; | ||
2415 | printk(KERN_ERR | ||
2416 | "ipmi_msghandler: allocate link to BMC: %d\n", | ||
2417 | rv); | ||
2418 | goto out_err; | ||
2419 | } | ||
2420 | |||
2205 | rv = sysfs_create_link(&intf->si_dev->kobj, | 2421 | rv = sysfs_create_link(&intf->si_dev->kobj, |
2206 | &bmc->dev->dev.kobj, "bmc"); | 2422 | &bmc->dev->dev.kobj, intf->sysfs_name); |
2207 | if (rv) { | 2423 | if (rv) { |
2424 | kfree(intf->sysfs_name); | ||
2425 | intf->sysfs_name = NULL; | ||
2208 | printk(KERN_ERR | 2426 | printk(KERN_ERR |
2209 | "ipmi_msghandler: Unable to create bmc symlink: %d\n", | 2427 | "ipmi_msghandler: Unable to create bmc symlink: %d\n", |
2210 | rv); | 2428 | rv); |
2211 | goto out_err; | 2429 | goto out_err; |
2212 | } | 2430 | } |
2213 | 2431 | ||
2214 | size = snprintf(dummy, 0, "ipmi%d", intf->intf_num); | 2432 | size = snprintf(dummy, 0, "ipmi%d", ifnum); |
2215 | intf->my_dev_name = kmalloc(size+1, GFP_KERNEL); | 2433 | intf->my_dev_name = kmalloc(size+1, GFP_KERNEL); |
2216 | if (!intf->my_dev_name) { | 2434 | if (!intf->my_dev_name) { |
2435 | kfree(intf->sysfs_name); | ||
2436 | intf->sysfs_name = NULL; | ||
2217 | rv = -ENOMEM; | 2437 | rv = -ENOMEM; |
2218 | printk(KERN_ERR | 2438 | printk(KERN_ERR |
2219 | "ipmi_msghandler: allocate link from BMC: %d\n", | 2439 | "ipmi_msghandler: allocate link from BMC: %d\n", |
2220 | rv); | 2440 | rv); |
2221 | goto out_err; | 2441 | goto out_err; |
2222 | } | 2442 | } |
2223 | snprintf(intf->my_dev_name, size+1, "ipmi%d", intf->intf_num); | 2443 | snprintf(intf->my_dev_name, size+1, "ipmi%d", ifnum); |
2224 | 2444 | ||
2225 | rv = sysfs_create_link(&bmc->dev->dev.kobj, &intf->si_dev->kobj, | 2445 | rv = sysfs_create_link(&bmc->dev->dev.kobj, &intf->si_dev->kobj, |
2226 | intf->my_dev_name); | 2446 | intf->my_dev_name); |
2227 | if (rv) { | 2447 | if (rv) { |
2448 | kfree(intf->sysfs_name); | ||
2449 | intf->sysfs_name = NULL; | ||
2228 | kfree(intf->my_dev_name); | 2450 | kfree(intf->my_dev_name); |
2229 | intf->my_dev_name = NULL; | 2451 | intf->my_dev_name = NULL; |
2230 | printk(KERN_ERR | 2452 | printk(KERN_ERR |
@@ -2409,17 +2631,14 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers, | |||
2409 | void *send_info, | 2631 | void *send_info, |
2410 | struct ipmi_device_id *device_id, | 2632 | struct ipmi_device_id *device_id, |
2411 | struct device *si_dev, | 2633 | struct device *si_dev, |
2634 | const char *sysfs_name, | ||
2412 | unsigned char slave_addr) | 2635 | unsigned char slave_addr) |
2413 | { | 2636 | { |
2414 | int i, j; | 2637 | int i, j; |
2415 | int rv; | 2638 | int rv; |
2416 | ipmi_smi_t intf; | 2639 | ipmi_smi_t intf; |
2417 | unsigned long flags; | 2640 | ipmi_smi_t tintf; |
2418 | int version_major; | 2641 | struct list_head *link; |
2419 | int version_minor; | ||
2420 | |||
2421 | version_major = ipmi_version_major(device_id); | ||
2422 | version_minor = ipmi_version_minor(device_id); | ||
2423 | 2642 | ||
2424 | /* Make sure the driver is actually initialized, this handles | 2643 | /* Make sure the driver is actually initialized, this handles |
2425 | problems with initialization order. */ | 2644 | problems with initialization order. */ |
@@ -2437,12 +2656,16 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers, | |||
2437 | if (!intf) | 2656 | if (!intf) |
2438 | return -ENOMEM; | 2657 | return -ENOMEM; |
2439 | memset(intf, 0, sizeof(*intf)); | 2658 | memset(intf, 0, sizeof(*intf)); |
2659 | |||
2660 | intf->ipmi_version_major = ipmi_version_major(device_id); | ||
2661 | intf->ipmi_version_minor = ipmi_version_minor(device_id); | ||
2662 | |||
2440 | intf->bmc = kzalloc(sizeof(*intf->bmc), GFP_KERNEL); | 2663 | intf->bmc = kzalloc(sizeof(*intf->bmc), GFP_KERNEL); |
2441 | if (!intf->bmc) { | 2664 | if (!intf->bmc) { |
2442 | kfree(intf); | 2665 | kfree(intf); |
2443 | return -ENOMEM; | 2666 | return -ENOMEM; |
2444 | } | 2667 | } |
2445 | intf->intf_num = -1; | 2668 | intf->intf_num = -1; /* Mark it invalid for now. */ |
2446 | kref_init(&intf->refcount); | 2669 | kref_init(&intf->refcount); |
2447 | intf->bmc->id = *device_id; | 2670 | intf->bmc->id = *device_id; |
2448 | intf->si_dev = si_dev; | 2671 | intf->si_dev = si_dev; |
@@ -2470,26 +2693,30 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers, | |||
2470 | INIT_LIST_HEAD(&intf->waiting_events); | 2693 | INIT_LIST_HEAD(&intf->waiting_events); |
2471 | intf->waiting_events_count = 0; | 2694 | intf->waiting_events_count = 0; |
2472 | mutex_init(&intf->cmd_rcvrs_mutex); | 2695 | mutex_init(&intf->cmd_rcvrs_mutex); |
2696 | spin_lock_init(&intf->maintenance_mode_lock); | ||
2473 | INIT_LIST_HEAD(&intf->cmd_rcvrs); | 2697 | INIT_LIST_HEAD(&intf->cmd_rcvrs); |
2474 | init_waitqueue_head(&intf->waitq); | 2698 | init_waitqueue_head(&intf->waitq); |
2475 | 2699 | ||
2476 | spin_lock_init(&intf->counter_lock); | 2700 | spin_lock_init(&intf->counter_lock); |
2477 | intf->proc_dir = NULL; | 2701 | intf->proc_dir = NULL; |
2478 | 2702 | ||
2479 | rv = -ENOMEM; | 2703 | mutex_lock(&smi_watchers_mutex); |
2480 | spin_lock_irqsave(&interfaces_lock, flags); | 2704 | mutex_lock(&ipmi_interfaces_mutex); |
2481 | for (i = 0; i < MAX_IPMI_INTERFACES; i++) { | 2705 | /* Look for a hole in the numbers. */ |
2482 | if (ipmi_interfaces[i] == NULL) { | 2706 | i = 0; |
2483 | intf->intf_num = i; | 2707 | link = &ipmi_interfaces; |
2484 | /* Reserve the entry till we are done. */ | 2708 | list_for_each_entry_rcu(tintf, &ipmi_interfaces, link) { |
2485 | ipmi_interfaces[i] = IPMI_INVALID_INTERFACE_ENTRY; | 2709 | if (tintf->intf_num != i) { |
2486 | rv = 0; | 2710 | link = &tintf->link; |
2487 | break; | 2711 | break; |
2488 | } | 2712 | } |
2713 | i++; | ||
2489 | } | 2714 | } |
2490 | spin_unlock_irqrestore(&interfaces_lock, flags); | 2715 | /* Add the new interface in numeric order. */ |
2491 | if (rv) | 2716 | if (i == 0) |
2492 | goto out; | 2717 | list_add_rcu(&intf->link, &ipmi_interfaces); |
2718 | else | ||
2719 | list_add_tail_rcu(&intf->link, link); | ||
2493 | 2720 | ||
2494 | rv = handlers->start_processing(send_info, intf); | 2721 | rv = handlers->start_processing(send_info, intf); |
2495 | if (rv) | 2722 | if (rv) |
@@ -2497,8 +2724,9 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers, | |||
2497 | 2724 | ||
2498 | get_guid(intf); | 2725 | get_guid(intf); |
2499 | 2726 | ||
2500 | if ((version_major > 1) | 2727 | if ((intf->ipmi_version_major > 1) |
2501 | || ((version_major == 1) && (version_minor >= 5))) | 2728 | || ((intf->ipmi_version_major == 1) |
2729 | && (intf->ipmi_version_minor >= 5))) | ||
2502 | { | 2730 | { |
2503 | /* Start scanning the channels to see what is | 2731 | /* Start scanning the channels to see what is |
2504 | available. */ | 2732 | available. */ |
@@ -2521,64 +2749,67 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers, | |||
2521 | if (rv == 0) | 2749 | if (rv == 0) |
2522 | rv = add_proc_entries(intf, i); | 2750 | rv = add_proc_entries(intf, i); |
2523 | 2751 | ||
2524 | rv = ipmi_bmc_register(intf); | 2752 | rv = ipmi_bmc_register(intf, i, sysfs_name); |
2525 | 2753 | ||
2526 | out: | 2754 | out: |
2527 | if (rv) { | 2755 | if (rv) { |
2528 | if (intf->proc_dir) | 2756 | if (intf->proc_dir) |
2529 | remove_proc_entries(intf); | 2757 | remove_proc_entries(intf); |
2758 | intf->handlers = NULL; | ||
2759 | list_del_rcu(&intf->link); | ||
2760 | mutex_unlock(&ipmi_interfaces_mutex); | ||
2761 | mutex_unlock(&smi_watchers_mutex); | ||
2762 | synchronize_rcu(); | ||
2530 | kref_put(&intf->refcount, intf_free); | 2763 | kref_put(&intf->refcount, intf_free); |
2531 | if (i < MAX_IPMI_INTERFACES) { | ||
2532 | spin_lock_irqsave(&interfaces_lock, flags); | ||
2533 | ipmi_interfaces[i] = NULL; | ||
2534 | spin_unlock_irqrestore(&interfaces_lock, flags); | ||
2535 | } | ||
2536 | } else { | 2764 | } else { |
2537 | spin_lock_irqsave(&interfaces_lock, flags); | 2765 | /* After this point the interface is legal to use. */ |
2538 | ipmi_interfaces[i] = intf; | 2766 | intf->intf_num = i; |
2539 | spin_unlock_irqrestore(&interfaces_lock, flags); | 2767 | mutex_unlock(&ipmi_interfaces_mutex); |
2540 | call_smi_watchers(i, intf->si_dev); | 2768 | call_smi_watchers(i, intf->si_dev); |
2769 | mutex_unlock(&smi_watchers_mutex); | ||
2541 | } | 2770 | } |
2542 | 2771 | ||
2543 | return rv; | 2772 | return rv; |
2544 | } | 2773 | } |
2545 | 2774 | ||
2775 | static void cleanup_smi_msgs(ipmi_smi_t intf) | ||
2776 | { | ||
2777 | int i; | ||
2778 | struct seq_table *ent; | ||
2779 | |||
2780 | /* No need for locks, the interface is down. */ | ||
2781 | for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { | ||
2782 | ent = &(intf->seq_table[i]); | ||
2783 | if (!ent->inuse) | ||
2784 | continue; | ||
2785 | deliver_err_response(ent->recv_msg, IPMI_ERR_UNSPECIFIED); | ||
2786 | } | ||
2787 | } | ||
2788 | |||
2546 | int ipmi_unregister_smi(ipmi_smi_t intf) | 2789 | int ipmi_unregister_smi(ipmi_smi_t intf) |
2547 | { | 2790 | { |
2548 | int i; | ||
2549 | struct ipmi_smi_watcher *w; | 2791 | struct ipmi_smi_watcher *w; |
2550 | unsigned long flags; | 2792 | int intf_num = intf->intf_num; |
2551 | 2793 | ||
2552 | ipmi_bmc_unregister(intf); | 2794 | ipmi_bmc_unregister(intf); |
2553 | 2795 | ||
2554 | spin_lock_irqsave(&interfaces_lock, flags); | 2796 | mutex_lock(&smi_watchers_mutex); |
2555 | for (i = 0; i < MAX_IPMI_INTERFACES; i++) { | 2797 | mutex_lock(&ipmi_interfaces_mutex); |
2556 | if (ipmi_interfaces[i] == intf) { | 2798 | intf->intf_num = -1; |
2557 | /* Set the interface number reserved until we | 2799 | intf->handlers = NULL; |
2558 | * are done. */ | 2800 | list_del_rcu(&intf->link); |
2559 | ipmi_interfaces[i] = IPMI_INVALID_INTERFACE_ENTRY; | 2801 | mutex_unlock(&ipmi_interfaces_mutex); |
2560 | intf->intf_num = -1; | 2802 | synchronize_rcu(); |
2561 | break; | ||
2562 | } | ||
2563 | } | ||
2564 | spin_unlock_irqrestore(&interfaces_lock,flags); | ||
2565 | 2803 | ||
2566 | if (i == MAX_IPMI_INTERFACES) | 2804 | cleanup_smi_msgs(intf); |
2567 | return -ENODEV; | ||
2568 | 2805 | ||
2569 | remove_proc_entries(intf); | 2806 | remove_proc_entries(intf); |
2570 | 2807 | ||
2571 | /* Call all the watcher interfaces to tell them that | 2808 | /* Call all the watcher interfaces to tell them that |
2572 | an interface is gone. */ | 2809 | an interface is gone. */ |
2573 | down_read(&smi_watchers_sem); | ||
2574 | list_for_each_entry(w, &smi_watchers, link) | 2810 | list_for_each_entry(w, &smi_watchers, link) |
2575 | w->smi_gone(i); | 2811 | w->smi_gone(intf_num); |
2576 | up_read(&smi_watchers_sem); | 2812 | mutex_unlock(&smi_watchers_mutex); |
2577 | |||
2578 | /* Allow the entry to be reused now. */ | ||
2579 | spin_lock_irqsave(&interfaces_lock, flags); | ||
2580 | ipmi_interfaces[i] = NULL; | ||
2581 | spin_unlock_irqrestore(&interfaces_lock,flags); | ||
2582 | 2813 | ||
2583 | kref_put(&intf->refcount, intf_free); | 2814 | kref_put(&intf->refcount, intf_free); |
2584 | return 0; | 2815 | return 0; |
@@ -2660,6 +2891,7 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf, | |||
2660 | struct ipmi_ipmb_addr *ipmb_addr; | 2891 | struct ipmi_ipmb_addr *ipmb_addr; |
2661 | struct ipmi_recv_msg *recv_msg; | 2892 | struct ipmi_recv_msg *recv_msg; |
2662 | unsigned long flags; | 2893 | unsigned long flags; |
2894 | struct ipmi_smi_handlers *handlers; | ||
2663 | 2895 | ||
2664 | if (msg->rsp_size < 10) { | 2896 | if (msg->rsp_size < 10) { |
2665 | /* Message not big enough, just ignore it. */ | 2897 | /* Message not big enough, just ignore it. */ |
@@ -2716,10 +2948,16 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf, | |||
2716 | printk("\n"); | 2948 | printk("\n"); |
2717 | } | 2949 | } |
2718 | #endif | 2950 | #endif |
2719 | intf->handlers->sender(intf->send_info, msg, 0); | 2951 | rcu_read_lock(); |
2720 | 2952 | handlers = intf->handlers; | |
2721 | rv = -1; /* We used the message, so return the value that | 2953 | if (handlers) { |
2722 | causes it to not be freed or queued. */ | 2954 | handlers->sender(intf->send_info, msg, 0); |
2955 | /* We used the message, so return the value | ||
2956 | that causes it to not be freed or | ||
2957 | queued. */ | ||
2958 | rv = -1; | ||
2959 | } | ||
2960 | rcu_read_unlock(); | ||
2723 | } else { | 2961 | } else { |
2724 | /* Deliver the message to the user. */ | 2962 | /* Deliver the message to the user. */ |
2725 | spin_lock_irqsave(&intf->counter_lock, flags); | 2963 | spin_lock_irqsave(&intf->counter_lock, flags); |
@@ -3309,16 +3547,6 @@ void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf) | |||
3309 | rcu_read_unlock(); | 3547 | rcu_read_unlock(); |
3310 | } | 3548 | } |
3311 | 3549 | ||
3312 | static void | ||
3313 | handle_msg_timeout(struct ipmi_recv_msg *msg) | ||
3314 | { | ||
3315 | msg->recv_type = IPMI_RESPONSE_RECV_TYPE; | ||
3316 | msg->msg_data[0] = IPMI_TIMEOUT_COMPLETION_CODE; | ||
3317 | msg->msg.netfn |= 1; /* Convert to a response. */ | ||
3318 | msg->msg.data_len = 1; | ||
3319 | msg->msg.data = msg->msg_data; | ||
3320 | deliver_response(msg); | ||
3321 | } | ||
3322 | 3550 | ||
3323 | static struct ipmi_smi_msg * | 3551 | static struct ipmi_smi_msg * |
3324 | smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg, | 3552 | smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg, |
@@ -3350,7 +3578,11 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent, | |||
3350 | struct list_head *timeouts, long timeout_period, | 3578 | struct list_head *timeouts, long timeout_period, |
3351 | int slot, unsigned long *flags) | 3579 | int slot, unsigned long *flags) |
3352 | { | 3580 | { |
3353 | struct ipmi_recv_msg *msg; | 3581 | struct ipmi_recv_msg *msg; |
3582 | struct ipmi_smi_handlers *handlers; | ||
3583 | |||
3584 | if (intf->intf_num == -1) | ||
3585 | return; | ||
3354 | 3586 | ||
3355 | if (!ent->inuse) | 3587 | if (!ent->inuse) |
3356 | return; | 3588 | return; |
@@ -3393,13 +3625,19 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent, | |||
3393 | return; | 3625 | return; |
3394 | 3626 | ||
3395 | spin_unlock_irqrestore(&intf->seq_lock, *flags); | 3627 | spin_unlock_irqrestore(&intf->seq_lock, *flags); |
3628 | |||
3396 | /* Send the new message. We send with a zero | 3629 | /* Send the new message. We send with a zero |
3397 | * priority. It timed out, I doubt time is | 3630 | * priority. It timed out, I doubt time is |
3398 | * that critical now, and high priority | 3631 | * that critical now, and high priority |
3399 | * messages are really only for messages to the | 3632 | * messages are really only for messages to the |
3400 | * local MC, which don't get resent. */ | 3633 | * local MC, which don't get resent. */ |
3401 | intf->handlers->sender(intf->send_info, | 3634 | handlers = intf->handlers; |
3402 | smi_msg, 0); | 3635 | if (handlers) |
3636 | intf->handlers->sender(intf->send_info, | ||
3637 | smi_msg, 0); | ||
3638 | else | ||
3639 | ipmi_free_smi_msg(smi_msg); | ||
3640 | |||
3403 | spin_lock_irqsave(&intf->seq_lock, *flags); | 3641 | spin_lock_irqsave(&intf->seq_lock, *flags); |
3404 | } | 3642 | } |
3405 | } | 3643 | } |
@@ -3411,18 +3649,12 @@ static void ipmi_timeout_handler(long timeout_period) | |||
3411 | struct ipmi_recv_msg *msg, *msg2; | 3649 | struct ipmi_recv_msg *msg, *msg2; |
3412 | struct ipmi_smi_msg *smi_msg, *smi_msg2; | 3650 | struct ipmi_smi_msg *smi_msg, *smi_msg2; |
3413 | unsigned long flags; | 3651 | unsigned long flags; |
3414 | int i, j; | 3652 | int i; |
3415 | 3653 | ||
3416 | INIT_LIST_HEAD(&timeouts); | 3654 | INIT_LIST_HEAD(&timeouts); |
3417 | 3655 | ||
3418 | spin_lock(&interfaces_lock); | 3656 | rcu_read_lock(); |
3419 | for (i = 0; i < MAX_IPMI_INTERFACES; i++) { | 3657 | list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { |
3420 | intf = ipmi_interfaces[i]; | ||
3421 | if (IPMI_INVALID_INTERFACE(intf)) | ||
3422 | continue; | ||
3423 | kref_get(&intf->refcount); | ||
3424 | spin_unlock(&interfaces_lock); | ||
3425 | |||
3426 | /* See if any waiting messages need to be processed. */ | 3658 | /* See if any waiting messages need to be processed. */ |
3427 | spin_lock_irqsave(&intf->waiting_msgs_lock, flags); | 3659 | spin_lock_irqsave(&intf->waiting_msgs_lock, flags); |
3428 | list_for_each_entry_safe(smi_msg, smi_msg2, | 3660 | list_for_each_entry_safe(smi_msg, smi_msg2, |
@@ -3442,35 +3674,60 @@ static void ipmi_timeout_handler(long timeout_period) | |||
3442 | have timed out, putting them in the timeouts | 3674 | have timed out, putting them in the timeouts |
3443 | list. */ | 3675 | list. */ |
3444 | spin_lock_irqsave(&intf->seq_lock, flags); | 3676 | spin_lock_irqsave(&intf->seq_lock, flags); |
3445 | for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) | 3677 | for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) |
3446 | check_msg_timeout(intf, &(intf->seq_table[j]), | 3678 | check_msg_timeout(intf, &(intf->seq_table[i]), |
3447 | &timeouts, timeout_period, j, | 3679 | &timeouts, timeout_period, i, |
3448 | &flags); | 3680 | &flags); |
3449 | spin_unlock_irqrestore(&intf->seq_lock, flags); | 3681 | spin_unlock_irqrestore(&intf->seq_lock, flags); |
3450 | 3682 | ||
3451 | list_for_each_entry_safe(msg, msg2, &timeouts, link) | 3683 | list_for_each_entry_safe(msg, msg2, &timeouts, link) |
3452 | handle_msg_timeout(msg); | 3684 | deliver_err_response(msg, IPMI_TIMEOUT_COMPLETION_CODE); |
3453 | 3685 | ||
3454 | kref_put(&intf->refcount, intf_free); | 3686 | /* |
3455 | spin_lock(&interfaces_lock); | 3687 | * Maintenance mode handling. Check the timeout |
3688 | * optimistically before we claim the lock. It may | ||
3689 | * mean a timeout gets missed occasionally, but that | ||
3690 | * only means the timeout gets extended by one period | ||
3691 | * in that case. No big deal, and it avoids the lock | ||
3692 | * most of the time. | ||
3693 | */ | ||
3694 | if (intf->auto_maintenance_timeout > 0) { | ||
3695 | spin_lock_irqsave(&intf->maintenance_mode_lock, flags); | ||
3696 | if (intf->auto_maintenance_timeout > 0) { | ||
3697 | intf->auto_maintenance_timeout | ||
3698 | -= timeout_period; | ||
3699 | if (!intf->maintenance_mode | ||
3700 | && (intf->auto_maintenance_timeout <= 0)) | ||
3701 | { | ||
3702 | intf->maintenance_mode_enable = 0; | ||
3703 | maintenance_mode_update(intf); | ||
3704 | } | ||
3705 | } | ||
3706 | spin_unlock_irqrestore(&intf->maintenance_mode_lock, | ||
3707 | flags); | ||
3708 | } | ||
3456 | } | 3709 | } |
3457 | spin_unlock(&interfaces_lock); | 3710 | rcu_read_unlock(); |
3458 | } | 3711 | } |
3459 | 3712 | ||
3460 | static void ipmi_request_event(void) | 3713 | static void ipmi_request_event(void) |
3461 | { | 3714 | { |
3462 | ipmi_smi_t intf; | 3715 | ipmi_smi_t intf; |
3463 | int i; | 3716 | struct ipmi_smi_handlers *handlers; |
3464 | 3717 | ||
3465 | spin_lock(&interfaces_lock); | 3718 | rcu_read_lock(); |
3466 | for (i = 0; i < MAX_IPMI_INTERFACES; i++) { | 3719 | /* Called from the timer, no need to check if handlers is |
3467 | intf = ipmi_interfaces[i]; | 3720 | * valid. */ |
3468 | if (IPMI_INVALID_INTERFACE(intf)) | 3721 | list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { |
3722 | /* No event requests when in maintenance mode. */ | ||
3723 | if (intf->maintenance_mode_enable) | ||
3469 | continue; | 3724 | continue; |
3470 | 3725 | ||
3471 | intf->handlers->request_events(intf->send_info); | 3726 | handlers = intf->handlers; |
3727 | if (handlers) | ||
3728 | handlers->request_events(intf->send_info); | ||
3472 | } | 3729 | } |
3473 | spin_unlock(&interfaces_lock); | 3730 | rcu_read_unlock(); |
3474 | } | 3731 | } |
3475 | 3732 | ||
3476 | static struct timer_list ipmi_timer; | 3733 | static struct timer_list ipmi_timer; |
@@ -3599,7 +3856,6 @@ static void send_panic_events(char *str) | |||
3599 | struct kernel_ipmi_msg msg; | 3856 | struct kernel_ipmi_msg msg; |
3600 | ipmi_smi_t intf; | 3857 | ipmi_smi_t intf; |
3601 | unsigned char data[16]; | 3858 | unsigned char data[16]; |
3602 | int i; | ||
3603 | struct ipmi_system_interface_addr *si; | 3859 | struct ipmi_system_interface_addr *si; |
3604 | struct ipmi_addr addr; | 3860 | struct ipmi_addr addr; |
3605 | struct ipmi_smi_msg smi_msg; | 3861 | struct ipmi_smi_msg smi_msg; |
@@ -3633,9 +3889,9 @@ static void send_panic_events(char *str) | |||
3633 | recv_msg.done = dummy_recv_done_handler; | 3889 | recv_msg.done = dummy_recv_done_handler; |
3634 | 3890 | ||
3635 | /* For every registered interface, send the event. */ | 3891 | /* For every registered interface, send the event. */ |
3636 | for (i = 0; i < MAX_IPMI_INTERFACES; i++) { | 3892 | list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { |
3637 | intf = ipmi_interfaces[i]; | 3893 | if (!intf->handlers) |
3638 | if (IPMI_INVALID_INTERFACE(intf)) | 3894 | /* Interface is not ready. */ |
3639 | continue; | 3895 | continue; |
3640 | 3896 | ||
3641 | /* Send the event announcing the panic. */ | 3897 | /* Send the event announcing the panic. */ |
@@ -3660,13 +3916,14 @@ static void send_panic_events(char *str) | |||
3660 | if (!str) | 3916 | if (!str) |
3661 | return; | 3917 | return; |
3662 | 3918 | ||
3663 | for (i = 0; i < MAX_IPMI_INTERFACES; i++) { | 3919 | /* For every registered interface, send the event. */ |
3920 | list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { | ||
3664 | char *p = str; | 3921 | char *p = str; |
3665 | struct ipmi_ipmb_addr *ipmb; | 3922 | struct ipmi_ipmb_addr *ipmb; |
3666 | int j; | 3923 | int j; |
3667 | 3924 | ||
3668 | intf = ipmi_interfaces[i]; | 3925 | if (intf->intf_num == -1) |
3669 | if (IPMI_INVALID_INTERFACE(intf)) | 3926 | /* Interface was not ready yet. */ |
3670 | continue; | 3927 | continue; |
3671 | 3928 | ||
3672 | /* First job here is to figure out where to send the | 3929 | /* First job here is to figure out where to send the |
@@ -3792,7 +4049,6 @@ static int panic_event(struct notifier_block *this, | |||
3792 | unsigned long event, | 4049 | unsigned long event, |
3793 | void *ptr) | 4050 | void *ptr) |
3794 | { | 4051 | { |
3795 | int i; | ||
3796 | ipmi_smi_t intf; | 4052 | ipmi_smi_t intf; |
3797 | 4053 | ||
3798 | if (has_panicked) | 4054 | if (has_panicked) |
@@ -3800,9 +4056,9 @@ static int panic_event(struct notifier_block *this, | |||
3800 | has_panicked = 1; | 4056 | has_panicked = 1; |
3801 | 4057 | ||
3802 | /* For every registered interface, set it to run to completion. */ | 4058 | /* For every registered interface, set it to run to completion. */ |
3803 | for (i = 0; i < MAX_IPMI_INTERFACES; i++) { | 4059 | list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { |
3804 | intf = ipmi_interfaces[i]; | 4060 | if (!intf->handlers) |
3805 | if (IPMI_INVALID_INTERFACE(intf)) | 4061 | /* Interface is not ready. */ |
3806 | continue; | 4062 | continue; |
3807 | 4063 | ||
3808 | intf->handlers->set_run_to_completion(intf->send_info, 1); | 4064 | intf->handlers->set_run_to_completion(intf->send_info, 1); |
@@ -3823,7 +4079,6 @@ static struct notifier_block panic_block = { | |||
3823 | 4079 | ||
3824 | static int ipmi_init_msghandler(void) | 4080 | static int ipmi_init_msghandler(void) |
3825 | { | 4081 | { |
3826 | int i; | ||
3827 | int rv; | 4082 | int rv; |
3828 | 4083 | ||
3829 | if (initialized) | 4084 | if (initialized) |
@@ -3838,9 +4093,6 @@ static int ipmi_init_msghandler(void) | |||
3838 | printk(KERN_INFO "ipmi message handler version " | 4093 | printk(KERN_INFO "ipmi message handler version " |
3839 | IPMI_DRIVER_VERSION "\n"); | 4094 | IPMI_DRIVER_VERSION "\n"); |
3840 | 4095 | ||
3841 | for (i = 0; i < MAX_IPMI_INTERFACES; i++) | ||
3842 | ipmi_interfaces[i] = NULL; | ||
3843 | |||
3844 | #ifdef CONFIG_PROC_FS | 4096 | #ifdef CONFIG_PROC_FS |
3845 | proc_ipmi_root = proc_mkdir("ipmi", NULL); | 4097 | proc_ipmi_root = proc_mkdir("ipmi", NULL); |
3846 | if (!proc_ipmi_root) { | 4098 | if (!proc_ipmi_root) { |
diff --git a/drivers/char/ipmi/ipmi_poweroff.c b/drivers/char/ipmi/ipmi_poweroff.c index 8d941db83457..597eb4f88b84 100644 --- a/drivers/char/ipmi/ipmi_poweroff.c +++ b/drivers/char/ipmi/ipmi_poweroff.c | |||
@@ -43,6 +43,9 @@ | |||
43 | 43 | ||
44 | #define PFX "IPMI poweroff: " | 44 | #define PFX "IPMI poweroff: " |
45 | 45 | ||
46 | static void ipmi_po_smi_gone(int if_num); | ||
47 | static void ipmi_po_new_smi(int if_num, struct device *device); | ||
48 | |||
46 | /* Definitions for controlling power off (if the system supports it). It | 49 | /* Definitions for controlling power off (if the system supports it). It |
47 | * conveniently matches the IPMI chassis control values. */ | 50 | * conveniently matches the IPMI chassis control values. */ |
48 | #define IPMI_CHASSIS_POWER_DOWN 0 /* power down, the default. */ | 51 | #define IPMI_CHASSIS_POWER_DOWN 0 /* power down, the default. */ |
@@ -51,6 +54,37 @@ | |||
51 | /* the IPMI data command */ | 54 | /* the IPMI data command */ |
52 | static int poweroff_powercycle; | 55 | static int poweroff_powercycle; |
53 | 56 | ||
57 | /* Which interface to use, -1 means the first we see. */ | ||
58 | static int ifnum_to_use = -1; | ||
59 | |||
60 | /* Our local state. */ | ||
61 | static int ready = 0; | ||
62 | static ipmi_user_t ipmi_user; | ||
63 | static int ipmi_ifnum; | ||
64 | static void (*specific_poweroff_func)(ipmi_user_t user) = NULL; | ||
65 | |||
66 | /* Holds the old poweroff function so we can restore it on removal. */ | ||
67 | static void (*old_poweroff_func)(void); | ||
68 | |||
69 | static int set_param_ifnum(const char *val, struct kernel_param *kp) | ||
70 | { | ||
71 | int rv = param_set_int(val, kp); | ||
72 | if (rv) | ||
73 | return rv; | ||
74 | if ((ifnum_to_use < 0) || (ifnum_to_use == ipmi_ifnum)) | ||
75 | return 0; | ||
76 | |||
77 | ipmi_po_smi_gone(ipmi_ifnum); | ||
78 | ipmi_po_new_smi(ifnum_to_use, NULL); | ||
79 | return 0; | ||
80 | } | ||
81 | |||
82 | module_param_call(ifnum_to_use, set_param_ifnum, param_get_int, | ||
83 | &ifnum_to_use, 0644); | ||
84 | MODULE_PARM_DESC(ifnum_to_use, "The interface number to use for the watchdog " | ||
85 | "timer. Setting to -1 defaults to the first registered " | ||
86 | "interface"); | ||
87 | |||
54 | /* parameter definition to allow user to flag power cycle */ | 88 | /* parameter definition to allow user to flag power cycle */ |
55 | module_param(poweroff_powercycle, int, 0644); | 89 | module_param(poweroff_powercycle, int, 0644); |
56 | MODULE_PARM_DESC(poweroff_powercycle, " Set to non-zero to enable power cycle instead of power down. Power cycle is contingent on hardware support, otherwise it defaults back to power down."); | 90 | MODULE_PARM_DESC(poweroff_powercycle, " Set to non-zero to enable power cycle instead of power down. Power cycle is contingent on hardware support, otherwise it defaults back to power down."); |
@@ -142,6 +176,42 @@ static int ipmi_request_in_rc_mode(ipmi_user_t user, | |||
142 | #define IPMI_ATCA_GET_ADDR_INFO_CMD 0x01 | 176 | #define IPMI_ATCA_GET_ADDR_INFO_CMD 0x01 |
143 | #define IPMI_PICMG_ID 0 | 177 | #define IPMI_PICMG_ID 0 |
144 | 178 | ||
179 | #define IPMI_NETFN_OEM 0x2e | ||
180 | #define IPMI_ATCA_PPS_GRACEFUL_RESTART 0x11 | ||
181 | #define IPMI_ATCA_PPS_IANA "\x00\x40\x0A" | ||
182 | #define IPMI_MOTOROLA_MANUFACTURER_ID 0x0000A1 | ||
183 | #define IPMI_MOTOROLA_PPS_IPMC_PRODUCT_ID 0x0051 | ||
184 | |||
185 | static void (*atca_oem_poweroff_hook)(ipmi_user_t user) = NULL; | ||
186 | |||
187 | static void pps_poweroff_atca (ipmi_user_t user) | ||
188 | { | ||
189 | struct ipmi_system_interface_addr smi_addr; | ||
190 | struct kernel_ipmi_msg send_msg; | ||
191 | int rv; | ||
192 | /* | ||
193 | * Configure IPMI address for local access | ||
194 | */ | ||
195 | smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; | ||
196 | smi_addr.channel = IPMI_BMC_CHANNEL; | ||
197 | smi_addr.lun = 0; | ||
198 | |||
199 | printk(KERN_INFO PFX "PPS powerdown hook used"); | ||
200 | |||
201 | send_msg.netfn = IPMI_NETFN_OEM; | ||
202 | send_msg.cmd = IPMI_ATCA_PPS_GRACEFUL_RESTART; | ||
203 | send_msg.data = IPMI_ATCA_PPS_IANA; | ||
204 | send_msg.data_len = 3; | ||
205 | rv = ipmi_request_in_rc_mode(user, | ||
206 | (struct ipmi_addr *) &smi_addr, | ||
207 | &send_msg); | ||
208 | if (rv && rv != IPMI_UNKNOWN_ERR_COMPLETION_CODE) { | ||
209 | printk(KERN_ERR PFX "Unable to send ATCA ," | ||
210 | " IPMI error 0x%x\n", rv); | ||
211 | } | ||
212 | return; | ||
213 | } | ||
214 | |||
145 | static int ipmi_atca_detect (ipmi_user_t user) | 215 | static int ipmi_atca_detect (ipmi_user_t user) |
146 | { | 216 | { |
147 | struct ipmi_system_interface_addr smi_addr; | 217 | struct ipmi_system_interface_addr smi_addr; |
@@ -167,6 +237,13 @@ static int ipmi_atca_detect (ipmi_user_t user) | |||
167 | rv = ipmi_request_wait_for_response(user, | 237 | rv = ipmi_request_wait_for_response(user, |
168 | (struct ipmi_addr *) &smi_addr, | 238 | (struct ipmi_addr *) &smi_addr, |
169 | &send_msg); | 239 | &send_msg); |
240 | |||
241 | printk(KERN_INFO PFX "ATCA Detect mfg 0x%X prod 0x%X\n", mfg_id, prod_id); | ||
242 | if((mfg_id == IPMI_MOTOROLA_MANUFACTURER_ID) | ||
243 | && (prod_id == IPMI_MOTOROLA_PPS_IPMC_PRODUCT_ID)) { | ||
244 | printk(KERN_INFO PFX "Installing Pigeon Point Systems Poweroff Hook\n"); | ||
245 | atca_oem_poweroff_hook = pps_poweroff_atca; | ||
246 | } | ||
170 | return !rv; | 247 | return !rv; |
171 | } | 248 | } |
172 | 249 | ||
@@ -200,12 +277,19 @@ static void ipmi_poweroff_atca (ipmi_user_t user) | |||
200 | rv = ipmi_request_in_rc_mode(user, | 277 | rv = ipmi_request_in_rc_mode(user, |
201 | (struct ipmi_addr *) &smi_addr, | 278 | (struct ipmi_addr *) &smi_addr, |
202 | &send_msg); | 279 | &send_msg); |
203 | if (rv) { | 280 | /** At this point, the system may be shutting down, and most |
281 | ** serial drivers (if used) will have interrupts turned off | ||
282 | ** it may be better to ignore IPMI_UNKNOWN_ERR_COMPLETION_CODE | ||
283 | ** return code | ||
284 | **/ | ||
285 | if (rv && rv != IPMI_UNKNOWN_ERR_COMPLETION_CODE) { | ||
204 | printk(KERN_ERR PFX "Unable to send ATCA powerdown message," | 286 | printk(KERN_ERR PFX "Unable to send ATCA powerdown message," |
205 | " IPMI error 0x%x\n", rv); | 287 | " IPMI error 0x%x\n", rv); |
206 | goto out; | 288 | goto out; |
207 | } | 289 | } |
208 | 290 | ||
291 | if(atca_oem_poweroff_hook) | ||
292 | return atca_oem_poweroff_hook(user); | ||
209 | out: | 293 | out: |
210 | return; | 294 | return; |
211 | } | 295 | } |
@@ -440,15 +524,6 @@ static struct poweroff_function poweroff_functions[] = { | |||
440 | / sizeof(struct poweroff_function)) | 524 | / sizeof(struct poweroff_function)) |
441 | 525 | ||
442 | 526 | ||
443 | /* Our local state. */ | ||
444 | static int ready = 0; | ||
445 | static ipmi_user_t ipmi_user; | ||
446 | static void (*specific_poweroff_func)(ipmi_user_t user) = NULL; | ||
447 | |||
448 | /* Holds the old poweroff function so we can restore it on removal. */ | ||
449 | static void (*old_poweroff_func)(void); | ||
450 | |||
451 | |||
452 | /* Called on a powerdown request. */ | 527 | /* Called on a powerdown request. */ |
453 | static void ipmi_poweroff_function (void) | 528 | static void ipmi_poweroff_function (void) |
454 | { | 529 | { |
@@ -473,6 +548,9 @@ static void ipmi_po_new_smi(int if_num, struct device *device) | |||
473 | if (ready) | 548 | if (ready) |
474 | return; | 549 | return; |
475 | 550 | ||
551 | if ((ifnum_to_use >= 0) && (ifnum_to_use != if_num)) | ||
552 | return; | ||
553 | |||
476 | rv = ipmi_create_user(if_num, &ipmi_poweroff_handler, NULL, | 554 | rv = ipmi_create_user(if_num, &ipmi_poweroff_handler, NULL, |
477 | &ipmi_user); | 555 | &ipmi_user); |
478 | if (rv) { | 556 | if (rv) { |
@@ -481,6 +559,8 @@ static void ipmi_po_new_smi(int if_num, struct device *device) | |||
481 | return; | 559 | return; |
482 | } | 560 | } |
483 | 561 | ||
562 | ipmi_ifnum = if_num; | ||
563 | |||
484 | /* | 564 | /* |
485 | * Do a get device ide and store some results, since this is | 565 | * Do a get device ide and store some results, since this is |
486 | * used by several functions. | 566 | * used by several functions. |
@@ -541,9 +621,15 @@ static void ipmi_po_new_smi(int if_num, struct device *device) | |||
541 | 621 | ||
542 | static void ipmi_po_smi_gone(int if_num) | 622 | static void ipmi_po_smi_gone(int if_num) |
543 | { | 623 | { |
544 | /* This can never be called, because once poweroff driver is | 624 | if (!ready) |
545 | registered, the interface can't go away until the power | 625 | return; |
546 | driver is unregistered. */ | 626 | |
627 | if (ipmi_ifnum != if_num) | ||
628 | return; | ||
629 | |||
630 | ready = 0; | ||
631 | ipmi_destroy_user(ipmi_user); | ||
632 | pm_power_off = old_poweroff_func; | ||
547 | } | 633 | } |
548 | 634 | ||
549 | static struct ipmi_smi_watcher smi_watcher = | 635 | static struct ipmi_smi_watcher smi_watcher = |
@@ -616,9 +702,9 @@ static int ipmi_poweroff_init (void) | |||
616 | printk(KERN_ERR PFX "Unable to register SMI watcher: %d\n", rv); | 702 | printk(KERN_ERR PFX "Unable to register SMI watcher: %d\n", rv); |
617 | goto out_err; | 703 | goto out_err; |
618 | } | 704 | } |
619 | #endif | ||
620 | 705 | ||
621 | out_err: | 706 | out_err: |
707 | #endif | ||
622 | return rv; | 708 | return rv; |
623 | } | 709 | } |
624 | 710 | ||
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index bb1fac104fda..81a0c89598e7 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c | |||
@@ -61,6 +61,10 @@ | |||
61 | #include "ipmi_si_sm.h" | 61 | #include "ipmi_si_sm.h" |
62 | #include <linux/init.h> | 62 | #include <linux/init.h> |
63 | #include <linux/dmi.h> | 63 | #include <linux/dmi.h> |
64 | #include <linux/string.h> | ||
65 | #include <linux/ctype.h> | ||
66 | |||
67 | #define PFX "ipmi_si: " | ||
64 | 68 | ||
65 | /* Measure times between events in the driver. */ | 69 | /* Measure times between events in the driver. */ |
66 | #undef DEBUG_TIMING | 70 | #undef DEBUG_TIMING |
@@ -92,7 +96,7 @@ enum si_intf_state { | |||
92 | enum si_type { | 96 | enum si_type { |
93 | SI_KCS, SI_SMIC, SI_BT | 97 | SI_KCS, SI_SMIC, SI_BT |
94 | }; | 98 | }; |
95 | static char *si_to_str[] = { "KCS", "SMIC", "BT" }; | 99 | static char *si_to_str[] = { "kcs", "smic", "bt" }; |
96 | 100 | ||
97 | #define DEVICE_NAME "ipmi_si" | 101 | #define DEVICE_NAME "ipmi_si" |
98 | 102 | ||
@@ -222,7 +226,10 @@ struct smi_info | |||
222 | static int force_kipmid[SI_MAX_PARMS]; | 226 | static int force_kipmid[SI_MAX_PARMS]; |
223 | static int num_force_kipmid; | 227 | static int num_force_kipmid; |
224 | 228 | ||
229 | static int unload_when_empty = 1; | ||
230 | |||
225 | static int try_smi_init(struct smi_info *smi); | 231 | static int try_smi_init(struct smi_info *smi); |
232 | static void cleanup_one_si(struct smi_info *to_clean); | ||
226 | 233 | ||
227 | static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list); | 234 | static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list); |
228 | static int register_xaction_notifier(struct notifier_block * nb) | 235 | static int register_xaction_notifier(struct notifier_block * nb) |
@@ -240,14 +247,18 @@ static void deliver_recv_msg(struct smi_info *smi_info, | |||
240 | spin_lock(&(smi_info->si_lock)); | 247 | spin_lock(&(smi_info->si_lock)); |
241 | } | 248 | } |
242 | 249 | ||
243 | static void return_hosed_msg(struct smi_info *smi_info) | 250 | static void return_hosed_msg(struct smi_info *smi_info, int cCode) |
244 | { | 251 | { |
245 | struct ipmi_smi_msg *msg = smi_info->curr_msg; | 252 | struct ipmi_smi_msg *msg = smi_info->curr_msg; |
246 | 253 | ||
254 | if (cCode < 0 || cCode > IPMI_ERR_UNSPECIFIED) | ||
255 | cCode = IPMI_ERR_UNSPECIFIED; | ||
256 | /* else use it as is */ | ||
257 | |||
247 | /* Make it a reponse */ | 258 | /* Make it a reponse */ |
248 | msg->rsp[0] = msg->data[0] | 4; | 259 | msg->rsp[0] = msg->data[0] | 4; |
249 | msg->rsp[1] = msg->data[1]; | 260 | msg->rsp[1] = msg->data[1]; |
250 | msg->rsp[2] = 0xFF; /* Unknown error. */ | 261 | msg->rsp[2] = cCode; |
251 | msg->rsp_size = 3; | 262 | msg->rsp_size = 3; |
252 | 263 | ||
253 | smi_info->curr_msg = NULL; | 264 | smi_info->curr_msg = NULL; |
@@ -298,7 +309,7 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info) | |||
298 | smi_info->curr_msg->data, | 309 | smi_info->curr_msg->data, |
299 | smi_info->curr_msg->data_size); | 310 | smi_info->curr_msg->data_size); |
300 | if (err) { | 311 | if (err) { |
301 | return_hosed_msg(smi_info); | 312 | return_hosed_msg(smi_info, err); |
302 | } | 313 | } |
303 | 314 | ||
304 | rv = SI_SM_CALL_WITHOUT_DELAY; | 315 | rv = SI_SM_CALL_WITHOUT_DELAY; |
@@ -640,7 +651,7 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info, | |||
640 | /* If we were handling a user message, format | 651 | /* If we were handling a user message, format |
641 | a response to send to the upper layer to | 652 | a response to send to the upper layer to |
642 | tell it about the error. */ | 653 | tell it about the error. */ |
643 | return_hosed_msg(smi_info); | 654 | return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED); |
644 | } | 655 | } |
645 | si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0); | 656 | si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0); |
646 | } | 657 | } |
@@ -684,22 +695,24 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info, | |||
684 | { | 695 | { |
685 | /* We are idle and the upper layer requested that I fetch | 696 | /* We are idle and the upper layer requested that I fetch |
686 | events, so do so. */ | 697 | events, so do so. */ |
687 | unsigned char msg[2]; | 698 | atomic_set(&smi_info->req_events, 0); |
688 | 699 | ||
689 | spin_lock(&smi_info->count_lock); | 700 | smi_info->curr_msg = ipmi_alloc_smi_msg(); |
690 | smi_info->flag_fetches++; | 701 | if (!smi_info->curr_msg) |
691 | spin_unlock(&smi_info->count_lock); | 702 | goto out; |
692 | 703 | ||
693 | atomic_set(&smi_info->req_events, 0); | 704 | smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); |
694 | msg[0] = (IPMI_NETFN_APP_REQUEST << 2); | 705 | smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD; |
695 | msg[1] = IPMI_GET_MSG_FLAGS_CMD; | 706 | smi_info->curr_msg->data_size = 2; |
696 | 707 | ||
697 | smi_info->handlers->start_transaction( | 708 | smi_info->handlers->start_transaction( |
698 | smi_info->si_sm, msg, 2); | 709 | smi_info->si_sm, |
699 | smi_info->si_state = SI_GETTING_FLAGS; | 710 | smi_info->curr_msg->data, |
711 | smi_info->curr_msg->data_size); | ||
712 | smi_info->si_state = SI_GETTING_EVENTS; | ||
700 | goto restart; | 713 | goto restart; |
701 | } | 714 | } |
702 | 715 | out: | |
703 | return si_sm_result; | 716 | return si_sm_result; |
704 | } | 717 | } |
705 | 718 | ||
@@ -714,6 +727,15 @@ static void sender(void *send_info, | |||
714 | struct timeval t; | 727 | struct timeval t; |
715 | #endif | 728 | #endif |
716 | 729 | ||
730 | if (atomic_read(&smi_info->stop_operation)) { | ||
731 | msg->rsp[0] = msg->data[0] | 4; | ||
732 | msg->rsp[1] = msg->data[1]; | ||
733 | msg->rsp[2] = IPMI_ERR_UNSPECIFIED; | ||
734 | msg->rsp_size = 3; | ||
735 | deliver_recv_msg(smi_info, msg); | ||
736 | return; | ||
737 | } | ||
738 | |||
717 | spin_lock_irqsave(&(smi_info->msg_lock), flags); | 739 | spin_lock_irqsave(&(smi_info->msg_lock), flags); |
718 | #ifdef DEBUG_TIMING | 740 | #ifdef DEBUG_TIMING |
719 | do_gettimeofday(&t); | 741 | do_gettimeofday(&t); |
@@ -805,13 +827,21 @@ static void poll(void *send_info) | |||
805 | { | 827 | { |
806 | struct smi_info *smi_info = send_info; | 828 | struct smi_info *smi_info = send_info; |
807 | 829 | ||
808 | smi_event_handler(smi_info, 0); | 830 | /* |
831 | * Make sure there is some delay in the poll loop so we can | ||
832 | * drive time forward and timeout things. | ||
833 | */ | ||
834 | udelay(10); | ||
835 | smi_event_handler(smi_info, 10); | ||
809 | } | 836 | } |
810 | 837 | ||
811 | static void request_events(void *send_info) | 838 | static void request_events(void *send_info) |
812 | { | 839 | { |
813 | struct smi_info *smi_info = send_info; | 840 | struct smi_info *smi_info = send_info; |
814 | 841 | ||
842 | if (atomic_read(&smi_info->stop_operation)) | ||
843 | return; | ||
844 | |||
815 | atomic_set(&smi_info->req_events, 1); | 845 | atomic_set(&smi_info->req_events, 1); |
816 | } | 846 | } |
817 | 847 | ||
@@ -949,12 +979,21 @@ static int smi_start_processing(void *send_info, | |||
949 | return 0; | 979 | return 0; |
950 | } | 980 | } |
951 | 981 | ||
982 | static void set_maintenance_mode(void *send_info, int enable) | ||
983 | { | ||
984 | struct smi_info *smi_info = send_info; | ||
985 | |||
986 | if (!enable) | ||
987 | atomic_set(&smi_info->req_events, 0); | ||
988 | } | ||
989 | |||
952 | static struct ipmi_smi_handlers handlers = | 990 | static struct ipmi_smi_handlers handlers = |
953 | { | 991 | { |
954 | .owner = THIS_MODULE, | 992 | .owner = THIS_MODULE, |
955 | .start_processing = smi_start_processing, | 993 | .start_processing = smi_start_processing, |
956 | .sender = sender, | 994 | .sender = sender, |
957 | .request_events = request_events, | 995 | .request_events = request_events, |
996 | .set_maintenance_mode = set_maintenance_mode, | ||
958 | .set_run_to_completion = set_run_to_completion, | 997 | .set_run_to_completion = set_run_to_completion, |
959 | .poll = poll, | 998 | .poll = poll, |
960 | }; | 999 | }; |
@@ -987,6 +1026,16 @@ static int num_regshifts = 0; | |||
987 | static int slave_addrs[SI_MAX_PARMS]; | 1026 | static int slave_addrs[SI_MAX_PARMS]; |
988 | static int num_slave_addrs = 0; | 1027 | static int num_slave_addrs = 0; |
989 | 1028 | ||
1029 | #define IPMI_IO_ADDR_SPACE 0 | ||
1030 | #define IPMI_MEM_ADDR_SPACE 1 | ||
1031 | static char *addr_space_to_str[] = { "I/O", "mem" }; | ||
1032 | |||
1033 | static int hotmod_handler(const char *val, struct kernel_param *kp); | ||
1034 | |||
1035 | module_param_call(hotmod, hotmod_handler, NULL, NULL, 0200); | ||
1036 | MODULE_PARM_DESC(hotmod, "Add and remove interfaces. See" | ||
1037 | " Documentation/IPMI.txt in the kernel sources for the" | ||
1038 | " gory details."); | ||
990 | 1039 | ||
991 | module_param_named(trydefaults, si_trydefaults, bool, 0); | 1040 | module_param_named(trydefaults, si_trydefaults, bool, 0); |
992 | MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the" | 1041 | MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the" |
@@ -1038,12 +1087,12 @@ module_param_array(force_kipmid, int, &num_force_kipmid, 0); | |||
1038 | MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or" | 1087 | MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or" |
1039 | " disabled(0). Normally the IPMI driver auto-detects" | 1088 | " disabled(0). Normally the IPMI driver auto-detects" |
1040 | " this, but the value may be overridden by this parm."); | 1089 | " this, but the value may be overridden by this parm."); |
1090 | module_param(unload_when_empty, int, 0); | ||
1091 | MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are" | ||
1092 | " specified or found, default is 1. Setting to 0" | ||
1093 | " is useful for hot add of devices using hotmod."); | ||
1041 | 1094 | ||
1042 | 1095 | ||
1043 | #define IPMI_IO_ADDR_SPACE 0 | ||
1044 | #define IPMI_MEM_ADDR_SPACE 1 | ||
1045 | static char *addr_space_to_str[] = { "I/O", "memory" }; | ||
1046 | |||
1047 | static void std_irq_cleanup(struct smi_info *info) | 1096 | static void std_irq_cleanup(struct smi_info *info) |
1048 | { | 1097 | { |
1049 | if (info->si_type == SI_BT) | 1098 | if (info->si_type == SI_BT) |
@@ -1317,6 +1366,234 @@ static int mem_setup(struct smi_info *info) | |||
1317 | return 0; | 1366 | return 0; |
1318 | } | 1367 | } |
1319 | 1368 | ||
/*
 * Parms come in as <op1>[:op2[:op3...]]. ops are:
 * add|remove,kcs|bt|smic,mem|i/o,<address>[,<opt1>[,<opt2>[,...]]]
 * Options are:
 * rsp=<regspacing>
 * rsi=<regsize>
 * rsh=<regshift>
 * irq=<irq>
 * ipmb=<ipmb addr>
 */
/* The operation requested by one hotmod entry. */
enum hotmod_op { HM_ADD, HM_REMOVE };
/* One name -> value mapping for the string lookup tables below.
   Each table is terminated by an entry with a NULL name. */
struct hotmod_vals {
	char *name;
	int val;
};
/* Valid hotmod operations. */
static struct hotmod_vals hotmod_ops[] = {
	{ "add", HM_ADD },
	{ "remove", HM_REMOVE },
	{ NULL }
};
/* Valid interface types, mapped to the driver's si_type values. */
static struct hotmod_vals hotmod_si[] = {
	{ "kcs", SI_KCS },
	{ "smic", SI_SMIC },
	{ "bt", SI_BT },
	{ NULL }
};
/* Valid address spaces, mapped to IPMI_*_ADDR_SPACE. */
static struct hotmod_vals hotmod_as[] = {
	{ "mem", IPMI_MEM_ADDR_SPACE },
	{ "i/o", IPMI_IO_ADDR_SPACE },
	{ NULL }
};
/*
 * Case-insensitive string compare, strcmp-like return value
 * (<0, 0, >0); a shorter string orders before its extension.
 *
 * The original version compared bytes directly, so despite its
 * name it was case-SENSITIVE — "KCS" would not match "kcs" even
 * though hardcode_find_bmc() switched from strcmp() to this
 * function precisely to accept either case.  Fold both sides
 * through tolower() so the name and the behavior agree.
 */
static int ipmi_strcasecmp(const char *s1, const char *s2)
{
	while (*s1 || *s2) {
		/* Cast to unsigned char: tolower() on a negative
		   plain char is undefined behavior. */
		int c1 = tolower((unsigned char)*s1);
		int c2 = tolower((unsigned char)*s2);

		if (!*s1)
			return -1;
		if (!*s2)
			return 1;
		if (c1 != c2)
			return c1 - c2;
		s1++;
		s2++;
	}
	return 0;
}
1414 | static int parse_str(struct hotmod_vals *v, int *val, char *name, char **curr) | ||
1415 | { | ||
1416 | char *s; | ||
1417 | int i; | ||
1418 | |||
1419 | s = strchr(*curr, ','); | ||
1420 | if (!s) { | ||
1421 | printk(KERN_WARNING PFX "No hotmod %s given.\n", name); | ||
1422 | return -EINVAL; | ||
1423 | } | ||
1424 | *s = '\0'; | ||
1425 | s++; | ||
1426 | for (i = 0; hotmod_ops[i].name; i++) { | ||
1427 | if (ipmi_strcasecmp(*curr, v[i].name) == 0) { | ||
1428 | *val = v[i].val; | ||
1429 | *curr = s; | ||
1430 | return 0; | ||
1431 | } | ||
1432 | } | ||
1433 | |||
1434 | printk(KERN_WARNING PFX "Invalid hotmod %s '%s'\n", name, *curr); | ||
1435 | return -EINVAL; | ||
1436 | } | ||
1437 | |||
1438 | static int hotmod_handler(const char *val, struct kernel_param *kp) | ||
1439 | { | ||
1440 | char *str = kstrdup(val, GFP_KERNEL); | ||
1441 | int rv = -EINVAL; | ||
1442 | char *next, *curr, *s, *n, *o; | ||
1443 | enum hotmod_op op; | ||
1444 | enum si_type si_type; | ||
1445 | int addr_space; | ||
1446 | unsigned long addr; | ||
1447 | int regspacing; | ||
1448 | int regsize; | ||
1449 | int regshift; | ||
1450 | int irq; | ||
1451 | int ipmb; | ||
1452 | int ival; | ||
1453 | struct smi_info *info; | ||
1454 | |||
1455 | if (!str) | ||
1456 | return -ENOMEM; | ||
1457 | |||
1458 | /* Kill any trailing spaces, as we can get a "\n" from echo. */ | ||
1459 | ival = strlen(str) - 1; | ||
1460 | while ((ival >= 0) && isspace(str[ival])) { | ||
1461 | str[ival] = '\0'; | ||
1462 | ival--; | ||
1463 | } | ||
1464 | |||
1465 | for (curr = str; curr; curr = next) { | ||
1466 | regspacing = 1; | ||
1467 | regsize = 1; | ||
1468 | regshift = 0; | ||
1469 | irq = 0; | ||
1470 | ipmb = 0x20; | ||
1471 | |||
1472 | next = strchr(curr, ':'); | ||
1473 | if (next) { | ||
1474 | *next = '\0'; | ||
1475 | next++; | ||
1476 | } | ||
1477 | |||
1478 | rv = parse_str(hotmod_ops, &ival, "operation", &curr); | ||
1479 | if (rv) | ||
1480 | break; | ||
1481 | op = ival; | ||
1482 | |||
1483 | rv = parse_str(hotmod_si, &ival, "interface type", &curr); | ||
1484 | if (rv) | ||
1485 | break; | ||
1486 | si_type = ival; | ||
1487 | |||
1488 | rv = parse_str(hotmod_as, &addr_space, "address space", &curr); | ||
1489 | if (rv) | ||
1490 | break; | ||
1491 | |||
1492 | s = strchr(curr, ','); | ||
1493 | if (s) { | ||
1494 | *s = '\0'; | ||
1495 | s++; | ||
1496 | } | ||
1497 | addr = simple_strtoul(curr, &n, 0); | ||
1498 | if ((*n != '\0') || (*curr == '\0')) { | ||
1499 | printk(KERN_WARNING PFX "Invalid hotmod address" | ||
1500 | " '%s'\n", curr); | ||
1501 | break; | ||
1502 | } | ||
1503 | |||
1504 | while (s) { | ||
1505 | curr = s; | ||
1506 | s = strchr(curr, ','); | ||
1507 | if (s) { | ||
1508 | *s = '\0'; | ||
1509 | s++; | ||
1510 | } | ||
1511 | o = strchr(curr, '='); | ||
1512 | if (o) { | ||
1513 | *o = '\0'; | ||
1514 | o++; | ||
1515 | } | ||
1516 | #define HOTMOD_INT_OPT(name, val) \ | ||
1517 | if (ipmi_strcasecmp(curr, name) == 0) { \ | ||
1518 | if (!o) { \ | ||
1519 | printk(KERN_WARNING PFX \ | ||
1520 | "No option given for '%s'\n", \ | ||
1521 | curr); \ | ||
1522 | goto out; \ | ||
1523 | } \ | ||
1524 | val = simple_strtoul(o, &n, 0); \ | ||
1525 | if ((*n != '\0') || (*o == '\0')) { \ | ||
1526 | printk(KERN_WARNING PFX \ | ||
1527 | "Bad option given for '%s'\n", \ | ||
1528 | curr); \ | ||
1529 | goto out; \ | ||
1530 | } \ | ||
1531 | } | ||
1532 | |||
1533 | HOTMOD_INT_OPT("rsp", regspacing) | ||
1534 | else HOTMOD_INT_OPT("rsi", regsize) | ||
1535 | else HOTMOD_INT_OPT("rsh", regshift) | ||
1536 | else HOTMOD_INT_OPT("irq", irq) | ||
1537 | else HOTMOD_INT_OPT("ipmb", ipmb) | ||
1538 | else { | ||
1539 | printk(KERN_WARNING PFX | ||
1540 | "Invalid hotmod option '%s'\n", | ||
1541 | curr); | ||
1542 | goto out; | ||
1543 | } | ||
1544 | #undef HOTMOD_INT_OPT | ||
1545 | } | ||
1546 | |||
1547 | if (op == HM_ADD) { | ||
1548 | info = kzalloc(sizeof(*info), GFP_KERNEL); | ||
1549 | if (!info) { | ||
1550 | rv = -ENOMEM; | ||
1551 | goto out; | ||
1552 | } | ||
1553 | |||
1554 | info->addr_source = "hotmod"; | ||
1555 | info->si_type = si_type; | ||
1556 | info->io.addr_data = addr; | ||
1557 | info->io.addr_type = addr_space; | ||
1558 | if (addr_space == IPMI_MEM_ADDR_SPACE) | ||
1559 | info->io_setup = mem_setup; | ||
1560 | else | ||
1561 | info->io_setup = port_setup; | ||
1562 | |||
1563 | info->io.addr = NULL; | ||
1564 | info->io.regspacing = regspacing; | ||
1565 | if (!info->io.regspacing) | ||
1566 | info->io.regspacing = DEFAULT_REGSPACING; | ||
1567 | info->io.regsize = regsize; | ||
1568 | if (!info->io.regsize) | ||
1569 | info->io.regsize = DEFAULT_REGSPACING; | ||
1570 | info->io.regshift = regshift; | ||
1571 | info->irq = irq; | ||
1572 | if (info->irq) | ||
1573 | info->irq_setup = std_irq_setup; | ||
1574 | info->slave_addr = ipmb; | ||
1575 | |||
1576 | try_smi_init(info); | ||
1577 | } else { | ||
1578 | /* remove */ | ||
1579 | struct smi_info *e, *tmp_e; | ||
1580 | |||
1581 | mutex_lock(&smi_infos_lock); | ||
1582 | list_for_each_entry_safe(e, tmp_e, &smi_infos, link) { | ||
1583 | if (e->io.addr_type != addr_space) | ||
1584 | continue; | ||
1585 | if (e->si_type != si_type) | ||
1586 | continue; | ||
1587 | if (e->io.addr_data == addr) | ||
1588 | cleanup_one_si(e); | ||
1589 | } | ||
1590 | mutex_unlock(&smi_infos_lock); | ||
1591 | } | ||
1592 | } | ||
1593 | out: | ||
1594 | kfree(str); | ||
1595 | return rv; | ||
1596 | } | ||
1320 | 1597 | ||
1321 | static __devinit void hardcode_find_bmc(void) | 1598 | static __devinit void hardcode_find_bmc(void) |
1322 | { | 1599 | { |
@@ -1333,11 +1610,11 @@ static __devinit void hardcode_find_bmc(void) | |||
1333 | 1610 | ||
1334 | info->addr_source = "hardcoded"; | 1611 | info->addr_source = "hardcoded"; |
1335 | 1612 | ||
1336 | if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) { | 1613 | if (!si_type[i] || ipmi_strcasecmp(si_type[i], "kcs") == 0) { |
1337 | info->si_type = SI_KCS; | 1614 | info->si_type = SI_KCS; |
1338 | } else if (strcmp(si_type[i], "smic") == 0) { | 1615 | } else if (ipmi_strcasecmp(si_type[i], "smic") == 0) { |
1339 | info->si_type = SI_SMIC; | 1616 | info->si_type = SI_SMIC; |
1340 | } else if (strcmp(si_type[i], "bt") == 0) { | 1617 | } else if (ipmi_strcasecmp(si_type[i], "bt") == 0) { |
1341 | info->si_type = SI_BT; | 1618 | info->si_type = SI_BT; |
1342 | } else { | 1619 | } else { |
1343 | printk(KERN_WARNING | 1620 | printk(KERN_WARNING |
@@ -1952,19 +2229,9 @@ static int try_get_dev_id(struct smi_info *smi_info) | |||
1952 | static int type_file_read_proc(char *page, char **start, off_t off, | 2229 | static int type_file_read_proc(char *page, char **start, off_t off, |
1953 | int count, int *eof, void *data) | 2230 | int count, int *eof, void *data) |
1954 | { | 2231 | { |
1955 | char *out = (char *) page; | ||
1956 | struct smi_info *smi = data; | 2232 | struct smi_info *smi = data; |
1957 | 2233 | ||
1958 | switch (smi->si_type) { | 2234 | return sprintf(page, "%s\n", si_to_str[smi->si_type]); |
1959 | case SI_KCS: | ||
1960 | return sprintf(out, "kcs\n"); | ||
1961 | case SI_SMIC: | ||
1962 | return sprintf(out, "smic\n"); | ||
1963 | case SI_BT: | ||
1964 | return sprintf(out, "bt\n"); | ||
1965 | default: | ||
1966 | return 0; | ||
1967 | } | ||
1968 | } | 2235 | } |
1969 | 2236 | ||
1970 | static int stat_file_read_proc(char *page, char **start, off_t off, | 2237 | static int stat_file_read_proc(char *page, char **start, off_t off, |
@@ -2000,7 +2267,24 @@ static int stat_file_read_proc(char *page, char **start, off_t off, | |||
2000 | out += sprintf(out, "incoming_messages: %ld\n", | 2267 | out += sprintf(out, "incoming_messages: %ld\n", |
2001 | smi->incoming_messages); | 2268 | smi->incoming_messages); |
2002 | 2269 | ||
2003 | return (out - ((char *) page)); | 2270 | return out - page; |
2271 | } | ||
2272 | |||
/*
 * /proc read handler for the per-interface "params" entry: prints
 * this interface's configuration as a single line.  The option
 * names (rsp=, rsi=, rsh=, irq=, ipmb=) mirror those accepted by
 * the hotmod parameter — presumably so the output can be fed back
 * to hotmod to re-create the interface (verify against the hotmod
 * parser before relying on it).
 */
static int param_read_proc(char *page, char **start, off_t off,
			   int count, int *eof, void *data)
{
	struct smi_info *smi = data;

	return sprintf(page,
		       "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
		       si_to_str[smi->si_type],
		       addr_space_to_str[smi->io.addr_type],
		       smi->io.addr_data,
		       smi->io.regspacing,
		       smi->io.regsize,
		       smi->io.regshift,
		       smi->irq,
		       smi->slave_addr);
}
2005 | 2289 | ||
2006 | /* | 2290 | /* |
@@ -2362,6 +2646,7 @@ static int try_smi_init(struct smi_info *new_smi) | |||
2362 | new_smi, | 2646 | new_smi, |
2363 | &new_smi->device_id, | 2647 | &new_smi->device_id, |
2364 | new_smi->dev, | 2648 | new_smi->dev, |
2649 | "bmc", | ||
2365 | new_smi->slave_addr); | 2650 | new_smi->slave_addr); |
2366 | if (rv) { | 2651 | if (rv) { |
2367 | printk(KERN_ERR | 2652 | printk(KERN_ERR |
@@ -2390,6 +2675,16 @@ static int try_smi_init(struct smi_info *new_smi) | |||
2390 | goto out_err_stop_timer; | 2675 | goto out_err_stop_timer; |
2391 | } | 2676 | } |
2392 | 2677 | ||
2678 | rv = ipmi_smi_add_proc_entry(new_smi->intf, "params", | ||
2679 | param_read_proc, NULL, | ||
2680 | new_smi, THIS_MODULE); | ||
2681 | if (rv) { | ||
2682 | printk(KERN_ERR | ||
2683 | "ipmi_si: Unable to create proc entry: %d\n", | ||
2684 | rv); | ||
2685 | goto out_err_stop_timer; | ||
2686 | } | ||
2687 | |||
2393 | list_add_tail(&new_smi->link, &smi_infos); | 2688 | list_add_tail(&new_smi->link, &smi_infos); |
2394 | 2689 | ||
2395 | mutex_unlock(&smi_infos_lock); | 2690 | mutex_unlock(&smi_infos_lock); |
@@ -2483,7 +2778,12 @@ static __devinit int init_ipmi_si(void) | |||
2483 | #endif | 2778 | #endif |
2484 | 2779 | ||
2485 | #ifdef CONFIG_PCI | 2780 | #ifdef CONFIG_PCI |
2486 | pci_module_init(&ipmi_pci_driver); | 2781 | rv = pci_register_driver(&ipmi_pci_driver); |
2782 | if (rv){ | ||
2783 | printk(KERN_ERR | ||
2784 | "init_ipmi_si: Unable to register PCI driver: %d\n", | ||
2785 | rv); | ||
2786 | } | ||
2487 | #endif | 2787 | #endif |
2488 | 2788 | ||
2489 | if (si_trydefaults) { | 2789 | if (si_trydefaults) { |
@@ -2498,7 +2798,7 @@ static __devinit int init_ipmi_si(void) | |||
2498 | } | 2798 | } |
2499 | 2799 | ||
2500 | mutex_lock(&smi_infos_lock); | 2800 | mutex_lock(&smi_infos_lock); |
2501 | if (list_empty(&smi_infos)) { | 2801 | if (unload_when_empty && list_empty(&smi_infos)) { |
2502 | mutex_unlock(&smi_infos_lock); | 2802 | mutex_unlock(&smi_infos_lock); |
2503 | #ifdef CONFIG_PCI | 2803 | #ifdef CONFIG_PCI |
2504 | pci_unregister_driver(&ipmi_pci_driver); | 2804 | pci_unregister_driver(&ipmi_pci_driver); |
@@ -2513,7 +2813,7 @@ static __devinit int init_ipmi_si(void) | |||
2513 | } | 2813 | } |
2514 | module_init(init_ipmi_si); | 2814 | module_init(init_ipmi_si); |
2515 | 2815 | ||
2516 | static void __devexit cleanup_one_si(struct smi_info *to_clean) | 2816 | static void cleanup_one_si(struct smi_info *to_clean) |
2517 | { | 2817 | { |
2518 | int rv; | 2818 | int rv; |
2519 | unsigned long flags; | 2819 | unsigned long flags; |
diff --git a/drivers/char/ipmi/ipmi_smic_sm.c b/drivers/char/ipmi/ipmi_smic_sm.c index 39d7e5ef1a2b..e64ea7d25d24 100644 --- a/drivers/char/ipmi/ipmi_smic_sm.c +++ b/drivers/char/ipmi/ipmi_smic_sm.c | |||
@@ -141,12 +141,14 @@ static int start_smic_transaction(struct si_sm_data *smic, | |||
141 | { | 141 | { |
142 | unsigned int i; | 142 | unsigned int i; |
143 | 143 | ||
144 | if ((size < 2) || (size > MAX_SMIC_WRITE_SIZE)) { | 144 | if (size < 2) |
145 | return -1; | 145 | return IPMI_REQ_LEN_INVALID_ERR; |
146 | } | 146 | if (size > MAX_SMIC_WRITE_SIZE) |
147 | if ((smic->state != SMIC_IDLE) && (smic->state != SMIC_HOSED)) { | 147 | return IPMI_REQ_LEN_EXCEEDED_ERR; |
148 | return -2; | 148 | |
149 | } | 149 | if ((smic->state != SMIC_IDLE) && (smic->state != SMIC_HOSED)) |
150 | return IPMI_NOT_IN_MY_STATE_ERR; | ||
151 | |||
150 | if (smic_debug & SMIC_DEBUG_MSG) { | 152 | if (smic_debug & SMIC_DEBUG_MSG) { |
151 | printk(KERN_INFO "start_smic_transaction -"); | 153 | printk(KERN_INFO "start_smic_transaction -"); |
152 | for (i = 0; i < size; i ++) { | 154 | for (i = 0; i < size; i ++) { |
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c index 73f759eaa5a6..90fb2a541916 100644 --- a/drivers/char/ipmi/ipmi_watchdog.c +++ b/drivers/char/ipmi/ipmi_watchdog.c | |||
@@ -135,6 +135,7 @@ | |||
135 | static int nowayout = WATCHDOG_NOWAYOUT; | 135 | static int nowayout = WATCHDOG_NOWAYOUT; |
136 | 136 | ||
137 | static ipmi_user_t watchdog_user = NULL; | 137 | static ipmi_user_t watchdog_user = NULL; |
138 | static int watchdog_ifnum; | ||
138 | 139 | ||
139 | /* Default the timeout to 10 seconds. */ | 140 | /* Default the timeout to 10 seconds. */ |
140 | static int timeout = 10; | 141 | static int timeout = 10; |
@@ -161,6 +162,8 @@ static struct fasync_struct *fasync_q = NULL; | |||
161 | static char pretimeout_since_last_heartbeat = 0; | 162 | static char pretimeout_since_last_heartbeat = 0; |
162 | static char expect_close; | 163 | static char expect_close; |
163 | 164 | ||
165 | static int ifnum_to_use = -1; | ||
166 | |||
164 | static DECLARE_RWSEM(register_sem); | 167 | static DECLARE_RWSEM(register_sem); |
165 | 168 | ||
166 | /* Parameters to ipmi_set_timeout */ | 169 | /* Parameters to ipmi_set_timeout */ |
@@ -169,6 +172,8 @@ static DECLARE_RWSEM(register_sem); | |||
169 | #define IPMI_SET_TIMEOUT_FORCE_HB 2 | 172 | #define IPMI_SET_TIMEOUT_FORCE_HB 2 |
170 | 173 | ||
171 | static int ipmi_set_timeout(int do_heartbeat); | 174 | static int ipmi_set_timeout(int do_heartbeat); |
175 | static void ipmi_register_watchdog(int ipmi_intf); | ||
176 | static void ipmi_unregister_watchdog(int ipmi_intf); | ||
172 | 177 | ||
173 | /* If true, the driver will start running as soon as it is configured | 178 | /* If true, the driver will start running as soon as it is configured |
174 | and ready. */ | 179 | and ready. */ |
@@ -245,6 +250,26 @@ static int get_param_str(char *buffer, struct kernel_param *kp) | |||
245 | return strlen(buffer); | 250 | return strlen(buffer); |
246 | } | 251 | } |
247 | 252 | ||
253 | |||
254 | static int set_param_wdog_ifnum(const char *val, struct kernel_param *kp) | ||
255 | { | ||
256 | int rv = param_set_int(val, kp); | ||
257 | if (rv) | ||
258 | return rv; | ||
259 | if ((ifnum_to_use < 0) || (ifnum_to_use == watchdog_ifnum)) | ||
260 | return 0; | ||
261 | |||
262 | ipmi_unregister_watchdog(watchdog_ifnum); | ||
263 | ipmi_register_watchdog(ifnum_to_use); | ||
264 | return 0; | ||
265 | } | ||
266 | |||
267 | module_param_call(ifnum_to_use, set_param_wdog_ifnum, get_param_int, | ||
268 | &ifnum_to_use, 0644); | ||
269 | MODULE_PARM_DESC(ifnum_to_use, "The interface number to use for the watchdog " | ||
270 | "timer. Setting to -1 defaults to the first registered " | ||
271 | "interface"); | ||
272 | |||
248 | module_param_call(timeout, set_param_int, get_param_int, &timeout, 0644); | 273 | module_param_call(timeout, set_param_int, get_param_int, &timeout, 0644); |
249 | MODULE_PARM_DESC(timeout, "Timeout value in seconds."); | 274 | MODULE_PARM_DESC(timeout, "Timeout value in seconds."); |
250 | 275 | ||
@@ -263,12 +288,13 @@ module_param_call(preop, set_param_str, get_param_str, preop_op, 0644); | |||
263 | MODULE_PARM_DESC(preop, "Pretimeout driver operation. One of: " | 288 | MODULE_PARM_DESC(preop, "Pretimeout driver operation. One of: " |
264 | "preop_none, preop_panic, preop_give_data."); | 289 | "preop_none, preop_panic, preop_give_data."); |
265 | 290 | ||
266 | module_param(start_now, int, 0); | 291 | module_param(start_now, int, 0444); |
267 | MODULE_PARM_DESC(start_now, "Set to 1 to start the watchdog as" | 292 | MODULE_PARM_DESC(start_now, "Set to 1 to start the watchdog as" |
268 | "soon as the driver is loaded."); | 293 | "soon as the driver is loaded."); |
269 | 294 | ||
270 | module_param(nowayout, int, 0644); | 295 | module_param(nowayout, int, 0644); |
271 | MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=CONFIG_WATCHDOG_NOWAYOUT)"); | 296 | MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started " |
297 | "(default=CONFIG_WATCHDOG_NOWAYOUT)"); | ||
272 | 298 | ||
273 | /* Default state of the timer. */ | 299 | /* Default state of the timer. */ |
274 | static unsigned char ipmi_watchdog_state = WDOG_TIMEOUT_NONE; | 300 | static unsigned char ipmi_watchdog_state = WDOG_TIMEOUT_NONE; |
@@ -872,6 +898,11 @@ static void ipmi_register_watchdog(int ipmi_intf) | |||
872 | if (watchdog_user) | 898 | if (watchdog_user) |
873 | goto out; | 899 | goto out; |
874 | 900 | ||
901 | if ((ifnum_to_use >= 0) && (ifnum_to_use != ipmi_intf)) | ||
902 | goto out; | ||
903 | |||
904 | watchdog_ifnum = ipmi_intf; | ||
905 | |||
875 | rv = ipmi_create_user(ipmi_intf, &ipmi_hndlrs, NULL, &watchdog_user); | 906 | rv = ipmi_create_user(ipmi_intf, &ipmi_hndlrs, NULL, &watchdog_user); |
876 | if (rv < 0) { | 907 | if (rv < 0) { |
877 | printk(KERN_CRIT PFX "Unable to register with ipmi\n"); | 908 | printk(KERN_CRIT PFX "Unable to register with ipmi\n"); |
@@ -901,6 +932,39 @@ static void ipmi_register_watchdog(int ipmi_intf) | |||
901 | } | 932 | } |
902 | } | 933 | } |
903 | 934 | ||
/*
 * Detach the watchdog from the given IPMI interface.  No-op when
 * the watchdog is not currently bound, or is bound to a different
 * interface.  Serialized against registration via register_sem.
 * The teardown order matters: the misc device is removed first so
 * no new requests arrive, then we wait for in-flight messages that
 * reference our buffers, and only then destroy the IPMI user.
 */
static void ipmi_unregister_watchdog(int ipmi_intf)
{
	int rv;

	down_write(&register_sem);

	if (!watchdog_user)
		goto out;

	if (watchdog_ifnum != ipmi_intf)
		goto out;

	/* Make sure no one can call us any more. */
	misc_deregister(&ipmi_wdog_miscdev);

	/* Wait to make sure the message makes it out.  The lower layer has
	   pointers to our buffers, we want to make sure they are done before
	   we release our memory. */
	while (atomic_read(&set_timeout_tofree))
		schedule_timeout_uninterruptible(1);

	/* Disconnect from IPMI. */
	rv = ipmi_destroy_user(watchdog_user);
	if (rv) {
		printk(KERN_WARNING PFX "error unlinking from IPMI: %d\n",
		       rv);
	}
	watchdog_user = NULL;

 out:
	up_write(&register_sem);
}
967 | |||
904 | #ifdef HAVE_NMI_HANDLER | 968 | #ifdef HAVE_NMI_HANDLER |
905 | static int | 969 | static int |
906 | ipmi_nmi(void *dev_id, int cpu, int handled) | 970 | ipmi_nmi(void *dev_id, int cpu, int handled) |
@@ -1004,9 +1068,7 @@ static void ipmi_new_smi(int if_num, struct device *device) | |||
1004 | 1068 | ||
1005 | static void ipmi_smi_gone(int if_num) | 1069 | static void ipmi_smi_gone(int if_num) |
1006 | { | 1070 | { |
1007 | /* This can never be called, because once the watchdog is | 1071 | ipmi_unregister_watchdog(if_num); |
1008 | registered, the interface can't go away until the watchdog | ||
1009 | is unregistered. */ | ||
1010 | } | 1072 | } |
1011 | 1073 | ||
1012 | static struct ipmi_smi_watcher smi_watcher = | 1074 | static struct ipmi_smi_watcher smi_watcher = |
@@ -1148,30 +1210,32 @@ static int __init ipmi_wdog_init(void) | |||
1148 | 1210 | ||
1149 | check_parms(); | 1211 | check_parms(); |
1150 | 1212 | ||
1213 | register_reboot_notifier(&wdog_reboot_notifier); | ||
1214 | atomic_notifier_chain_register(&panic_notifier_list, | ||
1215 | &wdog_panic_notifier); | ||
1216 | |||
1151 | rv = ipmi_smi_watcher_register(&smi_watcher); | 1217 | rv = ipmi_smi_watcher_register(&smi_watcher); |
1152 | if (rv) { | 1218 | if (rv) { |
1153 | #ifdef HAVE_NMI_HANDLER | 1219 | #ifdef HAVE_NMI_HANDLER |
1154 | if (preaction_val == WDOG_PRETIMEOUT_NMI) | 1220 | if (preaction_val == WDOG_PRETIMEOUT_NMI) |
1155 | release_nmi(&ipmi_nmi_handler); | 1221 | release_nmi(&ipmi_nmi_handler); |
1156 | #endif | 1222 | #endif |
1223 | atomic_notifier_chain_unregister(&panic_notifier_list, | ||
1224 | &wdog_panic_notifier); | ||
1225 | unregister_reboot_notifier(&wdog_reboot_notifier); | ||
1157 | printk(KERN_WARNING PFX "can't register smi watcher\n"); | 1226 | printk(KERN_WARNING PFX "can't register smi watcher\n"); |
1158 | return rv; | 1227 | return rv; |
1159 | } | 1228 | } |
1160 | 1229 | ||
1161 | register_reboot_notifier(&wdog_reboot_notifier); | ||
1162 | atomic_notifier_chain_register(&panic_notifier_list, | ||
1163 | &wdog_panic_notifier); | ||
1164 | |||
1165 | printk(KERN_INFO PFX "driver initialized\n"); | 1230 | printk(KERN_INFO PFX "driver initialized\n"); |
1166 | 1231 | ||
1167 | return 0; | 1232 | return 0; |
1168 | } | 1233 | } |
1169 | 1234 | ||
1170 | static __exit void ipmi_unregister_watchdog(void) | 1235 | static void __exit ipmi_wdog_exit(void) |
1171 | { | 1236 | { |
1172 | int rv; | 1237 | ipmi_smi_watcher_unregister(&smi_watcher); |
1173 | 1238 | ipmi_unregister_watchdog(watchdog_ifnum); | |
1174 | down_write(®ister_sem); | ||
1175 | 1239 | ||
1176 | #ifdef HAVE_NMI_HANDLER | 1240 | #ifdef HAVE_NMI_HANDLER |
1177 | if (nmi_handler_registered) | 1241 | if (nmi_handler_registered) |
@@ -1179,37 +1243,8 @@ static __exit void ipmi_unregister_watchdog(void) | |||
1179 | #endif | 1243 | #endif |
1180 | 1244 | ||
1181 | atomic_notifier_chain_unregister(&panic_notifier_list, | 1245 | atomic_notifier_chain_unregister(&panic_notifier_list, |
1182 | &wdog_panic_notifier); | 1246 | &wdog_panic_notifier); |
1183 | unregister_reboot_notifier(&wdog_reboot_notifier); | 1247 | unregister_reboot_notifier(&wdog_reboot_notifier); |
1184 | |||
1185 | if (! watchdog_user) | ||
1186 | goto out; | ||
1187 | |||
1188 | /* Make sure no one can call us any more. */ | ||
1189 | misc_deregister(&ipmi_wdog_miscdev); | ||
1190 | |||
1191 | /* Wait to make sure the message makes it out. The lower layer has | ||
1192 | pointers to our buffers, we want to make sure they are done before | ||
1193 | we release our memory. */ | ||
1194 | while (atomic_read(&set_timeout_tofree)) | ||
1195 | schedule_timeout_uninterruptible(1); | ||
1196 | |||
1197 | /* Disconnect from IPMI. */ | ||
1198 | rv = ipmi_destroy_user(watchdog_user); | ||
1199 | if (rv) { | ||
1200 | printk(KERN_WARNING PFX "error unlinking from IPMI: %d\n", | ||
1201 | rv); | ||
1202 | } | ||
1203 | watchdog_user = NULL; | ||
1204 | |||
1205 | out: | ||
1206 | up_write(®ister_sem); | ||
1207 | } | ||
1208 | |||
1209 | static void __exit ipmi_wdog_exit(void) | ||
1210 | { | ||
1211 | ipmi_smi_watcher_unregister(&smi_watcher); | ||
1212 | ipmi_unregister_watchdog(); | ||
1213 | } | 1248 | } |
1214 | module_exit(ipmi_wdog_exit); | 1249 | module_exit(ipmi_wdog_exit); |
1215 | module_init(ipmi_wdog_init); | 1250 | module_init(ipmi_wdog_init); |
diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c index bd9195e17956..8f591945ebd9 100644 --- a/drivers/char/istallion.c +++ b/drivers/char/istallion.c | |||
@@ -3476,6 +3476,8 @@ static int stli_initecp(stlibrd_t *brdp) | |||
3476 | if (sig.magic != cpu_to_le32(ECP_MAGIC)) | 3476 | if (sig.magic != cpu_to_le32(ECP_MAGIC)) |
3477 | { | 3477 | { |
3478 | release_region(brdp->iobase, brdp->iosize); | 3478 | release_region(brdp->iobase, brdp->iosize); |
3479 | iounmap(brdp->membase); | ||
3480 | brdp->membase = NULL; | ||
3479 | return -ENODEV; | 3481 | return -ENODEV; |
3480 | } | 3482 | } |
3481 | 3483 | ||
@@ -3632,6 +3634,8 @@ static int stli_initonb(stlibrd_t *brdp) | |||
3632 | sig.magic3 != cpu_to_le16(ONB_MAGIC3)) | 3634 | sig.magic3 != cpu_to_le16(ONB_MAGIC3)) |
3633 | { | 3635 | { |
3634 | release_region(brdp->iobase, brdp->iosize); | 3636 | release_region(brdp->iobase, brdp->iosize); |
3637 | iounmap(brdp->membase); | ||
3638 | brdp->membase = NULL; | ||
3635 | return -ENODEV; | 3639 | return -ENODEV; |
3636 | } | 3640 | } |
3637 | 3641 | ||
diff --git a/drivers/char/misc.c b/drivers/char/misc.c index 7a484fc7cb9e..7e975f606924 100644 --- a/drivers/char/misc.c +++ b/drivers/char/misc.c | |||
@@ -199,6 +199,8 @@ int misc_register(struct miscdevice * misc) | |||
199 | dev_t dev; | 199 | dev_t dev; |
200 | int err = 0; | 200 | int err = 0; |
201 | 201 | ||
202 | INIT_LIST_HEAD(&misc->list); | ||
203 | |||
202 | down(&misc_sem); | 204 | down(&misc_sem); |
203 | list_for_each_entry(c, &misc_list, list) { | 205 | list_for_each_entry(c, &misc_list, list) { |
204 | if (c->minor == misc->minor) { | 206 | if (c->minor == misc->minor) { |
diff --git a/drivers/char/mmtimer.c b/drivers/char/mmtimer.c index 22b9905c1e52..c09160383a53 100644 --- a/drivers/char/mmtimer.c +++ b/drivers/char/mmtimer.c | |||
@@ -680,7 +680,7 @@ static int __init mmtimer_init(void) | |||
680 | if (sn_rtc_cycles_per_second < 100000) { | 680 | if (sn_rtc_cycles_per_second < 100000) { |
681 | printk(KERN_ERR "%s: unable to determine clock frequency\n", | 681 | printk(KERN_ERR "%s: unable to determine clock frequency\n", |
682 | MMTIMER_NAME); | 682 | MMTIMER_NAME); |
683 | return -1; | 683 | goto out1; |
684 | } | 684 | } |
685 | 685 | ||
686 | mmtimer_femtoperiod = ((unsigned long)1E15 + sn_rtc_cycles_per_second / | 686 | mmtimer_femtoperiod = ((unsigned long)1E15 + sn_rtc_cycles_per_second / |
@@ -689,13 +689,13 @@ static int __init mmtimer_init(void) | |||
689 | if (request_irq(SGI_MMTIMER_VECTOR, mmtimer_interrupt, IRQF_PERCPU, MMTIMER_NAME, NULL)) { | 689 | if (request_irq(SGI_MMTIMER_VECTOR, mmtimer_interrupt, IRQF_PERCPU, MMTIMER_NAME, NULL)) { |
690 | printk(KERN_WARNING "%s: unable to allocate interrupt.", | 690 | printk(KERN_WARNING "%s: unable to allocate interrupt.", |
691 | MMTIMER_NAME); | 691 | MMTIMER_NAME); |
692 | return -1; | 692 | goto out1; |
693 | } | 693 | } |
694 | 694 | ||
695 | if (misc_register(&mmtimer_miscdev)) { | 695 | if (misc_register(&mmtimer_miscdev)) { |
696 | printk(KERN_ERR "%s: failed to register device\n", | 696 | printk(KERN_ERR "%s: failed to register device\n", |
697 | MMTIMER_NAME); | 697 | MMTIMER_NAME); |
698 | return -1; | 698 | goto out2; |
699 | } | 699 | } |
700 | 700 | ||
701 | /* Get max numbered node, calculate slots needed */ | 701 | /* Get max numbered node, calculate slots needed */ |
@@ -709,16 +709,18 @@ static int __init mmtimer_init(void) | |||
709 | if (timers == NULL) { | 709 | if (timers == NULL) { |
710 | printk(KERN_ERR "%s: failed to allocate memory for device\n", | 710 | printk(KERN_ERR "%s: failed to allocate memory for device\n", |
711 | MMTIMER_NAME); | 711 | MMTIMER_NAME); |
712 | return -1; | 712 | goto out3; |
713 | } | 713 | } |
714 | 714 | ||
715 | memset(timers,0,(sizeof(mmtimer_t *)*maxn)); | ||
716 | |||
715 | /* Allocate mmtimer_t's for each online node */ | 717 | /* Allocate mmtimer_t's for each online node */ |
716 | for_each_online_node(node) { | 718 | for_each_online_node(node) { |
717 | timers[node] = kmalloc_node(sizeof(mmtimer_t)*NUM_COMPARATORS, GFP_KERNEL, node); | 719 | timers[node] = kmalloc_node(sizeof(mmtimer_t)*NUM_COMPARATORS, GFP_KERNEL, node); |
718 | if (timers[node] == NULL) { | 720 | if (timers[node] == NULL) { |
719 | printk(KERN_ERR "%s: failed to allocate memory for device\n", | 721 | printk(KERN_ERR "%s: failed to allocate memory for device\n", |
720 | MMTIMER_NAME); | 722 | MMTIMER_NAME); |
721 | return -1; | 723 | goto out4; |
722 | } | 724 | } |
723 | for (i=0; i< NUM_COMPARATORS; i++) { | 725 | for (i=0; i< NUM_COMPARATORS; i++) { |
724 | mmtimer_t * base = timers[node] + i; | 726 | mmtimer_t * base = timers[node] + i; |
@@ -739,6 +741,17 @@ static int __init mmtimer_init(void) | |||
739 | sn_rtc_cycles_per_second/(unsigned long)1E6); | 741 | sn_rtc_cycles_per_second/(unsigned long)1E6); |
740 | 742 | ||
741 | return 0; | 743 | return 0; |
744 | |||
745 | out4: | ||
746 | for_each_online_node(node) { | ||
747 | kfree(timers[node]); | ||
748 | } | ||
749 | out3: | ||
750 | misc_deregister(&mmtimer_miscdev); | ||
751 | out2: | ||
752 | free_irq(SGI_MMTIMER_VECTOR, NULL); | ||
753 | out1: | ||
754 | return -1; | ||
742 | } | 755 | } |
743 | 756 | ||
744 | module_init(mmtimer_init); | 757 | module_init(mmtimer_init); |
diff --git a/drivers/char/moxa.c b/drivers/char/moxa.c index 2d025a9fd14d..8b316953173d 100644 --- a/drivers/char/moxa.c +++ b/drivers/char/moxa.c | |||
@@ -498,9 +498,12 @@ static void __exit moxa_exit(void) | |||
498 | printk("Couldn't unregister MOXA Intellio family serial driver\n"); | 498 | printk("Couldn't unregister MOXA Intellio family serial driver\n"); |
499 | put_tty_driver(moxaDriver); | 499 | put_tty_driver(moxaDriver); |
500 | 500 | ||
501 | for (i = 0; i < MAX_BOARDS; i++) | 501 | for (i = 0; i < MAX_BOARDS; i++) { |
502 | if (moxaBaseAddr[i]) | ||
503 | iounmap(moxaBaseAddr[i]); | ||
502 | if (moxa_boards[i].busType == MOXA_BUS_TYPE_PCI) | 504 | if (moxa_boards[i].busType == MOXA_BUS_TYPE_PCI) |
503 | pci_dev_put(moxa_boards[i].pciInfo.pdev); | 505 | pci_dev_put(moxa_boards[i].pciInfo.pdev); |
506 | } | ||
504 | 507 | ||
505 | if (verbose) | 508 | if (verbose) |
506 | printk("Done\n"); | 509 | printk("Done\n"); |
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c index 1bd12296dca5..74d21c1c104f 100644 --- a/drivers/char/pcmcia/synclink_cs.c +++ b/drivers/char/pcmcia/synclink_cs.c | |||
@@ -75,8 +75,10 @@ | |||
75 | #include <pcmcia/cisreg.h> | 75 | #include <pcmcia/cisreg.h> |
76 | #include <pcmcia/ds.h> | 76 | #include <pcmcia/ds.h> |
77 | 77 | ||
78 | #ifdef CONFIG_HDLC_MODULE | 78 | #if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_CS_MODULE)) |
79 | #define CONFIG_HDLC 1 | 79 | #define SYNCLINK_GENERIC_HDLC 1 |
80 | #else | ||
81 | #define SYNCLINK_GENERIC_HDLC 0 | ||
80 | #endif | 82 | #endif |
81 | 83 | ||
82 | #define GET_USER(error,value,addr) error = get_user(value,addr) | 84 | #define GET_USER(error,value,addr) error = get_user(value,addr) |
@@ -235,7 +237,7 @@ typedef struct _mgslpc_info { | |||
235 | int dosyncppp; | 237 | int dosyncppp; |
236 | spinlock_t netlock; | 238 | spinlock_t netlock; |
237 | 239 | ||
238 | #ifdef CONFIG_HDLC | 240 | #if SYNCLINK_GENERIC_HDLC |
239 | struct net_device *netdev; | 241 | struct net_device *netdev; |
240 | #endif | 242 | #endif |
241 | 243 | ||
@@ -392,7 +394,7 @@ static void tx_timeout(unsigned long context); | |||
392 | 394 | ||
393 | static int ioctl_common(MGSLPC_INFO *info, unsigned int cmd, unsigned long arg); | 395 | static int ioctl_common(MGSLPC_INFO *info, unsigned int cmd, unsigned long arg); |
394 | 396 | ||
395 | #ifdef CONFIG_HDLC | 397 | #if SYNCLINK_GENERIC_HDLC |
396 | #define dev_to_port(D) (dev_to_hdlc(D)->priv) | 398 | #define dev_to_port(D) (dev_to_hdlc(D)->priv) |
397 | static void hdlcdev_tx_done(MGSLPC_INFO *info); | 399 | static void hdlcdev_tx_done(MGSLPC_INFO *info); |
398 | static void hdlcdev_rx(MGSLPC_INFO *info, char *buf, int size); | 400 | static void hdlcdev_rx(MGSLPC_INFO *info, char *buf, int size); |
@@ -1053,7 +1055,7 @@ static void tx_done(MGSLPC_INFO *info) | |||
1053 | info->drop_rts_on_tx_done = 0; | 1055 | info->drop_rts_on_tx_done = 0; |
1054 | } | 1056 | } |
1055 | 1057 | ||
1056 | #ifdef CONFIG_HDLC | 1058 | #if SYNCLINK_GENERIC_HDLC |
1057 | if (info->netcount) | 1059 | if (info->netcount) |
1058 | hdlcdev_tx_done(info); | 1060 | hdlcdev_tx_done(info); |
1059 | else | 1061 | else |
@@ -1164,7 +1166,7 @@ static void dcd_change(MGSLPC_INFO *info) | |||
1164 | } | 1166 | } |
1165 | else | 1167 | else |
1166 | info->input_signal_events.dcd_down++; | 1168 | info->input_signal_events.dcd_down++; |
1167 | #ifdef CONFIG_HDLC | 1169 | #if SYNCLINK_GENERIC_HDLC |
1168 | if (info->netcount) { | 1170 | if (info->netcount) { |
1169 | if (info->serial_signals & SerialSignal_DCD) | 1171 | if (info->serial_signals & SerialSignal_DCD) |
1170 | netif_carrier_on(info->netdev); | 1172 | netif_carrier_on(info->netdev); |
@@ -2953,7 +2955,7 @@ static void mgslpc_add_device(MGSLPC_INFO *info) | |||
2953 | printk( "SyncLink PC Card %s:IO=%04X IRQ=%d\n", | 2955 | printk( "SyncLink PC Card %s:IO=%04X IRQ=%d\n", |
2954 | info->device_name, info->io_base, info->irq_level); | 2956 | info->device_name, info->io_base, info->irq_level); |
2955 | 2957 | ||
2956 | #ifdef CONFIG_HDLC | 2958 | #if SYNCLINK_GENERIC_HDLC |
2957 | hdlcdev_init(info); | 2959 | hdlcdev_init(info); |
2958 | #endif | 2960 | #endif |
2959 | } | 2961 | } |
@@ -2969,7 +2971,7 @@ static void mgslpc_remove_device(MGSLPC_INFO *remove_info) | |||
2969 | last->next_device = info->next_device; | 2971 | last->next_device = info->next_device; |
2970 | else | 2972 | else |
2971 | mgslpc_device_list = info->next_device; | 2973 | mgslpc_device_list = info->next_device; |
2972 | #ifdef CONFIG_HDLC | 2974 | #if SYNCLINK_GENERIC_HDLC |
2973 | hdlcdev_exit(info); | 2975 | hdlcdev_exit(info); |
2974 | #endif | 2976 | #endif |
2975 | release_resources(info); | 2977 | release_resources(info); |
@@ -3901,7 +3903,7 @@ static int rx_get_frame(MGSLPC_INFO *info) | |||
3901 | return_frame = 1; | 3903 | return_frame = 1; |
3902 | } | 3904 | } |
3903 | framesize = 0; | 3905 | framesize = 0; |
3904 | #ifdef CONFIG_HDLC | 3906 | #if SYNCLINK_GENERIC_HDLC |
3905 | { | 3907 | { |
3906 | struct net_device_stats *stats = hdlc_stats(info->netdev); | 3908 | struct net_device_stats *stats = hdlc_stats(info->netdev); |
3907 | stats->rx_errors++; | 3909 | stats->rx_errors++; |
@@ -3935,7 +3937,7 @@ static int rx_get_frame(MGSLPC_INFO *info) | |||
3935 | ++framesize; | 3937 | ++framesize; |
3936 | } | 3938 | } |
3937 | 3939 | ||
3938 | #ifdef CONFIG_HDLC | 3940 | #if SYNCLINK_GENERIC_HDLC |
3939 | if (info->netcount) | 3941 | if (info->netcount) |
3940 | hdlcdev_rx(info, buf->data, framesize); | 3942 | hdlcdev_rx(info, buf->data, framesize); |
3941 | else | 3943 | else |
@@ -4091,7 +4093,7 @@ static void tx_timeout(unsigned long context) | |||
4091 | 4093 | ||
4092 | spin_unlock_irqrestore(&info->lock,flags); | 4094 | spin_unlock_irqrestore(&info->lock,flags); |
4093 | 4095 | ||
4094 | #ifdef CONFIG_HDLC | 4096 | #if SYNCLINK_GENERIC_HDLC |
4095 | if (info->netcount) | 4097 | if (info->netcount) |
4096 | hdlcdev_tx_done(info); | 4098 | hdlcdev_tx_done(info); |
4097 | else | 4099 | else |
@@ -4099,7 +4101,7 @@ static void tx_timeout(unsigned long context) | |||
4099 | bh_transmit(info); | 4101 | bh_transmit(info); |
4100 | } | 4102 | } |
4101 | 4103 | ||
4102 | #ifdef CONFIG_HDLC | 4104 | #if SYNCLINK_GENERIC_HDLC |
4103 | 4105 | ||
4104 | /** | 4106 | /** |
4105 | * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.) | 4107 | * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.) |
diff --git a/drivers/char/rio/rio_linux.c b/drivers/char/rio/rio_linux.c index 7ac68cb3bedd..e79b2ede8510 100644 --- a/drivers/char/rio/rio_linux.c +++ b/drivers/char/rio/rio_linux.c | |||
@@ -1026,6 +1026,7 @@ static int __init rio_init(void) | |||
1026 | found++; | 1026 | found++; |
1027 | } else { | 1027 | } else { |
1028 | iounmap(p->RIOHosts[p->RIONumHosts].Caddr); | 1028 | iounmap(p->RIOHosts[p->RIONumHosts].Caddr); |
1029 | p->RIOHosts[p->RIONumHosts].Caddr = NULL; | ||
1029 | } | 1030 | } |
1030 | } | 1031 | } |
1031 | 1032 | ||
@@ -1078,6 +1079,7 @@ static int __init rio_init(void) | |||
1078 | found++; | 1079 | found++; |
1079 | } else { | 1080 | } else { |
1080 | iounmap(p->RIOHosts[p->RIONumHosts].Caddr); | 1081 | iounmap(p->RIOHosts[p->RIONumHosts].Caddr); |
1082 | p->RIOHosts[p->RIONumHosts].Caddr = NULL; | ||
1081 | } | 1083 | } |
1082 | #else | 1084 | #else |
1083 | printk(KERN_ERR "Found an older RIO PCI card, but the driver is not " "compiled to support it.\n"); | 1085 | printk(KERN_ERR "Found an older RIO PCI card, but the driver is not " "compiled to support it.\n"); |
@@ -1117,8 +1119,10 @@ static int __init rio_init(void) | |||
1117 | } | 1119 | } |
1118 | } | 1120 | } |
1119 | 1121 | ||
1120 | if (!okboard) | 1122 | if (!okboard) { |
1121 | iounmap(hp->Caddr); | 1123 | iounmap(hp->Caddr); |
1124 | hp->Caddr = NULL; | ||
1125 | } | ||
1122 | } | 1126 | } |
1123 | } | 1127 | } |
1124 | 1128 | ||
@@ -1188,6 +1192,8 @@ static void __exit rio_exit(void) | |||
1188 | } | 1192 | } |
1189 | /* It is safe/allowed to del_timer a non-active timer */ | 1193 | /* It is safe/allowed to del_timer a non-active timer */ |
1190 | del_timer(&hp->timer); | 1194 | del_timer(&hp->timer); |
1195 | if (hp->Caddr) | ||
1196 | iounmap(hp->Caddr); | ||
1191 | if (hp->Type == RIO_PCI) | 1197 | if (hp->Type == RIO_PCI) |
1192 | pci_dev_put(hp->pdev); | 1198 | pci_dev_put(hp->pdev); |
1193 | } | 1199 | } |
diff --git a/drivers/char/riscom8.c b/drivers/char/riscom8.c index 722dd3e74185..0a77bfcd5b5e 100644 --- a/drivers/char/riscom8.c +++ b/drivers/char/riscom8.c | |||
@@ -82,11 +82,6 @@ | |||
82 | static struct riscom_board * IRQ_to_board[16]; | 82 | static struct riscom_board * IRQ_to_board[16]; |
83 | static struct tty_driver *riscom_driver; | 83 | static struct tty_driver *riscom_driver; |
84 | 84 | ||
85 | static unsigned long baud_table[] = { | ||
86 | 0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800, | ||
87 | 9600, 19200, 38400, 57600, 76800, 0, | ||
88 | }; | ||
89 | |||
90 | static struct riscom_board rc_board[RC_NBOARD] = { | 85 | static struct riscom_board rc_board[RC_NBOARD] = { |
91 | { | 86 | { |
92 | .base = RC_IOBASE1, | 87 | .base = RC_IOBASE1, |
diff --git a/drivers/char/synclink.c b/drivers/char/synclink.c index 147c30da81ea..645187b9141e 100644 --- a/drivers/char/synclink.c +++ b/drivers/char/synclink.c | |||
@@ -101,8 +101,10 @@ | |||
101 | #include <linux/hdlc.h> | 101 | #include <linux/hdlc.h> |
102 | #include <linux/dma-mapping.h> | 102 | #include <linux/dma-mapping.h> |
103 | 103 | ||
104 | #ifdef CONFIG_HDLC_MODULE | 104 | #if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_MODULE)) |
105 | #define CONFIG_HDLC 1 | 105 | #define SYNCLINK_GENERIC_HDLC 1 |
106 | #else | ||
107 | #define SYNCLINK_GENERIC_HDLC 0 | ||
106 | #endif | 108 | #endif |
107 | 109 | ||
108 | #define GET_USER(error,value,addr) error = get_user(value,addr) | 110 | #define GET_USER(error,value,addr) error = get_user(value,addr) |
@@ -320,7 +322,7 @@ struct mgsl_struct { | |||
320 | int dosyncppp; | 322 | int dosyncppp; |
321 | spinlock_t netlock; | 323 | spinlock_t netlock; |
322 | 324 | ||
323 | #ifdef CONFIG_HDLC | 325 | #if SYNCLINK_GENERIC_HDLC |
324 | struct net_device *netdev; | 326 | struct net_device *netdev; |
325 | #endif | 327 | #endif |
326 | }; | 328 | }; |
@@ -728,7 +730,7 @@ static void usc_loopmode_send_done( struct mgsl_struct * info ); | |||
728 | 730 | ||
729 | static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg); | 731 | static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg); |
730 | 732 | ||
731 | #ifdef CONFIG_HDLC | 733 | #if SYNCLINK_GENERIC_HDLC |
732 | #define dev_to_port(D) (dev_to_hdlc(D)->priv) | 734 | #define dev_to_port(D) (dev_to_hdlc(D)->priv) |
733 | static void hdlcdev_tx_done(struct mgsl_struct *info); | 735 | static void hdlcdev_tx_done(struct mgsl_struct *info); |
734 | static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size); | 736 | static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size); |
@@ -1277,7 +1279,7 @@ static void mgsl_isr_transmit_status( struct mgsl_struct *info ) | |||
1277 | info->drop_rts_on_tx_done = 0; | 1279 | info->drop_rts_on_tx_done = 0; |
1278 | } | 1280 | } |
1279 | 1281 | ||
1280 | #ifdef CONFIG_HDLC | 1282 | #if SYNCLINK_GENERIC_HDLC |
1281 | if (info->netcount) | 1283 | if (info->netcount) |
1282 | hdlcdev_tx_done(info); | 1284 | hdlcdev_tx_done(info); |
1283 | else | 1285 | else |
@@ -1342,7 +1344,7 @@ static void mgsl_isr_io_pin( struct mgsl_struct *info ) | |||
1342 | info->input_signal_events.dcd_up++; | 1344 | info->input_signal_events.dcd_up++; |
1343 | } else | 1345 | } else |
1344 | info->input_signal_events.dcd_down++; | 1346 | info->input_signal_events.dcd_down++; |
1345 | #ifdef CONFIG_HDLC | 1347 | #if SYNCLINK_GENERIC_HDLC |
1346 | if (info->netcount) { | 1348 | if (info->netcount) { |
1347 | if (status & MISCSTATUS_DCD) | 1349 | if (status & MISCSTATUS_DCD) |
1348 | netif_carrier_on(info->netdev); | 1350 | netif_carrier_on(info->netdev); |
@@ -4313,7 +4315,7 @@ static void mgsl_add_device( struct mgsl_struct *info ) | |||
4313 | info->max_frame_size ); | 4315 | info->max_frame_size ); |
4314 | } | 4316 | } |
4315 | 4317 | ||
4316 | #ifdef CONFIG_HDLC | 4318 | #if SYNCLINK_GENERIC_HDLC |
4317 | hdlcdev_init(info); | 4319 | hdlcdev_init(info); |
4318 | #endif | 4320 | #endif |
4319 | 4321 | ||
@@ -4471,7 +4473,7 @@ static void synclink_cleanup(void) | |||
4471 | 4473 | ||
4472 | info = mgsl_device_list; | 4474 | info = mgsl_device_list; |
4473 | while(info) { | 4475 | while(info) { |
4474 | #ifdef CONFIG_HDLC | 4476 | #if SYNCLINK_GENERIC_HDLC |
4475 | hdlcdev_exit(info); | 4477 | hdlcdev_exit(info); |
4476 | #endif | 4478 | #endif |
4477 | mgsl_release_resources(info); | 4479 | mgsl_release_resources(info); |
@@ -6645,7 +6647,7 @@ static int mgsl_get_rx_frame(struct mgsl_struct *info) | |||
6645 | return_frame = 1; | 6647 | return_frame = 1; |
6646 | } | 6648 | } |
6647 | framesize = 0; | 6649 | framesize = 0; |
6648 | #ifdef CONFIG_HDLC | 6650 | #if SYNCLINK_GENERIC_HDLC |
6649 | { | 6651 | { |
6650 | struct net_device_stats *stats = hdlc_stats(info->netdev); | 6652 | struct net_device_stats *stats = hdlc_stats(info->netdev); |
6651 | stats->rx_errors++; | 6653 | stats->rx_errors++; |
@@ -6721,7 +6723,7 @@ static int mgsl_get_rx_frame(struct mgsl_struct *info) | |||
6721 | *ptmp); | 6723 | *ptmp); |
6722 | } | 6724 | } |
6723 | 6725 | ||
6724 | #ifdef CONFIG_HDLC | 6726 | #if SYNCLINK_GENERIC_HDLC |
6725 | if (info->netcount) | 6727 | if (info->netcount) |
6726 | hdlcdev_rx(info,info->intermediate_rxbuffer,framesize); | 6728 | hdlcdev_rx(info,info->intermediate_rxbuffer,framesize); |
6727 | else | 6729 | else |
@@ -7625,7 +7627,7 @@ static void mgsl_tx_timeout(unsigned long context) | |||
7625 | 7627 | ||
7626 | spin_unlock_irqrestore(&info->irq_spinlock,flags); | 7628 | spin_unlock_irqrestore(&info->irq_spinlock,flags); |
7627 | 7629 | ||
7628 | #ifdef CONFIG_HDLC | 7630 | #if SYNCLINK_GENERIC_HDLC |
7629 | if (info->netcount) | 7631 | if (info->netcount) |
7630 | hdlcdev_tx_done(info); | 7632 | hdlcdev_tx_done(info); |
7631 | else | 7633 | else |
@@ -7701,7 +7703,7 @@ static int usc_loopmode_active( struct mgsl_struct * info) | |||
7701 | return usc_InReg( info, CCSR ) & BIT7 ? 1 : 0 ; | 7703 | return usc_InReg( info, CCSR ) & BIT7 ? 1 : 0 ; |
7702 | } | 7704 | } |
7703 | 7705 | ||
7704 | #ifdef CONFIG_HDLC | 7706 | #if SYNCLINK_GENERIC_HDLC |
7705 | 7707 | ||
7706 | /** | 7708 | /** |
7707 | * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.) | 7709 | * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.) |
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c index 07f34d43dc7f..e4730a7312b5 100644 --- a/drivers/char/synclink_gt.c +++ b/drivers/char/synclink_gt.c | |||
@@ -83,8 +83,10 @@ | |||
83 | 83 | ||
84 | #include "linux/synclink.h" | 84 | #include "linux/synclink.h" |
85 | 85 | ||
86 | #ifdef CONFIG_HDLC_MODULE | 86 | #if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_GT_MODULE)) |
87 | #define CONFIG_HDLC 1 | 87 | #define SYNCLINK_GENERIC_HDLC 1 |
88 | #else | ||
89 | #define SYNCLINK_GENERIC_HDLC 0 | ||
88 | #endif | 90 | #endif |
89 | 91 | ||
90 | /* | 92 | /* |
@@ -171,7 +173,7 @@ static void set_break(struct tty_struct *tty, int break_state); | |||
171 | /* | 173 | /* |
172 | * generic HDLC support and callbacks | 174 | * generic HDLC support and callbacks |
173 | */ | 175 | */ |
174 | #ifdef CONFIG_HDLC | 176 | #if SYNCLINK_GENERIC_HDLC |
175 | #define dev_to_port(D) (dev_to_hdlc(D)->priv) | 177 | #define dev_to_port(D) (dev_to_hdlc(D)->priv) |
176 | static void hdlcdev_tx_done(struct slgt_info *info); | 178 | static void hdlcdev_tx_done(struct slgt_info *info); |
177 | static void hdlcdev_rx(struct slgt_info *info, char *buf, int size); | 179 | static void hdlcdev_rx(struct slgt_info *info, char *buf, int size); |
@@ -359,7 +361,7 @@ struct slgt_info { | |||
359 | int netcount; | 361 | int netcount; |
360 | int dosyncppp; | 362 | int dosyncppp; |
361 | spinlock_t netlock; | 363 | spinlock_t netlock; |
362 | #ifdef CONFIG_HDLC | 364 | #if SYNCLINK_GENERIC_HDLC |
363 | struct net_device *netdev; | 365 | struct net_device *netdev; |
364 | #endif | 366 | #endif |
365 | 367 | ||
@@ -1354,7 +1356,7 @@ static void set_break(struct tty_struct *tty, int break_state) | |||
1354 | spin_unlock_irqrestore(&info->lock,flags); | 1356 | spin_unlock_irqrestore(&info->lock,flags); |
1355 | } | 1357 | } |
1356 | 1358 | ||
1357 | #ifdef CONFIG_HDLC | 1359 | #if SYNCLINK_GENERIC_HDLC |
1358 | 1360 | ||
1359 | /** | 1361 | /** |
1360 | * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.) | 1362 | * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.) |
@@ -2002,7 +2004,7 @@ static void dcd_change(struct slgt_info *info) | |||
2002 | } else { | 2004 | } else { |
2003 | info->input_signal_events.dcd_down++; | 2005 | info->input_signal_events.dcd_down++; |
2004 | } | 2006 | } |
2005 | #ifdef CONFIG_HDLC | 2007 | #if SYNCLINK_GENERIC_HDLC |
2006 | if (info->netcount) { | 2008 | if (info->netcount) { |
2007 | if (info->signals & SerialSignal_DCD) | 2009 | if (info->signals & SerialSignal_DCD) |
2008 | netif_carrier_on(info->netdev); | 2010 | netif_carrier_on(info->netdev); |
@@ -2180,7 +2182,7 @@ static void isr_txeom(struct slgt_info *info, unsigned short status) | |||
2180 | set_signals(info); | 2182 | set_signals(info); |
2181 | } | 2183 | } |
2182 | 2184 | ||
2183 | #ifdef CONFIG_HDLC | 2185 | #if SYNCLINK_GENERIC_HDLC |
2184 | if (info->netcount) | 2186 | if (info->netcount) |
2185 | hdlcdev_tx_done(info); | 2187 | hdlcdev_tx_done(info); |
2186 | else | 2188 | else |
@@ -3306,7 +3308,7 @@ static void add_device(struct slgt_info *info) | |||
3306 | devstr, info->device_name, info->phys_reg_addr, | 3308 | devstr, info->device_name, info->phys_reg_addr, |
3307 | info->irq_level, info->max_frame_size); | 3309 | info->irq_level, info->max_frame_size); |
3308 | 3310 | ||
3309 | #ifdef CONFIG_HDLC | 3311 | #if SYNCLINK_GENERIC_HDLC |
3310 | hdlcdev_init(info); | 3312 | hdlcdev_init(info); |
3311 | #endif | 3313 | #endif |
3312 | } | 3314 | } |
@@ -3488,7 +3490,7 @@ static void slgt_cleanup(void) | |||
3488 | /* release devices */ | 3490 | /* release devices */ |
3489 | info = slgt_device_list; | 3491 | info = slgt_device_list; |
3490 | while(info) { | 3492 | while(info) { |
3491 | #ifdef CONFIG_HDLC | 3493 | #if SYNCLINK_GENERIC_HDLC |
3492 | hdlcdev_exit(info); | 3494 | hdlcdev_exit(info); |
3493 | #endif | 3495 | #endif |
3494 | free_dma_bufs(info); | 3496 | free_dma_bufs(info); |
@@ -3522,6 +3524,7 @@ static int __init slgt_init(void) | |||
3522 | 3524 | ||
3523 | if (!slgt_device_list) { | 3525 | if (!slgt_device_list) { |
3524 | printk("%s no devices found\n",driver_name); | 3526 | printk("%s no devices found\n",driver_name); |
3527 | pci_unregister_driver(&pci_driver); | ||
3525 | return -ENODEV; | 3528 | return -ENODEV; |
3526 | } | 3529 | } |
3527 | 3530 | ||
@@ -4433,7 +4436,7 @@ check_again: | |||
4433 | framesize = 0; | 4436 | framesize = 0; |
4434 | } | 4437 | } |
4435 | 4438 | ||
4436 | #ifdef CONFIG_HDLC | 4439 | #if SYNCLINK_GENERIC_HDLC |
4437 | if (framesize == 0) { | 4440 | if (framesize == 0) { |
4438 | struct net_device_stats *stats = hdlc_stats(info->netdev); | 4441 | struct net_device_stats *stats = hdlc_stats(info->netdev); |
4439 | stats->rx_errors++; | 4442 | stats->rx_errors++; |
@@ -4476,7 +4479,7 @@ check_again: | |||
4476 | framesize++; | 4479 | framesize++; |
4477 | } | 4480 | } |
4478 | 4481 | ||
4479 | #ifdef CONFIG_HDLC | 4482 | #if SYNCLINK_GENERIC_HDLC |
4480 | if (info->netcount) | 4483 | if (info->netcount) |
4481 | hdlcdev_rx(info,info->tmp_rbuf, framesize); | 4484 | hdlcdev_rx(info,info->tmp_rbuf, framesize); |
4482 | else | 4485 | else |
@@ -4779,7 +4782,7 @@ static void tx_timeout(unsigned long context) | |||
4779 | info->tx_count = 0; | 4782 | info->tx_count = 0; |
4780 | spin_unlock_irqrestore(&info->lock,flags); | 4783 | spin_unlock_irqrestore(&info->lock,flags); |
4781 | 4784 | ||
4782 | #ifdef CONFIG_HDLC | 4785 | #if SYNCLINK_GENERIC_HDLC |
4783 | if (info->netcount) | 4786 | if (info->netcount) |
4784 | hdlcdev_tx_done(info); | 4787 | hdlcdev_tx_done(info); |
4785 | else | 4788 | else |
diff --git a/drivers/char/synclinkmp.c b/drivers/char/synclinkmp.c index 13a57245cf2e..20a96ef250be 100644 --- a/drivers/char/synclinkmp.c +++ b/drivers/char/synclinkmp.c | |||
@@ -67,8 +67,10 @@ | |||
67 | #include <linux/workqueue.h> | 67 | #include <linux/workqueue.h> |
68 | #include <linux/hdlc.h> | 68 | #include <linux/hdlc.h> |
69 | 69 | ||
70 | #ifdef CONFIG_HDLC_MODULE | 70 | #if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINKMP_MODULE)) |
71 | #define CONFIG_HDLC 1 | 71 | #define SYNCLINK_GENERIC_HDLC 1 |
72 | #else | ||
73 | #define SYNCLINK_GENERIC_HDLC 0 | ||
72 | #endif | 74 | #endif |
73 | 75 | ||
74 | #define GET_USER(error,value,addr) error = get_user(value,addr) | 76 | #define GET_USER(error,value,addr) error = get_user(value,addr) |
@@ -280,7 +282,7 @@ typedef struct _synclinkmp_info { | |||
280 | int dosyncppp; | 282 | int dosyncppp; |
281 | spinlock_t netlock; | 283 | spinlock_t netlock; |
282 | 284 | ||
283 | #ifdef CONFIG_HDLC | 285 | #if SYNCLINK_GENERIC_HDLC |
284 | struct net_device *netdev; | 286 | struct net_device *netdev; |
285 | #endif | 287 | #endif |
286 | 288 | ||
@@ -536,7 +538,7 @@ static void throttle(struct tty_struct * tty); | |||
536 | static void unthrottle(struct tty_struct * tty); | 538 | static void unthrottle(struct tty_struct * tty); |
537 | static void set_break(struct tty_struct *tty, int break_state); | 539 | static void set_break(struct tty_struct *tty, int break_state); |
538 | 540 | ||
539 | #ifdef CONFIG_HDLC | 541 | #if SYNCLINK_GENERIC_HDLC |
540 | #define dev_to_port(D) (dev_to_hdlc(D)->priv) | 542 | #define dev_to_port(D) (dev_to_hdlc(D)->priv) |
541 | static void hdlcdev_tx_done(SLMP_INFO *info); | 543 | static void hdlcdev_tx_done(SLMP_INFO *info); |
542 | static void hdlcdev_rx(SLMP_INFO *info, char *buf, int size); | 544 | static void hdlcdev_rx(SLMP_INFO *info, char *buf, int size); |
@@ -1607,7 +1609,7 @@ static void set_break(struct tty_struct *tty, int break_state) | |||
1607 | spin_unlock_irqrestore(&info->lock,flags); | 1609 | spin_unlock_irqrestore(&info->lock,flags); |
1608 | } | 1610 | } |
1609 | 1611 | ||
1610 | #ifdef CONFIG_HDLC | 1612 | #if SYNCLINK_GENERIC_HDLC |
1611 | 1613 | ||
1612 | /** | 1614 | /** |
1613 | * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.) | 1615 | * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.) |
@@ -2339,7 +2341,7 @@ static void isr_txeom(SLMP_INFO * info, unsigned char status) | |||
2339 | set_signals(info); | 2341 | set_signals(info); |
2340 | } | 2342 | } |
2341 | 2343 | ||
2342 | #ifdef CONFIG_HDLC | 2344 | #if SYNCLINK_GENERIC_HDLC |
2343 | if (info->netcount) | 2345 | if (info->netcount) |
2344 | hdlcdev_tx_done(info); | 2346 | hdlcdev_tx_done(info); |
2345 | else | 2347 | else |
@@ -2523,7 +2525,7 @@ void isr_io_pin( SLMP_INFO *info, u16 status ) | |||
2523 | info->input_signal_events.dcd_up++; | 2525 | info->input_signal_events.dcd_up++; |
2524 | } else | 2526 | } else |
2525 | info->input_signal_events.dcd_down++; | 2527 | info->input_signal_events.dcd_down++; |
2526 | #ifdef CONFIG_HDLC | 2528 | #if SYNCLINK_GENERIC_HDLC |
2527 | if (info->netcount) { | 2529 | if (info->netcount) { |
2528 | if (status & SerialSignal_DCD) | 2530 | if (status & SerialSignal_DCD) |
2529 | netif_carrier_on(info->netdev); | 2531 | netif_carrier_on(info->netdev); |
@@ -3783,7 +3785,7 @@ void add_device(SLMP_INFO *info) | |||
3783 | info->irq_level, | 3785 | info->irq_level, |
3784 | info->max_frame_size ); | 3786 | info->max_frame_size ); |
3785 | 3787 | ||
3786 | #ifdef CONFIG_HDLC | 3788 | #if SYNCLINK_GENERIC_HDLC |
3787 | hdlcdev_init(info); | 3789 | hdlcdev_init(info); |
3788 | #endif | 3790 | #endif |
3789 | } | 3791 | } |
@@ -3977,7 +3979,7 @@ static void synclinkmp_cleanup(void) | |||
3977 | /* release devices */ | 3979 | /* release devices */ |
3978 | info = synclinkmp_device_list; | 3980 | info = synclinkmp_device_list; |
3979 | while(info) { | 3981 | while(info) { |
3980 | #ifdef CONFIG_HDLC | 3982 | #if SYNCLINK_GENERIC_HDLC |
3981 | hdlcdev_exit(info); | 3983 | hdlcdev_exit(info); |
3982 | #endif | 3984 | #endif |
3983 | free_dma_bufs(info); | 3985 | free_dma_bufs(info); |
@@ -4979,7 +4981,7 @@ CheckAgain: | |||
4979 | info->icount.rxcrc++; | 4981 | info->icount.rxcrc++; |
4980 | 4982 | ||
4981 | framesize = 0; | 4983 | framesize = 0; |
4982 | #ifdef CONFIG_HDLC | 4984 | #if SYNCLINK_GENERIC_HDLC |
4983 | { | 4985 | { |
4984 | struct net_device_stats *stats = hdlc_stats(info->netdev); | 4986 | struct net_device_stats *stats = hdlc_stats(info->netdev); |
4985 | stats->rx_errors++; | 4987 | stats->rx_errors++; |
@@ -5020,7 +5022,7 @@ CheckAgain: | |||
5020 | index = 0; | 5022 | index = 0; |
5021 | } | 5023 | } |
5022 | 5024 | ||
5023 | #ifdef CONFIG_HDLC | 5025 | #if SYNCLINK_GENERIC_HDLC |
5024 | if (info->netcount) | 5026 | if (info->netcount) |
5025 | hdlcdev_rx(info,info->tmp_rx_buf,framesize); | 5027 | hdlcdev_rx(info,info->tmp_rx_buf,framesize); |
5026 | else | 5028 | else |
@@ -5531,7 +5533,7 @@ void tx_timeout(unsigned long context) | |||
5531 | 5533 | ||
5532 | spin_unlock_irqrestore(&info->lock,flags); | 5534 | spin_unlock_irqrestore(&info->lock,flags); |
5533 | 5535 | ||
5534 | #ifdef CONFIG_HDLC | 5536 | #if SYNCLINK_GENERIC_HDLC |
5535 | if (info->netcount) | 5537 | if (info->netcount) |
5536 | hdlcdev_tx_done(info); | 5538 | hdlcdev_tx_done(info); |
5537 | else | 5539 | else |
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c index c64f5bcff947..05810c8d20bc 100644 --- a/drivers/char/sysrq.c +++ b/drivers/char/sysrq.c | |||
@@ -182,6 +182,18 @@ static struct sysrq_key_op sysrq_showstate_op = { | |||
182 | .enable_mask = SYSRQ_ENABLE_DUMP, | 182 | .enable_mask = SYSRQ_ENABLE_DUMP, |
183 | }; | 183 | }; |
184 | 184 | ||
185 | static void sysrq_handle_showstate_blocked(int key, struct tty_struct *tty) | ||
186 | { | ||
187 | show_state_filter(TASK_UNINTERRUPTIBLE); | ||
188 | } | ||
189 | static struct sysrq_key_op sysrq_showstate_blocked_op = { | ||
190 | .handler = sysrq_handle_showstate_blocked, | ||
191 | .help_msg = "showBlockedTasks", | ||
192 | .action_msg = "Show Blocked State", | ||
193 | .enable_mask = SYSRQ_ENABLE_DUMP, | ||
194 | }; | ||
195 | |||
196 | |||
185 | static void sysrq_handle_showmem(int key, struct tty_struct *tty) | 197 | static void sysrq_handle_showmem(int key, struct tty_struct *tty) |
186 | { | 198 | { |
187 | show_mem(); | 199 | show_mem(); |
@@ -304,7 +316,7 @@ static struct sysrq_key_op *sysrq_key_table[36] = { | |||
304 | /* May be assigned at init time by SMP VOYAGER */ | 316 | /* May be assigned at init time by SMP VOYAGER */ |
305 | NULL, /* v */ | 317 | NULL, /* v */ |
306 | NULL, /* w */ | 318 | NULL, /* w */ |
307 | NULL, /* x */ | 319 | &sysrq_showstate_blocked_op, /* x */ |
308 | NULL, /* y */ | 320 | NULL, /* y */ |
309 | NULL /* z */ | 321 | NULL /* z */ |
310 | }; | 322 | }; |
diff --git a/drivers/char/toshiba.c b/drivers/char/toshiba.c index dd36fd04a842..07067c31c4ec 100644 --- a/drivers/char/toshiba.c +++ b/drivers/char/toshiba.c | |||
@@ -249,6 +249,7 @@ int tosh_smm(SMMRegisters *regs) | |||
249 | 249 | ||
250 | return eax; | 250 | return eax; |
251 | } | 251 | } |
252 | EXPORT_SYMBOL(tosh_smm); | ||
252 | 253 | ||
253 | 254 | ||
254 | static int tosh_ioctl(struct inode *ip, struct file *fp, unsigned int cmd, | 255 | static int tosh_ioctl(struct inode *ip, struct file *fp, unsigned int cmd, |
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c index 774fa861169a..33e1f66e39cb 100644 --- a/drivers/char/tpm/tpm.c +++ b/drivers/char/tpm/tpm.c | |||
@@ -1155,6 +1155,7 @@ struct tpm_chip *tpm_register_hardware(struct device *dev, const struct tpm_vend | |||
1155 | 1155 | ||
1156 | if (sysfs_create_group(&dev->kobj, chip->vendor.attr_group)) { | 1156 | if (sysfs_create_group(&dev->kobj, chip->vendor.attr_group)) { |
1157 | list_del(&chip->list); | 1157 | list_del(&chip->list); |
1158 | misc_deregister(&chip->vendor.miscdev); | ||
1158 | put_device(dev); | 1159 | put_device(dev); |
1159 | clear_bit(chip->dev_num, dev_mask); | 1160 | clear_bit(chip->dev_num, dev_mask); |
1160 | kfree(chip); | 1161 | kfree(chip); |
diff --git a/drivers/char/vt.c b/drivers/char/vt.c index 75ff0286e1ad..a8239dac994f 100644 --- a/drivers/char/vt.c +++ b/drivers/char/vt.c | |||
@@ -152,7 +152,7 @@ static void gotoxy(struct vc_data *vc, int new_x, int new_y); | |||
152 | static void save_cur(struct vc_data *vc); | 152 | static void save_cur(struct vc_data *vc); |
153 | static void reset_terminal(struct vc_data *vc, int do_clear); | 153 | static void reset_terminal(struct vc_data *vc, int do_clear); |
154 | static void con_flush_chars(struct tty_struct *tty); | 154 | static void con_flush_chars(struct tty_struct *tty); |
155 | static void set_vesa_blanking(char __user *p); | 155 | static int set_vesa_blanking(char __user *p); |
156 | static void set_cursor(struct vc_data *vc); | 156 | static void set_cursor(struct vc_data *vc); |
157 | static void hide_cursor(struct vc_data *vc); | 157 | static void hide_cursor(struct vc_data *vc); |
158 | static void console_callback(struct work_struct *ignored); | 158 | static void console_callback(struct work_struct *ignored); |
@@ -2369,7 +2369,7 @@ int tioclinux(struct tty_struct *tty, unsigned long arg) | |||
2369 | ret = __put_user(data, p); | 2369 | ret = __put_user(data, p); |
2370 | break; | 2370 | break; |
2371 | case TIOCL_SETVESABLANK: | 2371 | case TIOCL_SETVESABLANK: |
2372 | set_vesa_blanking(p); | 2372 | ret = set_vesa_blanking(p); |
2373 | break; | 2373 | break; |
2374 | case TIOCL_GETKMSGREDIRECT: | 2374 | case TIOCL_GETKMSGREDIRECT: |
2375 | data = kmsg_redirect; | 2375 | data = kmsg_redirect; |
@@ -3313,11 +3313,15 @@ postcore_initcall(vtconsole_class_init); | |||
3313 | * Screen blanking | 3313 | * Screen blanking |
3314 | */ | 3314 | */ |
3315 | 3315 | ||
3316 | static void set_vesa_blanking(char __user *p) | 3316 | static int set_vesa_blanking(char __user *p) |
3317 | { | 3317 | { |
3318 | unsigned int mode; | 3318 | unsigned int mode; |
3319 | get_user(mode, p + 1); | 3319 | |
3320 | vesa_blank_mode = (mode < 4) ? mode : 0; | 3320 | if (get_user(mode, p + 1)) |
3321 | return -EFAULT; | ||
3322 | |||
3323 | vesa_blank_mode = (mode < 4) ? mode : 0; | ||
3324 | return 0; | ||
3321 | } | 3325 | } |
3322 | 3326 | ||
3323 | void do_blank_screen(int entering_gfx) | 3327 | void do_blank_screen(int entering_gfx) |
diff --git a/drivers/char/watchdog/pcwd_usb.c b/drivers/char/watchdog/pcwd_usb.c index e275dd4a705d..61138726b501 100644 --- a/drivers/char/watchdog/pcwd_usb.c +++ b/drivers/char/watchdog/pcwd_usb.c | |||
@@ -634,7 +634,7 @@ static int usb_pcwd_probe(struct usb_interface *interface, const struct usb_devi | |||
634 | usb_pcwd->intr_size = (le16_to_cpu(endpoint->wMaxPacketSize) > 8 ? le16_to_cpu(endpoint->wMaxPacketSize) : 8); | 634 | usb_pcwd->intr_size = (le16_to_cpu(endpoint->wMaxPacketSize) > 8 ? le16_to_cpu(endpoint->wMaxPacketSize) : 8); |
635 | 635 | ||
636 | /* set up the memory buffer's */ | 636 | /* set up the memory buffer's */ |
637 | if (!(usb_pcwd->intr_buffer = usb_buffer_alloc(udev, usb_pcwd->intr_size, SLAB_ATOMIC, &usb_pcwd->intr_dma))) { | 637 | if (!(usb_pcwd->intr_buffer = usb_buffer_alloc(udev, usb_pcwd->intr_size, GFP_ATOMIC, &usb_pcwd->intr_dma))) { |
638 | printk(KERN_ERR PFX "Out of memory\n"); | 638 | printk(KERN_ERR PFX "Out of memory\n"); |
639 | goto error; | 639 | goto error; |
640 | } | 640 | } |