author		Linus Torvalds <torvalds@linux-foundation.org>	2014-04-17 15:31:07 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-04-17 15:31:07 -0400
commit		c2896def9750fdb84604bbcf917dee1e7b0c20a3 (patch)
tree		e8ec3b3abeeeb31a2ce56c1ab24f1d9dde3fad26 /drivers/char
parent		88764e0a3ecf655950c4d46b323cf80e2e0ee481 (diff)
parent		7aefac26fc67158cb8826a5f5bfc2a5086a7d962 (diff)
Merge branch 'ipmi' (emailed ipmi fixes)

Merge ipmi fixes from Corey Minyard:
 "Things collected since last kernel release.  Some of these are pretty
  important.  The first three are bug fixes.  The next two are to
  hopefully make everyone happy about allowing ACPI to be on all the
  time and not have IPMI have an effect on the system when not in use.
  The last is a little cleanup"

* emailed patches from Corey Minyard <cminyard@mvista.com>:
  ipmi: boolify some things
  ipmi: Turn off all activity on an idle ipmi interface
  ipmi: Turn off default probing of interfaces
  ipmi: Reset the KCS timeout when starting error recovery
  ipmi: Fix a race restarting the timer
  Char: ipmi_bt_sm, fix infinite loop
Diffstat (limited to 'drivers/char')
 drivers/char/ipmi/Kconfig           |  12
 drivers/char/ipmi/ipmi_bt_sm.c      |   2
 drivers/char/ipmi/ipmi_kcs_sm.c     |   5
 drivers/char/ipmi/ipmi_msghandler.c | 239
 drivers/char/ipmi/ipmi_si_intf.c    | 145
 5 files changed, 251 insertions(+), 152 deletions(-)
diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig
index 0baa8fab4ea7..db1c9b7adaa6 100644
--- a/drivers/char/ipmi/Kconfig
+++ b/drivers/char/ipmi/Kconfig
@@ -50,6 +50,18 @@ config IPMI_SI
 	  Currently, only KCS and SMIC are supported.  If
 	  you are using IPMI, you should probably say "y" here.
 
+config IPMI_SI_PROBE_DEFAULTS
+	bool 'Probe for all possible IPMI system interfaces by default'
+	default n
+	depends on IPMI_SI
+	help
+	  Modern systems will usually expose IPMI interfaces via a discoverable
+	  firmware mechanism such as ACPI or DMI. Older systems do not, and so
+	  the driver is forced to probe hardware manually. This may cause boot
+	  delays. Say "n" here to disable this manual probing. IPMI will then
+	  only be available on older systems if the "ipmi_si_intf.trydefaults=1"
+	  boot argument is passed.
+
 config IPMI_WATCHDOG
 	tristate 'IPMI Watchdog Timer'
 	help
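
The new option's compile-time default reaches the driver through IS_ENABLED()
(see the final ipmi_si_intf.c hunk below). A minimal sketch of how a bool
Kconfig symbol becomes a module-parameter default; the module_param_named()
wiring here is illustrative, not quoted from the patch:

    #include <linux/kconfig.h>      /* IS_ENABLED() */
    #include <linux/moduleparam.h>

    /* Compile-time default comes from the Kconfig choice; the user can
     * still override it on the kernel command line at boot. */
    static bool si_trydefaults = IS_ENABLED(CONFIG_IPMI_SI_PROBE_DEFAULTS);
    module_param_named(trydefaults, si_trydefaults, bool, 0);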
diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
index f5e4cd7617f6..61e71616689b 100644
--- a/drivers/char/ipmi/ipmi_bt_sm.c
+++ b/drivers/char/ipmi/ipmi_bt_sm.c
@@ -352,7 +352,7 @@ static inline void write_all_bytes(struct si_sm_data *bt)
 
 static inline int read_all_bytes(struct si_sm_data *bt)
 {
-	unsigned char i;
+	unsigned int i;
 
 	/*
 	 * length is "framing info", minimum = 4: NetFn, Seq, Cmd, cCode.
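
This one-word change is the "Char: ipmi_bt_sm, fix infinite loop" fix: the
loop in read_all_bytes() counts i up to a length that can exceed 255, and an
8-bit counter wraps from 255 back to 0 before ever reaching such a bound, so
the loop condition could never become false. A standalone illustration of the
wrap (demo code, not the driver's):

    #include <stdio.h>

    int main(void)
    {
        unsigned int count = 300;   /* any bound > 255 triggers the bug */
        unsigned char i;            /* wraps 255 -> 0 */
        unsigned int spins = 0;

        for (i = 0; i < count; i++)
            if (++spins > 1024)
                break;              /* cap the demo; the real loop had none */

        printf("i (max 255) never reaches %u; gave up after %u spins\n",
               count, spins);
        return 0;
    }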
diff --git a/drivers/char/ipmi/ipmi_kcs_sm.c b/drivers/char/ipmi/ipmi_kcs_sm.c
index 6a4bdc18955a..8c25f596808a 100644
--- a/drivers/char/ipmi/ipmi_kcs_sm.c
+++ b/drivers/char/ipmi/ipmi_kcs_sm.c
@@ -251,8 +251,9 @@ static inline int check_obf(struct si_sm_data *kcs, unsigned char status,
 	if (!GET_STATUS_OBF(status)) {
 		kcs->obf_timeout -= time;
 		if (kcs->obf_timeout < 0) {
+			kcs->obf_timeout = OBF_RETRY_TIMEOUT;
 			start_error_recovery(kcs, "OBF not ready in time");
 			return 1;
 		}
 		return 0;
 	}
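
This is the "ipmi: Reset the KCS timeout when starting error recovery" fix.
The error-recovery states poll OBF through this same countdown, so entering
recovery with obf_timeout already negative would make the very next recovery
step time out as well. Re-arming the budget before the state change is the
point of the reordering; condensed, with the rationale as a comment (a
paraphrase, not the file's own comment):

    if (kcs->obf_timeout < 0) {
        /* Error recovery polls OBF with this same counter, so
         * replenish it before switching into the error states. */
        kcs->obf_timeout = OBF_RETRY_TIMEOUT;
        start_error_recovery(kcs, "OBF not ready in time");
        return 1;
    }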
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index ec4e10fcf1a5..e6db9381b2c7 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -55,6 +55,7 @@ static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
 static int ipmi_init_msghandler(void);
 static void smi_recv_tasklet(unsigned long);
 static void handle_new_recv_msgs(ipmi_smi_t intf);
+static void need_waiter(ipmi_smi_t intf);
 
 static int initialized;
 
@@ -73,14 +74,28 @@ static struct proc_dir_entry *proc_ipmi_root;
  */
 #define MAX_MSG_TIMEOUT		60000
 
+/* Call every ~1000 ms. */
+#define IPMI_TIMEOUT_TIME	1000
+
+/* How many jiffies does it take to get to the timeout time. */
+#define IPMI_TIMEOUT_JIFFIES	((IPMI_TIMEOUT_TIME * HZ) / 1000)
+
+/*
+ * Request events from the queue every second (this is the number of
+ * IPMI_TIMEOUT_TIMES between event requests).  Hopefully, in the
+ * future, IPMI will add a way to know immediately if an event is in
+ * the queue and this silliness can go away.
+ */
+#define IPMI_REQUEST_EV_TIME	(1000 / (IPMI_TIMEOUT_TIME))
+
 /*
  * The main "user" data structure.
  */
 struct ipmi_user {
 	struct list_head link;
 
-	/* Set to "0" when the user is destroyed. */
-	int valid;
+	/* Set to false when the user is destroyed. */
+	bool valid;
 
 	struct kref refcount;
 
@@ -92,7 +107,7 @@ struct ipmi_user {
 	ipmi_smi_t intf;
 
 	/* Does this interface receive IPMI events? */
-	int gets_events;
+	bool gets_events;
 };
 
 struct cmd_rcvr {
@@ -383,6 +398,9 @@ struct ipmi_smi {
 	unsigned int waiting_events_count; /* How many events in queue? */
 	char delivering_events;
 	char event_msg_printed;
+	atomic_t event_waiters;
+	unsigned int ticks_to_req_ev;
+	int last_needs_timer;
 
 	/*
 	 * The event receiver for my BMC, only really used at panic
@@ -395,7 +413,7 @@ struct ipmi_smi {
 
 	/* For handling of maintenance mode. */
 	int maintenance_mode;
-	int maintenance_mode_enable;
+	bool maintenance_mode_enable;
 	int auto_maintenance_timeout;
 	spinlock_t maintenance_mode_lock; /* Used in a timer... */
 
@@ -451,7 +469,6 @@ static DEFINE_MUTEX(ipmi_interfaces_mutex);
 static LIST_HEAD(smi_watchers);
 static DEFINE_MUTEX(smi_watchers_mutex);
 
-
 #define ipmi_inc_stat(intf, stat) \
 	atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
 #define ipmi_get_stat(intf, stat) \
@@ -772,6 +789,7 @@ static int intf_next_seq(ipmi_smi_t intf,
 		*seq = i;
 		*seqid = intf->seq_table[i].seqid;
 		intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
+		need_waiter(intf);
 	} else {
 		rv = -EAGAIN;
 	}
@@ -941,7 +959,7 @@ int ipmi_create_user(unsigned int if_num,
 	new_user->handler = handler;
 	new_user->handler_data = handler_data;
 	new_user->intf = intf;
-	new_user->gets_events = 0;
+	new_user->gets_events = false;
 
 	if (!try_module_get(intf->handlers->owner)) {
 		rv = -ENODEV;
@@ -962,10 +980,15 @@ int ipmi_create_user(unsigned int if_num,
 	 */
 	mutex_unlock(&ipmi_interfaces_mutex);
 
-	new_user->valid = 1;
+	new_user->valid = true;
 	spin_lock_irqsave(&intf->seq_lock, flags);
 	list_add_rcu(&new_user->link, &intf->users);
 	spin_unlock_irqrestore(&intf->seq_lock, flags);
+	if (handler->ipmi_watchdog_pretimeout) {
+		/* User wants pretimeouts, so make sure to watch for them. */
+		if (atomic_inc_return(&intf->event_waiters) == 1)
+			need_waiter(intf);
+	}
 	*user = new_user;
 	return 0;
 
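
Most of the msghandler changes revolve around one counting idiom:
intf->event_waiters tracks every reason the interface still needs periodic
attention (watchdog-pretimeout users, event receivers, registered command
handlers, messages awaiting responses), and the first waiter restarts the
otherwise-idle timer. Condensed into hypothetical helpers for illustration
(the patch open-codes this at each site):

    static void get_waiter(ipmi_smi_t intf)
    {
        /* The first waiter re-arms the self-stopping ipmi_timer. */
        if (atomic_inc_return(&intf->event_waiters) == 1)
            need_waiter(intf);
    }

    static void put_waiter(ipmi_smi_t intf)
    {
        /* At zero, ipmi_timeout() simply stops re-arming itself. */
        atomic_dec(&intf->event_waiters);
    }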
@@ -1019,7 +1042,13 @@ int ipmi_destroy_user(ipmi_user_t user)
 	struct cmd_rcvr  *rcvr;
 	struct cmd_rcvr  *rcvrs = NULL;
 
-	user->valid = 0;
+	user->valid = false;
+
+	if (user->handler->ipmi_watchdog_pretimeout)
+		atomic_dec(&intf->event_waiters);
+
+	if (user->gets_events)
+		atomic_dec(&intf->event_waiters);
 
 	/* Remove the user from the interface's sequence table. */
 	spin_lock_irqsave(&intf->seq_lock, flags);
@@ -1155,25 +1184,23 @@ int ipmi_set_maintenance_mode(ipmi_user_t user, int mode)
 	if (intf->maintenance_mode != mode) {
 		switch (mode) {
 		case IPMI_MAINTENANCE_MODE_AUTO:
-			intf->maintenance_mode = mode;
 			intf->maintenance_mode_enable
 				= (intf->auto_maintenance_timeout > 0);
 			break;
 
 		case IPMI_MAINTENANCE_MODE_OFF:
-			intf->maintenance_mode = mode;
-			intf->maintenance_mode_enable = 0;
+			intf->maintenance_mode_enable = false;
 			break;
 
 		case IPMI_MAINTENANCE_MODE_ON:
-			intf->maintenance_mode = mode;
-			intf->maintenance_mode_enable = 1;
+			intf->maintenance_mode_enable = true;
 			break;
 
 		default:
 			rv = -EINVAL;
 			goto out_unlock;
 		}
+		intf->maintenance_mode = mode;
 
 		maintenance_mode_update(intf);
 	}
@@ -1184,7 +1211,7 @@ int ipmi_set_maintenance_mode(ipmi_user_t user, int mode)
 }
 EXPORT_SYMBOL(ipmi_set_maintenance_mode);
 
-int ipmi_set_gets_events(ipmi_user_t user, int val)
+int ipmi_set_gets_events(ipmi_user_t user, bool val)
 {
 	unsigned long flags;
 	ipmi_smi_t intf = user->intf;
@@ -1194,8 +1221,18 @@ int ipmi_set_gets_events(ipmi_user_t user, int val)
 	INIT_LIST_HEAD(&msgs);
 
 	spin_lock_irqsave(&intf->events_lock, flags);
+	if (user->gets_events == val)
+		goto out;
+
 	user->gets_events = val;
 
+	if (val) {
+		if (atomic_inc_return(&intf->event_waiters) == 1)
+			need_waiter(intf);
+	} else {
+		atomic_dec(&intf->event_waiters);
+	}
+
 	if (intf->delivering_events)
 		/*
 		 * Another thread is delivering events for this, so
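
The new equal-value guard keeps that count balanced: without it, enabling
events twice in a row would bump event_waiters twice while the eventual
disable only drops it once. In terms of the hypothetical helpers sketched
earlier:

    if (user->gets_events == val)
        goto out;           /* no transition, no count change */
    user->gets_events = val;
    if (val)
        get_waiter(intf);   /* true: start watching for events */
    else
        put_waiter(intf);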
@@ -1289,6 +1326,9 @@ int ipmi_register_for_cmd(ipmi_user_t user,
 		goto out_unlock;
 	}
 
+	if (atomic_inc_return(&intf->event_waiters) == 1)
+		need_waiter(intf);
+
 	list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
 
  out_unlock:
@@ -1330,6 +1370,7 @@ int ipmi_unregister_for_cmd(ipmi_user_t user,
 	mutex_unlock(&intf->cmd_rcvrs_mutex);
 	synchronize_rcu();
 	while (rcvrs) {
+		atomic_dec(&intf->event_waiters);
 		rcvr = rcvrs;
 		rcvrs = rcvr->next;
 		kfree(rcvr);
@@ -1535,7 +1576,7 @@ static int i_ipmi_request(ipmi_user_t user,
 					= IPMI_MAINTENANCE_MODE_TIMEOUT;
 			if (!intf->maintenance_mode
 			    && !intf->maintenance_mode_enable) {
-				intf->maintenance_mode_enable = 1;
+				intf->maintenance_mode_enable = true;
 				maintenance_mode_update(intf);
 			}
 			spin_unlock_irqrestore(&intf->maintenance_mode_lock,
@@ -2876,6 +2917,8 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
 		     (unsigned long) intf);
 	atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
 	spin_lock_init(&intf->events_lock);
+	atomic_set(&intf->event_waiters, 0);
+	intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
 	INIT_LIST_HEAD(&intf->waiting_events);
 	intf->waiting_events_count = 0;
 	mutex_init(&intf->cmd_rcvrs_mutex);
@@ -3965,7 +4008,8 @@ smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
 
 static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
 			      struct list_head *timeouts, long timeout_period,
-			      int slot, unsigned long *flags)
+			      int slot, unsigned long *flags,
+			      unsigned int *waiting_msgs)
 {
 	struct ipmi_recv_msg *msg;
 	struct ipmi_smi_handlers *handlers;
@@ -3977,8 +4021,10 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
 		return;
 
 	ent->timeout -= timeout_period;
-	if (ent->timeout > 0)
+	if (ent->timeout > 0) {
+		(*waiting_msgs)++;
 		return;
+	}
 
 	if (ent->retries_left == 0) {
 		/* The message has used all its retries. */
@@ -3995,6 +4041,8 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
 		struct ipmi_smi_msg *smi_msg;
 		/* More retries, send again. */
 
+		(*waiting_msgs)++;
+
 		/*
 		 * Start with the max timer, set to normal timer after
 		 * the message is sent.
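
check_msg_timeout() now also reports, through the new out-parameter, whether
its slot still holds a live message (still counting down, or re-queued for
another retry). ipmi_timeout_handler() sums those votes and returns them,
which is what ultimately tells the timer whether it still has a client. The
condensed call shape:

    unsigned int waiting_msgs = 0;

    for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
        check_msg_timeout(intf, &intf->seq_table[i], &timeouts,
                          timeout_period, i, &flags, &waiting_msgs);
    return waiting_msgs;    /* nonzero: keep ipmi_timer armed */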
@@ -4040,117 +4088,118 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
 	}
 }
 
-static void ipmi_timeout_handler(long timeout_period)
+static unsigned int ipmi_timeout_handler(ipmi_smi_t intf, long timeout_period)
 {
-	ipmi_smi_t intf;
 	struct list_head timeouts;
 	struct ipmi_recv_msg *msg, *msg2;
 	unsigned long flags;
 	int i;
+	unsigned int waiting_msgs = 0;
 
-	rcu_read_lock();
-	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
-		tasklet_schedule(&intf->recv_tasklet);
-
-		/*
-		 * Go through the seq table and find any messages that
-		 * have timed out, putting them in the timeouts
-		 * list.
-		 */
-		INIT_LIST_HEAD(&timeouts);
-		spin_lock_irqsave(&intf->seq_lock, flags);
-		for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
-			check_msg_timeout(intf, &(intf->seq_table[i]),
-					  &timeouts, timeout_period, i,
-					  &flags);
-		spin_unlock_irqrestore(&intf->seq_lock, flags);
+	/*
+	 * Go through the seq table and find any messages that
+	 * have timed out, putting them in the timeouts
+	 * list.
+	 */
+	INIT_LIST_HEAD(&timeouts);
+	spin_lock_irqsave(&intf->seq_lock, flags);
+	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
+		check_msg_timeout(intf, &(intf->seq_table[i]),
+				  &timeouts, timeout_period, i,
+				  &flags, &waiting_msgs);
+	spin_unlock_irqrestore(&intf->seq_lock, flags);
 
-		list_for_each_entry_safe(msg, msg2, &timeouts, link)
-			deliver_err_response(msg, IPMI_TIMEOUT_COMPLETION_CODE);
+	list_for_each_entry_safe(msg, msg2, &timeouts, link)
+		deliver_err_response(msg, IPMI_TIMEOUT_COMPLETION_CODE);
 
-		/*
-		 * Maintenance mode handling.  Check the timeout
-		 * optimistically before we claim the lock.  It may
-		 * mean a timeout gets missed occasionally, but that
-		 * only means the timeout gets extended by one period
-		 * in that case.  No big deal, and it avoids the lock
-		 * most of the time.
-		 */
+	/*
+	 * Maintenance mode handling.  Check the timeout
+	 * optimistically before we claim the lock.  It may
+	 * mean a timeout gets missed occasionally, but that
+	 * only means the timeout gets extended by one period
+	 * in that case.  No big deal, and it avoids the lock
+	 * most of the time.
+	 */
+	if (intf->auto_maintenance_timeout > 0) {
+		spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
 		if (intf->auto_maintenance_timeout > 0) {
-			spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
-			if (intf->auto_maintenance_timeout > 0) {
-				intf->auto_maintenance_timeout
-					-= timeout_period;
-				if (!intf->maintenance_mode
-				    && (intf->auto_maintenance_timeout <= 0)) {
-					intf->maintenance_mode_enable = 0;
-					maintenance_mode_update(intf);
-				}
+			intf->auto_maintenance_timeout
+				-= timeout_period;
+			if (!intf->maintenance_mode
+			    && (intf->auto_maintenance_timeout <= 0)) {
+				intf->maintenance_mode_enable = false;
+				maintenance_mode_update(intf);
 			}
-			spin_unlock_irqrestore(&intf->maintenance_mode_lock,
-					       flags);
 		}
+		spin_unlock_irqrestore(&intf->maintenance_mode_lock,
+				       flags);
 	}
-	rcu_read_unlock();
+
+	tasklet_schedule(&intf->recv_tasklet);
+
+	return waiting_msgs;
 }
 
-static void ipmi_request_event(void)
+static void ipmi_request_event(ipmi_smi_t intf)
 {
-	ipmi_smi_t intf;
 	struct ipmi_smi_handlers *handlers;
 
-	rcu_read_lock();
-	/*
-	 * Called from the timer, no need to check if handlers is
-	 * valid.
-	 */
-	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
-		/* No event requests when in maintenance mode. */
-		if (intf->maintenance_mode_enable)
-			continue;
+	/* No event requests when in maintenance mode. */
+	if (intf->maintenance_mode_enable)
+		return;
 
-		handlers = intf->handlers;
-		if (handlers)
-			handlers->request_events(intf->send_info);
-	}
-	rcu_read_unlock();
+	handlers = intf->handlers;
+	if (handlers)
+		handlers->request_events(intf->send_info);
 }
 
 static struct timer_list ipmi_timer;
 
-/* Call every ~1000 ms. */
-#define IPMI_TIMEOUT_TIME	1000
-
-/* How many jiffies does it take to get to the timeout time. */
-#define IPMI_TIMEOUT_JIFFIES	((IPMI_TIMEOUT_TIME * HZ) / 1000)
-
-/*
- * Request events from the queue every second (this is the number of
- * IPMI_TIMEOUT_TIMES between event requests).  Hopefully, in the
- * future, IPMI will add a way to know immediately if an event is in
- * the queue and this silliness can go away.
- */
-#define IPMI_REQUEST_EV_TIME	(1000 / (IPMI_TIMEOUT_TIME))
-
 static atomic_t stop_operation;
-static unsigned int ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
 
 static void ipmi_timeout(unsigned long data)
 {
+	ipmi_smi_t intf;
+	int nt = 0;
+
 	if (atomic_read(&stop_operation))
 		return;
 
-	ticks_to_req_ev--;
-	if (ticks_to_req_ev == 0) {
-		ipmi_request_event();
-		ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
-	}
+	rcu_read_lock();
+	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+		int lnt = 0;
+
+		if (atomic_read(&intf->event_waiters)) {
+			intf->ticks_to_req_ev--;
+			if (intf->ticks_to_req_ev == 0) {
+				ipmi_request_event(intf);
+				intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
+			}
+			lnt++;
+		}
 
-	ipmi_timeout_handler(IPMI_TIMEOUT_TIME);
+		lnt += ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
 
-	mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
+		lnt = !!lnt;
+		if (lnt != intf->last_needs_timer &&
+					intf->handlers->set_need_watch)
+			intf->handlers->set_need_watch(intf->send_info, lnt);
+		intf->last_needs_timer = lnt;
+
+		nt += lnt;
+	}
+	rcu_read_unlock();
+
+	if (nt)
+		mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
 }
 
+static void need_waiter(ipmi_smi_t intf)
+{
+	/* Racy, but worst case we start the timer twice. */
+	if (!timer_pending(&ipmi_timer))
+		mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
+}
 
 static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
 static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
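
The net effect of this file's changes ("ipmi: Turn off all activity on an
idle ipmi interface"): the once-a-second ipmi_timer no longer free-runs for
the life of the module. It re-arms only while some interface reports waiters
or in-flight messages, and need_waiter() revives it when work reappears. The
idiom in isolation, with generic names rather than the driver's:

    #include <linux/atomic.h>
    #include <linux/timer.h>

    static struct timer_list poll_timer;
    static atomic_t waiters;    /* reasons to keep polling */

    static void poll_fn(unsigned long data)
    {
        /* ... one round of work ... */

        /* Self-stopping: re-arm only while someone still cares. */
        if (atomic_read(&waiters))
            mod_timer(&poll_timer, jiffies + HZ);
    }

    static void add_waiter(void)
    {
        /* Racy against a concurrent add, but the worst case is an
         * extra mod_timer() call, which is harmless. */
        if (atomic_inc_return(&waiters) == 1 &&
            !timer_pending(&poll_timer))
            mod_timer(&poll_timer, jiffies + HZ);
    }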
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index b7efd3c1a882..1c4bb4f6ce93 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -217,7 +217,7 @@ struct smi_info {
 	unsigned char       msg_flags;
 
 	/* Does the BMC have an event buffer? */
-	char                has_event_buffer;
+	bool                has_event_buffer;
 
 	/*
 	 * If set to true, this will request events the next time the
@@ -230,7 +230,7 @@ struct smi_info {
 	 * call.  Generally used after a panic to make sure stuff goes
 	 * out.
 	 */
-	int                 run_to_completion;
+	bool                run_to_completion;
 
 	/* The I/O port of an SI interface. */
 	int                 port;
@@ -248,19 +248,25 @@ struct smi_info {
 	/* The timer for this si. */
 	struct timer_list   si_timer;
 
+	/* This flag is set, if the timer is running (timer_pending() isn't enough) */
+	bool                timer_running;
+
 	/* The time (in jiffies) the last timeout occurred at. */
 	unsigned long       last_timeout_jiffies;
 
 	/* Used to gracefully stop the timer without race conditions. */
 	atomic_t            stop_operation;
 
+	/* Are we waiting for the events, pretimeouts, received msgs? */
+	atomic_t            need_watch;
+
 	/*
 	 * The driver will disable interrupts when it gets into a
 	 * situation where it cannot handle messages due to lack of
 	 * memory.  Once that situation clears up, it will re-enable
 	 * interrupts.
 	 */
-	int                 interrupt_disabled;
+	bool                interrupt_disabled;
 
 	/* From the get device id response... */
 	struct ipmi_device_id device_id;
@@ -273,7 +279,7 @@ struct smi_info {
 	 * True if we allocated the device, false if it came from
 	 * someplace else (like PCI).
 	 */
-	int                 dev_registered;
+	bool                dev_registered;
 
 	/* Slave address, could be reported from DMI. */
 	unsigned char       slave_addr;
@@ -297,19 +303,19 @@ struct smi_info {
 static int           force_kipmid[SI_MAX_PARMS];
 static int           num_force_kipmid;
 #ifdef CONFIG_PCI
-static int           pci_registered;
+static bool          pci_registered;
 #endif
 #ifdef CONFIG_ACPI
-static int           pnp_registered;
+static bool          pnp_registered;
 #endif
 #ifdef CONFIG_PARISC
-static int           parisc_registered;
+static bool          parisc_registered;
 #endif
 
 static unsigned int kipmid_max_busy_us[SI_MAX_PARMS];
 static int num_max_busy_us;
 
-static int           unload_when_empty = 1;
+static bool          unload_when_empty = true;
 
 static int add_smi(struct smi_info *smi);
 static int try_smi_init(struct smi_info *smi);
@@ -434,6 +440,13 @@ static void start_clear_flags(struct smi_info *smi_info)
 	smi_info->si_state = SI_CLEARING_FLAGS;
 }
 
+static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
+{
+	smi_info->last_timeout_jiffies = jiffies;
+	mod_timer(&smi_info->si_timer, new_val);
+	smi_info->timer_running = true;
+}
+
 /*
  * When we have a situtaion where we run out of memory and cannot
  * allocate messages, we just leave them in the BMC and run the system
@@ -444,10 +457,9 @@ static inline void disable_si_irq(struct smi_info *smi_info)
 {
 	if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
 		start_disable_irq(smi_info);
-		smi_info->interrupt_disabled = 1;
+		smi_info->interrupt_disabled = true;
 		if (!atomic_read(&smi_info->stop_operation))
-			mod_timer(&smi_info->si_timer,
-				  jiffies + SI_TIMEOUT_JIFFIES);
+			smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
 	}
 }
 
@@ -455,7 +467,7 @@ static inline void enable_si_irq(struct smi_info *smi_info)
 {
 	if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
 		start_enable_irq(smi_info);
-		smi_info->interrupt_disabled = 0;
+		smi_info->interrupt_disabled = false;
 	}
 }
 
@@ -700,7 +712,7 @@ static void handle_transaction_done(struct smi_info *smi_info)
 			dev_warn(smi_info->dev,
 				 "Maybe ok, but ipmi might run very slowly.\n");
 		} else
-			smi_info->interrupt_disabled = 0;
+			smi_info->interrupt_disabled = false;
 		smi_info->si_state = SI_NORMAL;
 		break;
 	}
@@ -853,6 +865,19 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
 	return si_sm_result;
 }
 
+static void check_start_timer_thread(struct smi_info *smi_info)
+{
+	if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) {
+		smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
+
+		if (smi_info->thread)
+			wake_up_process(smi_info->thread);
+
+		start_next_msg(smi_info);
+		smi_event_handler(smi_info, 0);
+	}
+}
+
 static void sender(void *send_info,
 		   struct ipmi_smi_msg *msg,
 		   int priority)
@@ -906,27 +931,11 @@ static void sender(void *send_info,
 	else
 		list_add_tail(&msg->link, &smi_info->xmit_msgs);
 
-	if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) {
-		/*
-		 * last_timeout_jiffies is updated here to avoid
-		 * smi_timeout() handler passing very large time_diff
-		 * value to smi_event_handler() that causes
-		 * the send command to abort.
-		 */
-		smi_info->last_timeout_jiffies = jiffies;
-
-		mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
-
-		if (smi_info->thread)
-			wake_up_process(smi_info->thread);
-
-		start_next_msg(smi_info);
-		smi_event_handler(smi_info, 0);
-	}
+	check_start_timer_thread(smi_info);
 	spin_unlock_irqrestore(&smi_info->si_lock, flags);
 }
 
-static void set_run_to_completion(void *send_info, int i_run_to_completion)
+static void set_run_to_completion(void *send_info, bool i_run_to_completion)
 {
 	struct smi_info *smi_info = send_info;
 	enum si_sm_result result;
@@ -1004,6 +1013,17 @@ static int ipmi_thread(void *data)
 
 		spin_lock_irqsave(&(smi_info->si_lock), flags);
 		smi_result = smi_event_handler(smi_info, 0);
+
+		/*
+		 * If the driver is doing something, there is a possible
+		 * race with the timer.  If the timer handler see idle,
+		 * and the thread here sees something else, the timer
+		 * handler won't restart the timer even though it is
+		 * required.  So start it here if necessary.
+		 */
+		if (smi_result != SI_SM_IDLE && !smi_info->timer_running)
+			smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
+
 		spin_unlock_irqrestore(&(smi_info->si_lock), flags);
 		busy_wait = ipmi_thread_busy_wait(smi_result, smi_info,
 						  &busy_until);
@@ -1011,9 +1031,15 @@ static int ipmi_thread(void *data)
 			; /* do nothing */
 		else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait)
 			schedule();
-		else if (smi_result == SI_SM_IDLE)
-			schedule_timeout_interruptible(100);
-		else
+		else if (smi_result == SI_SM_IDLE) {
+			if (atomic_read(&smi_info->need_watch)) {
+				schedule_timeout_interruptible(100);
+			} else {
+				/* Wait to be woken up when we are needed. */
+				__set_current_state(TASK_INTERRUPTIBLE);
+				schedule();
+			}
+		} else
 			schedule_timeout_interruptible(1);
 	}
 	return 0;
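
With need_watch clear, kipmid now parks instead of waking every 100 ticks: it
marks itself TASK_INTERRUPTIBLE and calls schedule(), staying off the CPU
until sender() or set_need_watch() wakes it through wake_up_process(). The
canonical park/wake shape, with hypothetical work predicates:

    #include <linux/kthread.h>
    #include <linux/sched.h>

    static int worker_fn(void *data)
    {
        while (!kthread_should_stop()) {
            /* Publish the sleeping state before the final check so
             * a concurrent wake_up_process() cannot be missed. */
            set_current_state(TASK_INTERRUPTIBLE);
            if (!work_available()) {    /* hypothetical predicate */
                schedule();             /* sleep until woken */
                continue;
            }
            __set_current_state(TASK_RUNNING);
            do_one_item();              /* hypothetical work step */
        }
        return 0;
    }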
@@ -1024,7 +1050,7 @@ static void poll(void *send_info)
 {
 	struct smi_info *smi_info = send_info;
 	unsigned long flags = 0;
-	int run_to_completion = smi_info->run_to_completion;
+	bool run_to_completion = smi_info->run_to_completion;
 
 	/*
 	 * Make sure there is some delay in the poll loop so we can
@@ -1049,6 +1075,17 @@ static void request_events(void *send_info)
 	atomic_set(&smi_info->req_events, 1);
 }
 
+static void set_need_watch(void *send_info, bool enable)
+{
+	struct smi_info *smi_info = send_info;
+	unsigned long flags;
+
+	atomic_set(&smi_info->need_watch, enable);
+	spin_lock_irqsave(&smi_info->si_lock, flags);
+	check_start_timer_thread(smi_info);
+	spin_unlock_irqrestore(&smi_info->si_lock, flags);
+}
+
 static int initialized;
 
 static void smi_timeout(unsigned long data)
@@ -1073,10 +1110,6 @@ static void smi_timeout(unsigned long data)
 		     * SI_USEC_PER_JIFFY);
 	smi_result = smi_event_handler(smi_info, time_diff);
 
-	spin_unlock_irqrestore(&(smi_info->si_lock), flags);
-
-	smi_info->last_timeout_jiffies = jiffies_now;
-
 	if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
 		/* Running with interrupts, only do long timeouts. */
 		timeout = jiffies + SI_TIMEOUT_JIFFIES;
@@ -1098,7 +1131,10 @@ static void smi_timeout(unsigned long data)
 
  do_mod_timer:
 	if (smi_result != SI_SM_IDLE)
-		mod_timer(&(smi_info->si_timer), timeout);
+		smi_mod_timer(smi_info, timeout);
+	else
+		smi_info->timer_running = false;
+	spin_unlock_irqrestore(&(smi_info->si_lock), flags);
 }
 
 static irqreturn_t si_irq_handler(int irq, void *data)
@@ -1146,8 +1182,7 @@ static int smi_start_processing(void *send_info,
 
 	/* Set up the timer that drives the interface. */
 	setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
-	new_smi->last_timeout_jiffies = jiffies;
-	mod_timer(&new_smi->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
+	smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);
 
 	/*
 	 * Check if the user forcefully enabled the daemon.
@@ -1188,7 +1223,7 @@ static int get_smi_info(void *send_info, struct ipmi_smi_info *data)
 	return 0;
 }
 
-static void set_maintenance_mode(void *send_info, int enable)
+static void set_maintenance_mode(void *send_info, bool enable)
 {
 	struct smi_info *smi_info = send_info;
 
@@ -1202,6 +1237,7 @@ static struct ipmi_smi_handlers handlers = {
 	.get_smi_info		= get_smi_info,
 	.sender			= sender,
 	.request_events		= request_events,
+	.set_need_watch		= set_need_watch,
 	.set_maintenance_mode	= set_maintenance_mode,
 	.set_run_to_completion	= set_run_to_completion,
 	.poll			= poll,
@@ -1229,7 +1265,7 @@ static bool si_tryplatform = 1;
 #ifdef CONFIG_PCI
 static bool          si_trypci = 1;
 #endif
-static bool          si_trydefaults = 1;
+static bool          si_trydefaults = IS_ENABLED(CONFIG_IPMI_SI_PROBE_DEFAULTS);
 static char          *si_type[SI_MAX_PARMS];
 #define MAX_SI_TYPE_STR 30
 static char          si_type_str[MAX_SI_TYPE_STR];
@@ -1328,7 +1364,7 @@ module_param_array(force_kipmid, int, &num_force_kipmid, 0);
 MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or"
 		 " disabled(0).  Normally the IPMI driver auto-detects"
 		 " this, but the value may be overridden by this parm.");
-module_param(unload_when_empty, int, 0);
+module_param(unload_when_empty, bool, 0);
 MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are"
 		 " specified or found, default is 1.  Setting to 0"
 		 " is useful for hot add of devices using hotmod.");
@@ -3336,18 +3372,19 @@ static int try_smi_init(struct smi_info *new_smi)
 	INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs));
 	new_smi->curr_msg = NULL;
 	atomic_set(&new_smi->req_events, 0);
-	new_smi->run_to_completion = 0;
+	new_smi->run_to_completion = false;
 	for (i = 0; i < SI_NUM_STATS; i++)
 		atomic_set(&new_smi->stats[i], 0);
 
-	new_smi->interrupt_disabled = 1;
+	new_smi->interrupt_disabled = true;
 	atomic_set(&new_smi->stop_operation, 0);
+	atomic_set(&new_smi->need_watch, 0);
 	new_smi->intf_num = smi_num;
 	smi_num++;
 
 	rv = try_enable_event_buffer(new_smi);
 	if (rv == 0)
-		new_smi->has_event_buffer = 1;
+		new_smi->has_event_buffer = true;
 
 	/*
 	 * Start clearing the flags before we enable interrupts or the
@@ -3381,7 +3418,7 @@ static int try_smi_init(struct smi_info *new_smi)
 				rv);
 			goto out_err;
 		}
-		new_smi->dev_registered = 1;
+		new_smi->dev_registered = true;
 	}
 
 	rv = ipmi_register_smi(&handlers,
@@ -3430,7 +3467,7 @@ static int try_smi_init(struct smi_info *new_smi)
 	wait_for_timer_and_thread(new_smi);
 
  out_err:
-	new_smi->interrupt_disabled = 1;
+	new_smi->interrupt_disabled = true;
 
 	if (new_smi->intf) {
 		ipmi_unregister_smi(new_smi->intf);
@@ -3466,7 +3503,7 @@ static int try_smi_init(struct smi_info *new_smi)
 
 	if (new_smi->dev_registered) {
 		platform_device_unregister(new_smi->pdev);
-		new_smi->dev_registered = 0;
+		new_smi->dev_registered = false;
 	}
 
 	return rv;
@@ -3521,14 +3558,14 @@ static int init_ipmi_si(void)
 			printk(KERN_ERR PFX "Unable to register "
 			       "PCI driver: %d\n", rv);
 		else
-			pci_registered = 1;
+			pci_registered = true;
 	}
 #endif
 
 #ifdef CONFIG_ACPI
 	if (si_tryacpi) {
 		pnp_register_driver(&ipmi_pnp_driver);
-		pnp_registered = 1;
+		pnp_registered = true;
 	}
 #endif
 
@@ -3544,7 +3581,7 @@ static int init_ipmi_si(void)
 
 #ifdef CONFIG_PARISC
 	register_parisc_driver(&ipmi_parisc_driver);
-	parisc_registered = 1;
+	parisc_registered = true;
 	/* poking PC IO addresses will crash machine, don't do it */
 	si_trydefaults = 0;
 #endif