author    Corey Minyard <cminyard@mvista.com>   2014-04-14 10:46:56 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>   2014-04-17 15:30:40 -0400
commit    7aefac26fc67158cb8826a5f5bfc2a5086a7d962 (patch)
tree      dd3c6050fd653f6bdffe51452db2358ff372bddb
parent    89986496de141213206d49450ffdd36098d41209 (diff)
ipmi: boolify some things
Convert some ints to bools.

Signed-off-by: Corey Minyard <cminyard@mvista.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  drivers/char/ipmi/ipmi_msghandler.c | 22
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c    | 50
-rw-r--r--  include/linux/ipmi_smi.h            |  6
3 files changed, 38 insertions(+), 40 deletions(-)
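As a rough illustration of the pattern this patch applies (an int that only ever holds 0 or 1 becomes a bool, and the matching module_param() type switches from int to bool), here is a minimal, hypothetical sketch; none of the names below appear in the patch:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

/* Before: static int example_flag = 1; module_param(example_flag, int, 0); */
static bool example_flag = true;
module_param(example_flag, bool, 0);
MODULE_PARM_DESC(example_flag, "Illustrative boolean parameter (Y/N or 1/0)");

/* Callbacks that previously took "int enable" now take "bool enable". */
static void example_set_flag(bool enable)
{
        example_flag = enable;
}

static int __init example_init(void)
{
        example_set_flag(example_flag);
        return 0;
}

static void __exit example_exit(void)
{
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

With the parameter declared as bool, the kernel's parameter parser accepts Y/N as well as 1/0, which is the usual benefit of this conversion alongside the readability gain; the actual hunks follow.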
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 872c4ec79bf4..e6db9381b2c7 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -94,8 +94,8 @@ static struct proc_dir_entry *proc_ipmi_root;
 struct ipmi_user {
         struct list_head link;
 
-        /* Set to "0" when the user is destroyed. */
-        int valid;
+        /* Set to false when the user is destroyed. */
+        bool valid;
 
         struct kref refcount;
 
@@ -413,7 +413,7 @@ struct ipmi_smi {
 
         /* For handling of maintenance mode. */
         int maintenance_mode;
-        int maintenance_mode_enable;
+        bool maintenance_mode_enable;
         int auto_maintenance_timeout;
         spinlock_t maintenance_mode_lock; /* Used in a timer... */
 
@@ -980,7 +980,7 @@ int ipmi_create_user(unsigned int if_num,
          */
         mutex_unlock(&ipmi_interfaces_mutex);
 
-        new_user->valid = 1;
+        new_user->valid = true;
         spin_lock_irqsave(&intf->seq_lock, flags);
         list_add_rcu(&new_user->link, &intf->users);
         spin_unlock_irqrestore(&intf->seq_lock, flags);
@@ -1042,7 +1042,7 @@ int ipmi_destroy_user(ipmi_user_t user)
         struct cmd_rcvr *rcvr;
         struct cmd_rcvr *rcvrs = NULL;
 
-        user->valid = 0;
+        user->valid = false;
 
         if (user->handler->ipmi_watchdog_pretimeout)
                 atomic_dec(&intf->event_waiters);
@@ -1184,25 +1184,23 @@ int ipmi_set_maintenance_mode(ipmi_user_t user, int mode)
         if (intf->maintenance_mode != mode) {
                 switch (mode) {
                 case IPMI_MAINTENANCE_MODE_AUTO:
-                        intf->maintenance_mode = mode;
                         intf->maintenance_mode_enable
                                 = (intf->auto_maintenance_timeout > 0);
                         break;
 
                 case IPMI_MAINTENANCE_MODE_OFF:
-                        intf->maintenance_mode = mode;
-                        intf->maintenance_mode_enable = 0;
+                        intf->maintenance_mode_enable = false;
                         break;
 
                 case IPMI_MAINTENANCE_MODE_ON:
-                        intf->maintenance_mode = mode;
-                        intf->maintenance_mode_enable = 1;
+                        intf->maintenance_mode_enable = true;
                         break;
 
                 default:
                         rv = -EINVAL;
                         goto out_unlock;
                 }
+                intf->maintenance_mode = mode;
 
                 maintenance_mode_update(intf);
         }
@@ -1578,7 +1576,7 @@ static int i_ipmi_request(ipmi_user_t user,
                                 = IPMI_MAINTENANCE_MODE_TIMEOUT;
                         if (!intf->maintenance_mode
                             && !intf->maintenance_mode_enable) {
-                                intf->maintenance_mode_enable = 1;
+                                intf->maintenance_mode_enable = true;
                                 maintenance_mode_update(intf);
                         }
                         spin_unlock_irqrestore(&intf->maintenance_mode_lock,
@@ -4129,7 +4127,7 @@ static unsigned int ipmi_timeout_handler(ipmi_smi_t intf, long timeout_period)
                                 -= timeout_period;
                         if (!intf->maintenance_mode
                             && (intf->auto_maintenance_timeout <= 0)) {
-                                intf->maintenance_mode_enable = 0;
+                                intf->maintenance_mode_enable = false;
                                 maintenance_mode_update(intf);
                         }
                 }
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 444ea548dfe3..1c4bb4f6ce93 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -217,7 +217,7 @@ struct smi_info {
         unsigned char msg_flags;
 
         /* Does the BMC have an event buffer? */
-        char has_event_buffer;
+        bool has_event_buffer;
 
         /*
          * If set to true, this will request events the next time the
@@ -230,7 +230,7 @@ struct smi_info {
          * call. Generally used after a panic to make sure stuff goes
          * out.
          */
-        int run_to_completion;
+        bool run_to_completion;
 
         /* The I/O port of an SI interface. */
         int port;
@@ -266,7 +266,7 @@ struct smi_info {
          * memory. Once that situation clears up, it will re-enable
          * interrupts.
          */
-        int interrupt_disabled;
+        bool interrupt_disabled;
 
         /* From the get device id response... */
         struct ipmi_device_id device_id;
@@ -279,7 +279,7 @@ struct smi_info {
          * True if we allocated the device, false if it came from
          * someplace else (like PCI).
          */
-        int dev_registered;
+        bool dev_registered;
 
         /* Slave address, could be reported from DMI. */
         unsigned char slave_addr;
@@ -303,19 +303,19 @@ struct smi_info {
 static int force_kipmid[SI_MAX_PARMS];
 static int num_force_kipmid;
 #ifdef CONFIG_PCI
-static int pci_registered;
+static bool pci_registered;
 #endif
 #ifdef CONFIG_ACPI
-static int pnp_registered;
+static bool pnp_registered;
 #endif
 #ifdef CONFIG_PARISC
-static int parisc_registered;
+static bool parisc_registered;
 #endif
 
 static unsigned int kipmid_max_busy_us[SI_MAX_PARMS];
 static int num_max_busy_us;
 
-static int unload_when_empty = 1;
+static bool unload_when_empty = true;
 
 static int add_smi(struct smi_info *smi);
 static int try_smi_init(struct smi_info *smi);
@@ -457,7 +457,7 @@ static inline void disable_si_irq(struct smi_info *smi_info)
 {
         if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
                 start_disable_irq(smi_info);
-                smi_info->interrupt_disabled = 1;
+                smi_info->interrupt_disabled = true;
                 if (!atomic_read(&smi_info->stop_operation))
                         smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
         }
@@ -467,7 +467,7 @@ static inline void enable_si_irq(struct smi_info *smi_info)
 {
         if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
                 start_enable_irq(smi_info);
-                smi_info->interrupt_disabled = 0;
+                smi_info->interrupt_disabled = false;
         }
 }
 
@@ -712,7 +712,7 @@ static void handle_transaction_done(struct smi_info *smi_info)
                         dev_warn(smi_info->dev,
                                  "Maybe ok, but ipmi might run very slowly.\n");
                 } else
-                        smi_info->interrupt_disabled = 0;
+                        smi_info->interrupt_disabled = false;
                 smi_info->si_state = SI_NORMAL;
                 break;
         }
@@ -935,7 +935,7 @@ static void sender(void *send_info,
         spin_unlock_irqrestore(&smi_info->si_lock, flags);
 }
 
-static void set_run_to_completion(void *send_info, int i_run_to_completion)
+static void set_run_to_completion(void *send_info, bool i_run_to_completion)
 {
         struct smi_info *smi_info = send_info;
         enum si_sm_result result;
@@ -1050,7 +1050,7 @@ static void poll(void *send_info)
 {
         struct smi_info *smi_info = send_info;
         unsigned long flags = 0;
-        int run_to_completion = smi_info->run_to_completion;
+        bool run_to_completion = smi_info->run_to_completion;
 
         /*
          * Make sure there is some delay in the poll loop so we can
@@ -1075,7 +1075,7 @@ static void request_events(void *send_info)
         atomic_set(&smi_info->req_events, 1);
 }
 
-static void set_need_watch(void *send_info, int enable)
+static void set_need_watch(void *send_info, bool enable)
 {
         struct smi_info *smi_info = send_info;
         unsigned long flags;
@@ -1223,7 +1223,7 @@ static int get_smi_info(void *send_info, struct ipmi_smi_info *data)
         return 0;
 }
 
-static void set_maintenance_mode(void *send_info, int enable)
+static void set_maintenance_mode(void *send_info, bool enable)
 {
         struct smi_info *smi_info = send_info;
 
@@ -1364,7 +1364,7 @@ module_param_array(force_kipmid, int, &num_force_kipmid, 0);
 MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or"
                  " disabled(0). Normally the IPMI driver auto-detects"
                  " this, but the value may be overridden by this parm.");
-module_param(unload_when_empty, int, 0);
+module_param(unload_when_empty, bool, 0);
 MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are"
                  " specified or found, default is 1. Setting to 0"
                  " is useful for hot add of devices using hotmod.");
@@ -3372,11 +3372,11 @@ static int try_smi_init(struct smi_info *new_smi)
         INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs));
         new_smi->curr_msg = NULL;
         atomic_set(&new_smi->req_events, 0);
-        new_smi->run_to_completion = 0;
+        new_smi->run_to_completion = false;
         for (i = 0; i < SI_NUM_STATS; i++)
                 atomic_set(&new_smi->stats[i], 0);
 
-        new_smi->interrupt_disabled = 1;
+        new_smi->interrupt_disabled = true;
         atomic_set(&new_smi->stop_operation, 0);
         atomic_set(&new_smi->need_watch, 0);
         new_smi->intf_num = smi_num;
@@ -3384,7 +3384,7 @@ static int try_smi_init(struct smi_info *new_smi)
 
         rv = try_enable_event_buffer(new_smi);
         if (rv == 0)
-                new_smi->has_event_buffer = 1;
+                new_smi->has_event_buffer = true;
 
         /*
          * Start clearing the flags before we enable interrupts or the
@@ -3418,7 +3418,7 @@ static int try_smi_init(struct smi_info *new_smi)
                                 rv);
                         goto out_err;
                 }
-                new_smi->dev_registered = 1;
+                new_smi->dev_registered = true;
         }
 
         rv = ipmi_register_smi(&handlers,
@@ -3467,7 +3467,7 @@ static int try_smi_init(struct smi_info *new_smi)
         wait_for_timer_and_thread(new_smi);
 
  out_err:
-        new_smi->interrupt_disabled = 1;
+        new_smi->interrupt_disabled = true;
 
         if (new_smi->intf) {
                 ipmi_unregister_smi(new_smi->intf);
@@ -3503,7 +3503,7 @@ static int try_smi_init(struct smi_info *new_smi)
 
         if (new_smi->dev_registered) {
                 platform_device_unregister(new_smi->pdev);
-                new_smi->dev_registered = 0;
+                new_smi->dev_registered = false;
         }
 
         return rv;
@@ -3558,14 +3558,14 @@ static int init_ipmi_si(void)
                         printk(KERN_ERR PFX "Unable to register "
                                "PCI driver: %d\n", rv);
                 else
-                        pci_registered = 1;
+                        pci_registered = true;
         }
 #endif
 
 #ifdef CONFIG_ACPI
         if (si_tryacpi) {
                 pnp_register_driver(&ipmi_pnp_driver);
-                pnp_registered = 1;
+                pnp_registered = true;
         }
 #endif
 
@@ -3581,7 +3581,7 @@ static int init_ipmi_si(void)
 
 #ifdef CONFIG_PARISC
         register_parisc_driver(&ipmi_parisc_driver);
-        parisc_registered = 1;
+        parisc_registered = true;
         /* poking PC IO addresses will crash machine, don't do it */
         si_trydefaults = 0;
 #endif
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h
index 2a7ff302d990..bd349240d50e 100644
--- a/include/linux/ipmi_smi.h
+++ b/include/linux/ipmi_smi.h
@@ -114,14 +114,14 @@ struct ipmi_smi_handlers {
            pretimeouts, or not. Used by the SMI to know if it should
            watch for these. This may be NULL if the SMI does not
            implement it. */
-        void (*set_need_watch)(void *send_info, int enable);
+        void (*set_need_watch)(void *send_info, bool enable);
 
         /* Called when the interface should go into "run to
            completion" mode. If this call sets the value to true, the
            interface should make sure that all messages are flushed
            out and that none are pending, and any new requests are run
            to completion immediately. */
-        void (*set_run_to_completion)(void *send_info, int run_to_completion);
+        void (*set_run_to_completion)(void *send_info, bool run_to_completion);
 
         /* Called to poll for work to do. This is so upper layers can
            poll for operations during things like crash dumps. */
@@ -132,7 +132,7 @@ struct ipmi_smi_handlers {
            setting. The message handler does the mode handling. Note
            that this is called from interrupt context, so it cannot
            block. */
-        void (*set_maintenance_mode)(void *send_info, int enable);
+        void (*set_maintenance_mode)(void *send_info, bool enable);
 
         /* Tell the handler that we are using it/not using it. The
            message handler get the modules that this handler belongs