 drivers/char/ipmi/ipmi_msghandler.c | 219
 drivers/char/ipmi/ipmi_si_intf.c    |  51
 include/linux/ipmi.h                |   2
 include/linux/ipmi_smi.h            |   7
 4 files changed, 182 insertions, 97 deletions
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index ec4e10fcf1a5..872c4ec79bf4 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -55,6 +55,7 @@ static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
 static int ipmi_init_msghandler(void);
 static void smi_recv_tasklet(unsigned long);
 static void handle_new_recv_msgs(ipmi_smi_t intf);
+static void need_waiter(ipmi_smi_t intf);
 
 static int initialized;
 
@@ -73,6 +74,20 @@ static struct proc_dir_entry *proc_ipmi_root;
  */
 #define MAX_MSG_TIMEOUT         60000
 
+/* Call every ~1000 ms. */
+#define IPMI_TIMEOUT_TIME       1000
+
+/* How many jiffies does it take to get to the timeout time. */
+#define IPMI_TIMEOUT_JIFFIES    ((IPMI_TIMEOUT_TIME * HZ) / 1000)
+
+/*
+ * Request events from the queue every second (this is the number of
+ * IPMI_TIMEOUT_TIMES between event requests).  Hopefully, in the
+ * future, IPMI will add a way to know immediately if an event is in
+ * the queue and this silliness can go away.
+ */
+#define IPMI_REQUEST_EV_TIME    (1000 / (IPMI_TIMEOUT_TIME))
+
 /*
  * The main "user" data structure.
  */
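A quick sanity check of the arithmetic behind these macros (a standalone sketch, not kernel code; HZ is hardcoded to an illustrative 250 here, whereas in the kernel it comes from the build config): IPMI_REQUEST_EV_TIME works out to 1, so events end up being requested on every ~1 s timeout tick.

    #include <stdio.h>

    #define HZ                      250     /* illustrative; the kernel config supplies this */
    #define IPMI_TIMEOUT_TIME       1000
    #define IPMI_TIMEOUT_JIFFIES    ((IPMI_TIMEOUT_TIME * HZ) / 1000)
    #define IPMI_REQUEST_EV_TIME    (1000 / (IPMI_TIMEOUT_TIME))

    int main(void)
    {
        printf("tick period = %d jiffies\n", IPMI_TIMEOUT_JIFFIES);     /* 250 */
        printf("ticks per event request = %d\n", IPMI_REQUEST_EV_TIME); /* 1 */
        return 0;
    }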
@@ -92,7 +107,7 @@ struct ipmi_user {
         ipmi_smi_t intf;
 
         /* Does this interface receive IPMI events? */
-        int gets_events;
+        bool gets_events;
 };
 
 struct cmd_rcvr {
@@ -383,6 +398,9 @@ struct ipmi_smi {
         unsigned int waiting_events_count; /* How many events in queue? */
         char delivering_events;
         char event_msg_printed;
+        atomic_t event_waiters;
+        unsigned int ticks_to_req_ev;
+        int last_needs_timer;
 
         /*
          * The event receiver for my BMC, only really used at panic
@@ -451,7 +469,6 @@ static DEFINE_MUTEX(ipmi_interfaces_mutex);
 static LIST_HEAD(smi_watchers);
 static DEFINE_MUTEX(smi_watchers_mutex);
 
-
 #define ipmi_inc_stat(intf, stat) \
         atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
 #define ipmi_get_stat(intf, stat) \
@@ -772,6 +789,7 @@ static int intf_next_seq(ipmi_smi_t intf,
                 *seq = i;
                 *seqid = intf->seq_table[i].seqid;
                 intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
+                need_waiter(intf);
         } else {
                 rv = -EAGAIN;
         }
@@ -941,7 +959,7 @@ int ipmi_create_user(unsigned int          if_num,
         new_user->handler = handler;
         new_user->handler_data = handler_data;
         new_user->intf = intf;
-        new_user->gets_events = 0;
+        new_user->gets_events = false;
 
         if (!try_module_get(intf->handlers->owner)) {
                 rv = -ENODEV;
@@ -966,6 +984,11 @@ int ipmi_create_user(unsigned int          if_num,
         spin_lock_irqsave(&intf->seq_lock, flags);
         list_add_rcu(&new_user->link, &intf->users);
         spin_unlock_irqrestore(&intf->seq_lock, flags);
+        if (handler->ipmi_watchdog_pretimeout) {
+                /* User wants pretimeouts, so make sure to watch for them. */
+                if (atomic_inc_return(&intf->event_waiters) == 1)
+                        need_waiter(intf);
+        }
         *user = new_user;
         return 0;
 
@@ -1021,6 +1044,12 @@ int ipmi_destroy_user(ipmi_user_t user)
 
         user->valid = 0;
 
+        if (user->handler->ipmi_watchdog_pretimeout)
+                atomic_dec(&intf->event_waiters);
+
+        if (user->gets_events)
+                atomic_dec(&intf->event_waiters);
+
         /* Remove the user from the interface's sequence table. */
         spin_lock_irqsave(&intf->seq_lock, flags);
         list_del_rcu(&user->link);
@@ -1184,7 +1213,7 @@ int ipmi_set_maintenance_mode(ipmi_user_t user, int mode)
 }
 EXPORT_SYMBOL(ipmi_set_maintenance_mode);
 
-int ipmi_set_gets_events(ipmi_user_t user, int val)
+int ipmi_set_gets_events(ipmi_user_t user, bool val)
 {
         unsigned long        flags;
         ipmi_smi_t           intf = user->intf;
@@ -1194,8 +1223,18 @@ int ipmi_set_gets_events(ipmi_user_t user, int val)
         INIT_LIST_HEAD(&msgs);
 
         spin_lock_irqsave(&intf->events_lock, flags);
+        if (user->gets_events == val)
+                goto out;
+
         user->gets_events = val;
 
+        if (val) {
+                if (atomic_inc_return(&intf->event_waiters) == 1)
+                        need_waiter(intf);
+        } else {
+                atomic_dec(&intf->event_waiters);
+        }
+
         if (intf->delivering_events)
                 /*
                  * Another thread is delivering events for this, so
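All of the atomic_inc_return()/atomic_dec() pairs in this patch follow one waiter-accounting pattern: the first party to take an interest in events (count 0 -> 1) kicks the timer via need_waiter(); later waiters only bump the count, and the timer quietly stops once the count drains back to zero. A minimal userspace sketch of the pattern, with C11 atomics standing in for the kernel's atomic_t (all names illustrative):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int event_waiters;        /* stands in for intf->event_waiters */

    static void need_waiter_sim(void)
    {
        printf("first waiter: (re)start the 1 s timer\n");
    }

    static void add_waiter(void)
    {
        /* atomic_fetch_add() returns the old value, so old + 1 is the
         * new count; only the 0 -> 1 transition starts the timer. */
        if (atomic_fetch_add(&event_waiters, 1) + 1 == 1)
            need_waiter_sim();
    }

    static void remove_waiter(void)
    {
        atomic_fetch_sub(&event_waiters, 1);
    }

    int main(void)
    {
        add_waiter();       /* count 0 -> 1: timer started */
        add_waiter();       /* count 1 -> 2: nothing to do */
        remove_waiter();
        remove_waiter();    /* count reaches 0; the timer will notice
                               on its next tick and stop re-arming */
        return 0;
    }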
@@ -1289,6 +1328,9 @@ int ipmi_register_for_cmd(ipmi_user_t   user,
                 goto out_unlock;
         }
 
+        if (atomic_inc_return(&intf->event_waiters) == 1)
+                need_waiter(intf);
+
         list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
 
  out_unlock:
@@ -1330,6 +1372,7 @@ int ipmi_unregister_for_cmd(ipmi_user_t   user,
         mutex_unlock(&intf->cmd_rcvrs_mutex);
         synchronize_rcu();
         while (rcvrs) {
+                atomic_dec(&intf->event_waiters);
                 rcvr = rcvrs;
                 rcvrs = rcvr->next;
                 kfree(rcvr);
@@ -2876,6 +2919,8 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
                      (unsigned long) intf);
         atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
         spin_lock_init(&intf->events_lock);
+        atomic_set(&intf->event_waiters, 0);
+        intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
         INIT_LIST_HEAD(&intf->waiting_events);
         intf->waiting_events_count = 0;
         mutex_init(&intf->cmd_rcvrs_mutex);
@@ -3965,7 +4010,8 @@ smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
 
 static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
                               struct list_head *timeouts, long timeout_period,
-                              int slot, unsigned long *flags)
+                              int slot, unsigned long *flags,
+                              unsigned int *waiting_msgs)
 {
         struct ipmi_recv_msg     *msg;
         struct ipmi_smi_handlers *handlers;
@@ -3977,8 +4023,10 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
                 return;
 
         ent->timeout -= timeout_period;
-        if (ent->timeout > 0)
+        if (ent->timeout > 0) {
+                (*waiting_msgs)++;
                 return;
+        }
 
         if (ent->retries_left == 0) {
                 /* The message has used all its retries. */
@@ -3995,6 +4043,8 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
                 struct ipmi_smi_msg *smi_msg;
                 /* More retries, send again. */
 
+                (*waiting_msgs)++;
+
                 /*
                  * Start with the max timer, set to normal timer after
                  * the message is sent.
@@ -4040,117 +4090,118 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
         }
 }
 
-static void ipmi_timeout_handler(long timeout_period)
+static unsigned int ipmi_timeout_handler(ipmi_smi_t intf, long timeout_period)
 {
-        ipmi_smi_t           intf;
         struct list_head     timeouts;
         struct ipmi_recv_msg *msg, *msg2;
         unsigned long        flags;
         int                  i;
+        unsigned int         waiting_msgs = 0;
 
-        rcu_read_lock();
-        list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
-                tasklet_schedule(&intf->recv_tasklet);
-
-                /*
-                 * Go through the seq table and find any messages that
-                 * have timed out, putting them in the timeouts
-                 * list.
-                 */
-                INIT_LIST_HEAD(&timeouts);
-                spin_lock_irqsave(&intf->seq_lock, flags);
-                for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
-                        check_msg_timeout(intf, &(intf->seq_table[i]),
-                                          &timeouts, timeout_period, i,
-                                          &flags);
-                spin_unlock_irqrestore(&intf->seq_lock, flags);
+        /*
+         * Go through the seq table and find any messages that
+         * have timed out, putting them in the timeouts
+         * list.
+         */
+        INIT_LIST_HEAD(&timeouts);
+        spin_lock_irqsave(&intf->seq_lock, flags);
+        for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
+                check_msg_timeout(intf, &(intf->seq_table[i]),
+                                  &timeouts, timeout_period, i,
+                                  &flags, &waiting_msgs);
+        spin_unlock_irqrestore(&intf->seq_lock, flags);
 
         list_for_each_entry_safe(msg, msg2, &timeouts, link)
                 deliver_err_response(msg, IPMI_TIMEOUT_COMPLETION_CODE);
 
         /*
          * Maintenance mode handling.  Check the timeout
          * optimistically before we claim the lock.  It may
          * mean a timeout gets missed occasionally, but that
         * only means the timeout gets extended by one period
         * in that case.  No big deal, and it avoids the lock
         * most of the time.
         */
+        if (intf->auto_maintenance_timeout > 0) {
+                spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
                 if (intf->auto_maintenance_timeout > 0) {
-                        spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
-                        if (intf->auto_maintenance_timeout > 0) {
-                                intf->auto_maintenance_timeout
-                                        -= timeout_period;
-                                if (!intf->maintenance_mode
-                                    && (intf->auto_maintenance_timeout <= 0)) {
-                                        intf->maintenance_mode_enable = 0;
-                                        maintenance_mode_update(intf);
-                                }
+                        intf->auto_maintenance_timeout
+                                -= timeout_period;
+                        if (!intf->maintenance_mode
+                            && (intf->auto_maintenance_timeout <= 0)) {
+                                intf->maintenance_mode_enable = 0;
+                                maintenance_mode_update(intf);
                         }
-                        spin_unlock_irqrestore(&intf->maintenance_mode_lock,
-                                               flags);
                 }
+                spin_unlock_irqrestore(&intf->maintenance_mode_lock,
+                                       flags);
         }
-        rcu_read_unlock();
+
+        tasklet_schedule(&intf->recv_tasklet);
+
+        return waiting_msgs;
 }
 
-static void ipmi_request_event(void)
+static void ipmi_request_event(ipmi_smi_t intf)
 {
-        ipmi_smi_t               intf;
         struct ipmi_smi_handlers *handlers;
 
-        rcu_read_lock();
-        /*
-         * Called from the timer, no need to check if handlers is
-         * valid.
-         */
-        list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
-                /* No event requests when in maintenance mode. */
-                if (intf->maintenance_mode_enable)
-                        continue;
+        /* No event requests when in maintenance mode. */
+        if (intf->maintenance_mode_enable)
+                return;
 
         handlers = intf->handlers;
         if (handlers)
                 handlers->request_events(intf->send_info);
-        }
-        rcu_read_unlock();
 }
 
 static struct timer_list ipmi_timer;
 
-/* Call every ~1000 ms. */
-#define IPMI_TIMEOUT_TIME       1000
-
-/* How many jiffies does it take to get to the timeout time. */
-#define IPMI_TIMEOUT_JIFFIES    ((IPMI_TIMEOUT_TIME * HZ) / 1000)
-
-/*
- * Request events from the queue every second (this is the number of
- * IPMI_TIMEOUT_TIMES between event requests).  Hopefully, in the
- * future, IPMI will add a way to know immediately if an event is in
- * the queue and this silliness can go away.
- */
-#define IPMI_REQUEST_EV_TIME    (1000 / (IPMI_TIMEOUT_TIME))
-
 static atomic_t stop_operation;
-static unsigned int ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
 
 static void ipmi_timeout(unsigned long data)
 {
+        ipmi_smi_t intf;
+        int nt = 0;
+
         if (atomic_read(&stop_operation))
                 return;
 
-        ticks_to_req_ev--;
-        if (ticks_to_req_ev == 0) {
-                ipmi_request_event();
-                ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
-        }
+        rcu_read_lock();
+        list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+                int lnt = 0;
+
+                if (atomic_read(&intf->event_waiters)) {
+                        intf->ticks_to_req_ev--;
+                        if (intf->ticks_to_req_ev == 0) {
+                                ipmi_request_event(intf);
+                                intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
+                        }
+                        lnt++;
+                }
 
-        ipmi_timeout_handler(IPMI_TIMEOUT_TIME);
+                lnt += ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
 
-        mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
+                lnt = !!lnt;
+                if (lnt != intf->last_needs_timer &&
+                                        intf->handlers->set_need_watch)
+                        intf->handlers->set_need_watch(intf->send_info, lnt);
+                intf->last_needs_timer = lnt;
+
+                nt += lnt;
+        }
+        rcu_read_unlock();
+
+        if (nt)
+                mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
 }
 
+static void need_waiter(ipmi_smi_t intf)
+{
+        /* Racy, but worst case we start the timer twice. */
+        if (!timer_pending(&ipmi_timer))
+                mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
+}
 
 static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
 static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
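Net effect of this hunk: the previously free-running one-second timer becomes self-stopping. ipmi_timeout() re-arms itself only while some interface still reports waiters or in-flight messages (nt != 0), and need_waiter() restarts it when interest reappears. A userspace sketch of that lifecycle, with a plain bool standing in for timer_pending()/mod_timer() (illustrative names, not kernel API):

    #include <stdbool.h>
    #include <stdio.h>

    static bool timer_armed;    /* stands in for timer_pending() */
    static int  event_waiters;  /* stands in for intf->event_waiters */

    static void mod_timer_sim(void) { timer_armed = true; }

    static void need_waiter_sim(void)
    {
        /* Racy in the kernel too; worst case the timer starts twice. */
        if (!timer_armed)
            mod_timer_sim();
    }

    static void tick(void)      /* stands in for ipmi_timeout() */
    {
        timer_armed = false;    /* the one-shot timer just fired */
        if (event_waiters)      /* "nt != 0" in the patch */
            mod_timer_sim();    /* re-arm for the next second */
    }

    int main(void)
    {
        event_waiters = 1;
        need_waiter_sim();      /* first waiter arms the timer */
        tick();                 /* stays armed while waiters exist */
        printf("armed with waiters: %d\n", timer_armed);

        event_waiters = 0;
        tick();                 /* goes idle: no re-arm */
        printf("armed when idle:    %d\n", timer_armed);
        return 0;
    }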
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 25a1436a4291..444ea548dfe3 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -257,6 +257,9 @@ struct smi_info {
         /* Used to gracefully stop the timer without race conditions. */
         atomic_t            stop_operation;
 
+        /* Are we waiting for the events, pretimeouts, received msgs? */
+        atomic_t            need_watch;
+
         /*
          * The driver will disable interrupts when it gets into a
          * situation where it cannot handle messages due to lack of
@@ -862,6 +865,19 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
         return si_sm_result;
 }
 
+static void check_start_timer_thread(struct smi_info *smi_info)
+{
+        if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) {
+                smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
+
+                if (smi_info->thread)
+                        wake_up_process(smi_info->thread);
+
+                start_next_msg(smi_info);
+                smi_event_handler(smi_info, 0);
+        }
+}
+
 static void sender(void                *send_info,
                    struct ipmi_smi_msg *msg,
                    int                 priority)
@@ -915,15 +931,7 @@ static void sender(void                *send_info,
         else
                 list_add_tail(&msg->link, &smi_info->xmit_msgs);
 
-        if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) {
-                smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
-
-                if (smi_info->thread)
-                        wake_up_process(smi_info->thread);
-
-                start_next_msg(smi_info);
-                smi_event_handler(smi_info, 0);
-        }
+        check_start_timer_thread(smi_info);
         spin_unlock_irqrestore(&smi_info->si_lock, flags);
 }
 
@@ -1023,9 +1031,15 @@ static int ipmi_thread(void *data)
                         ; /* do nothing */
                 else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait)
                         schedule();
-                else if (smi_result == SI_SM_IDLE)
-                        schedule_timeout_interruptible(100);
-                else
+                else if (smi_result == SI_SM_IDLE) {
+                        if (atomic_read(&smi_info->need_watch)) {
+                                schedule_timeout_interruptible(100);
+                        } else {
+                                /* Wait to be woken up when we are needed. */
+                                __set_current_state(TASK_INTERRUPTIBLE);
+                                schedule();
+                        }
+                } else
                         schedule_timeout_interruptible(1);
         }
         return 0;
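With need_watch clear, the worker thread now parks itself until wake_up_process() instead of waking every 100 ticks to poll an idle state machine. A rough userspace analogue, using a pthread condition variable in place of TASK_INTERRUPTIBLE/schedule() and wake_up_process() (assumed names, not the driver's code):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  wake = PTHREAD_COND_INITIALIZER;
    static bool need_watch;     /* stands in for smi_info->need_watch */
    static bool work_pending;

    static void *worker(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        for (int i = 0; i < 3; i++) {
            while (!work_pending) {
                if (need_watch) {
                    /* busy path: short timed sleep, like the old
                     * schedule_timeout_interruptible(100) polling */
                    pthread_mutex_unlock(&lock);
                    usleep(100 * 1000);
                    pthread_mutex_lock(&lock);
                } else {
                    /* idle path: block until explicitly woken */
                    pthread_cond_wait(&wake, &lock);
                }
            }
            work_pending = false;
            printf("worker: handled message %d\n", i);
        }
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    static void sender_sim(void)   /* stands in for sender() waking the thread */
    {
        pthread_mutex_lock(&lock);
        work_pending = true;
        pthread_cond_signal(&wake);
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        pthread_t t;
        pthread_create(&t, NULL, worker, NULL);
        for (int i = 0; i < 3; i++) {
            usleep(50 * 1000);
            sender_sim();
        }
        pthread_join(&t, NULL);
        return 0;
    }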
@@ -1061,6 +1075,17 @@ static void request_events(void *send_info)
         atomic_set(&smi_info->req_events, 1);
 }
 
+static void set_need_watch(void *send_info, int enable)
+{
+        struct smi_info *smi_info = send_info;
+        unsigned long flags;
+
+        atomic_set(&smi_info->need_watch, enable);
+        spin_lock_irqsave(&smi_info->si_lock, flags);
+        check_start_timer_thread(smi_info);
+        spin_unlock_irqrestore(&smi_info->si_lock, flags);
+}
+
 static int initialized;
 
 static void smi_timeout(unsigned long data)
@@ -1212,6 +1237,7 @@ static struct ipmi_smi_handlers handlers = {
         .get_smi_info           = get_smi_info,
         .sender                 = sender,
         .request_events         = request_events,
+        .set_need_watch         = set_need_watch,
         .set_maintenance_mode   = set_maintenance_mode,
         .set_run_to_completion  = set_run_to_completion,
         .poll                   = poll,
@@ -3352,6 +3378,7 @@ static int try_smi_init(struct smi_info *new_smi)
 
         new_smi->interrupt_disabled = 1;
         atomic_set(&new_smi->stop_operation, 0);
+        atomic_set(&new_smi->need_watch, 0);
         new_smi->intf_num = smi_num;
         smi_num++;
 
diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h
index 1f9f56e28851..76d2acbfa7c6 100644
--- a/include/linux/ipmi.h
+++ b/include/linux/ipmi.h
@@ -237,7 +237,7 @@ int ipmi_set_maintenance_mode(ipmi_user_t user, int mode);
  * The first user that sets this to TRUE will receive all events that
  * have been queued while no one was waiting for events.
  */
-int ipmi_set_gets_events(ipmi_user_t user, int val);
+int ipmi_set_gets_events(ipmi_user_t user, bool val);
 
 /*
  * Called when a new SMI is registered.  This will also be called on
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h
index 8ea3fe0b9759..2a7ff302d990 100644
--- a/include/linux/ipmi_smi.h
+++ b/include/linux/ipmi_smi.h
@@ -109,6 +109,13 @@ struct ipmi_smi_handlers {
            events from the BMC we are attached to. */
         void (*request_events)(void *send_info);
 
+        /* Called by the upper layer when some user requires that the
+           interface watch for events, received messages, watchdog
+           pretimeouts, or not.  Used by the SMI to know if it should
+           watch for these.  This may be NULL if the SMI does not
+           implement it. */
+        void (*set_need_watch)(void *send_info, int enable);
+
         /* Called when the interface should go into "run to
            completion" mode.  If this call sets the value to true, the
            interface should make sure that all messages are flushed
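Since ->set_need_watch may be NULL, callers have to guard it, which is what the edge-triggered call site in ipmi_timeout() above does. A compact standalone sketch of that optional-callback pattern (illustrative types and names, not the driver's structures):

    #include <stddef.h>
    #include <stdio.h>

    struct handlers {
        void (*set_need_watch)(void *send_info, int enable);   /* may be NULL */
    };

    static void demo_set_need_watch(void *send_info, int enable)
    {
        (void)send_info;
        printf("watch %s\n", enable ? "on" : "off");
    }

    static void update_watch(struct handlers *h, void *send_info,
                             int need, int *last)
    {
        need = !!need;
        /* Only notify on a change, and only if the SMI implements it. */
        if (need != *last && h->set_need_watch)
            h->set_need_watch(send_info, need);
        *last = need;
    }

    int main(void)
    {
        struct handlers h = { .set_need_watch = demo_set_need_watch };
        int last = 0;

        update_watch(&h, NULL, 1, &last);   /* -> "watch on" */
        update_watch(&h, NULL, 1, &last);   /* no change, no call */
        update_watch(&h, NULL, 0, &last);   /* -> "watch off" */

        h.set_need_watch = NULL;            /* an SMI without support */
        update_watch(&h, NULL, 1, &last);   /* safely skipped */
        return 0;
    }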