author	Corey Minyard <minyard@acm.org>	2005-11-07 04:00:03 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-11-07 10:53:44 -0500
commit	a9a2c44ff0a1350f8bfe3a162ecf71b1c9ce5cc2 (patch)
tree	ac5ffc65084b48895239e9044893ae7e0b2d91b1 /drivers/char/ipmi
parent	c3e7e7916ec61cf58c88af12f4db17f28cffd83a (diff)
[PATCH] ipmi: add timer thread
We must poll for responses to commands when interrupts aren't in use.
The default poll interval is based on using a kernel timer, which
varies with HZ.  For character-based interfaces like KCS and SMIC
though, that can be way too slow (>15 minutes to flash a new firmware
with KCS, >20 seconds to retrieve the sensor list).

This creates a low-priority kernel thread to poll more often.  If the
state machine is idle, so is the kernel thread.  But if there's an
active command, it polls quite rapidly.  This decreases the firmware
flash time from 15 minutes to 1.5 minutes, and the sensor list time
to 4.5 seconds, on a Dell PowerEdge x8x system.

The timer-based polling remains, to ensure some amount of
responsiveness even under high user process CPU load.  Checking for a
stopped timer at rmmod now uses atomics and del_timer_sync() to
ensure safe stoppage.

Signed-off-by: Matt Domsch <Matt_Domsch@dell.com>
Signed-off-by: Corey Minyard <minyard@acm.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'drivers/char/ipmi')
-rw-r--r--	drivers/char/ipmi/ipmi_si_intf.c	84
1 file changed, 63 insertions(+), 21 deletions(-)
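(The poll loop added by this patch uses the 2005-era daemonize()/kernel_thread()/SIGKILL
interface.  For orientation, here is a minimal sketch of the same loop written against
the later kthread API; smi_event_handler() and the SI_SM_* results are the driver's
own, while the function name and the kthread_run() call are illustrative, not part of
this patch.)

	#include <linux/kthread.h>
	#include <linux/delay.h>
	#include <linux/sched.h>

	static int ipmi_kthread(void *data)
	{
		struct smi_info *smi_info = data;
		enum si_sm_result smi_result;
		unsigned long flags;

		set_user_nice(current, 19);
		while (!kthread_should_stop()) {
			spin_lock_irqsave(&smi_info->si_lock, flags);
			smi_result = smi_event_handler(smi_info, 0);
			spin_unlock_irqrestore(&smi_info->si_lock, flags);

			if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
				cond_resched();	/* active command: poll again at once */
			else if (smi_result == SI_SM_CALL_WITH_DELAY) {
				udelay(1);	/* brief pause, then poll again */
				cond_resched();
			} else
				schedule_timeout_interruptible(1); /* idle: sleep a jiffy */
		}
		return 0;
	}

	/* started with kthread_run(ipmi_kthread, smi_info, "kipmi%d",
	   smi_info->intf_num) and stopped with kthread_stop(), which
	   replaces the SIGKILL/completion handshake used below. */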
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index d514df7c7283..fa3be622ca97 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -126,6 +126,7 @@ struct ipmi_device_id {
 
 struct smi_info
 {
+        int                    intf_num;
         ipmi_smi_t             intf;
         struct si_sm_data      *si_sm;
         struct si_sm_handlers  *handlers;
@@ -193,8 +194,7 @@ struct smi_info
         unsigned long          last_timeout_jiffies;
 
         /* Used to gracefully stop the timer without race conditions. */
-        volatile int           stop_operation;
-        volatile int           timer_stopped;
+        atomic_t               stop_operation;
 
         /* The driver will disable interrupts when it gets into a
            situation where it cannot handle messages due to lack of
@@ -221,6 +221,9 @@ struct smi_info
         unsigned long          events;
         unsigned long          watchdog_pretimeouts;
         unsigned long          incoming_messages;
+
+        struct completion      exiting;
+        long                   thread_pid;
 };
 
 static struct notifier_block *xaction_notifier_list;
@@ -779,6 +782,38 @@ static void set_run_to_completion(void *send_info, int i_run_to_completion)
         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
 }
 
+static int ipmi_thread(void *data)
+{
+        struct smi_info *smi_info = data;
+        unsigned long flags, last=1;
+        enum si_sm_result smi_result;
+
+        daemonize("kipmi%d", smi_info->intf_num);
+        allow_signal(SIGKILL);
+        set_user_nice(current, 19);
+        while (!atomic_read(&smi_info->stop_operation)) {
+                schedule_timeout(last);
+                spin_lock_irqsave(&(smi_info->si_lock), flags);
+                smi_result=smi_event_handler(smi_info, 0);
+                spin_unlock_irqrestore(&(smi_info->si_lock), flags);
+                if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
+                        last = 0;
+                else if (smi_result == SI_SM_CALL_WITH_DELAY) {
+                        udelay(1);
+                        last = 0;
+                }
+                else {
+                        /* System is idle; go to sleep */
+                        last = 1;
+                        current->state = TASK_INTERRUPTIBLE;
+                }
+        }
+        smi_info->thread_pid = 0;
+        complete_and_exit(&(smi_info->exiting), 0);
+        return 0;
+}
+
+
 static void poll(void *send_info)
 {
         struct smi_info *smi_info = send_info;
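A note on the sleep logic in ipmi_thread(), since it is easy to misread: `last` is
the timeout handed to schedule_timeout() at the top of the *next* iteration, and
schedule_timeout() only really sleeps if the task state was changed first.

	/* Effect of `last` on the schedule_timeout(last) call at the
	 * top of the loop (descriptive only, not part of the patch):
	 *
	 * busy branches: the task state stays TASK_RUNNING and
	 * last == 0, so schedule_timeout(0) just yields the CPU and
	 * the loop polls the state machine again immediately;
	 *
	 * idle branch: current->state = TASK_INTERRUPTIBLE and
	 * last == 1, so the next schedule_timeout(1) sleeps for up to
	 * a jiffy, or until the SIGKILL sent at teardown (permitted
	 * by the allow_signal() call above) wakes it early.
	 */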
@@ -837,10 +872,8 @@ static void smi_timeout(unsigned long data)
         struct timeval t;
 #endif
 
-        if (smi_info->stop_operation) {
-                smi_info->timer_stopped = 1;
+        if (atomic_read(&smi_info->stop_operation))
                 return;
-        }
 
         spin_lock_irqsave(&(smi_info->si_lock), flags);
 #ifdef DEBUG_TIMING
@@ -913,7 +946,7 @@ static irqreturn_t si_irq_handler(int irq, void *data, struct pt_regs *regs)
         smi_info->interrupts++;
         spin_unlock(&smi_info->count_lock);
 
-        if (smi_info->stop_operation)
+        if (atomic_read(&smi_info->stop_operation))
                 goto out;
 
 #ifdef DEBUG_TIMING
@@ -1432,7 +1465,7 @@ static u32 ipmi_acpi_gpe(void *context)
         smi_info->interrupts++;
         spin_unlock(&smi_info->count_lock);
 
-        if (smi_info->stop_operation)
+        if (atomic_read(&smi_info->stop_operation))
                 goto out;
 
 #ifdef DEBUG_TIMING
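The three hunks above are the other half of the commit message's "uses atomics and
del_timer_sync()" point: the volatile int flags gave no atomicity or ordering
guarantees, and the timer had to report its own death through timer_stopped so that
rmmod could spin-wait on it.

	/* Old shutdown handshake (the code removed above and below):
	 *	stop_operation = 1;
	 *	while (!timer_stopped)
	 *		schedule_timeout_uninterruptible(1);
	 * i.e. wait for the next timer run just to learn it bailed out.
	 * New handshake:
	 *	atomic_inc(&smi_info->stop_operation);
	 *	del_timer_sync(&smi_info->si_timer);
	 * del_timer_sync() both cancels a pending timer and waits for a
	 * handler already running on another CPU to finish.
	 */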
@@ -2177,6 +2210,16 @@ static void setup_xaction_handlers(struct smi_info *smi_info)
         setup_dell_poweredge_bt_xaction_handler(smi_info);
 }
 
+static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
+{
+        if (smi_info->thread_pid > 0) {
+                /* wake the potentially sleeping thread */
+                kill_proc(smi_info->thread_pid, SIGKILL, 0);
+                wait_for_completion(&(smi_info->exiting));
+        }
+        del_timer_sync(&smi_info->si_timer);
+}
+
 /* Returns 0 if initialized, or negative on an error. */
 static int init_one_smi(int intf_num, struct smi_info **smi)
 {
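wait_for_timer_and_thread() only works if the caller has already raised
stop_operation: SIGKILL merely wakes the thread out of its interruptible sleep, and
the loop in ipmi_thread() exits only once it sees the flag.  The thread_pid > 0 guard
also skips the wait when kernel_thread() failed (it returns a negative errno) or when
no thread was started at all (the BT case below).  Both call sites later in the patch
therefore follow the same sequence:

	atomic_inc(&smi_info->stop_operation);	/* make ipmi_thread()'s loop exit */
	wait_for_timer_and_thread(smi_info);	/* wake it, reap it, stop the timer */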
@@ -2284,8 +2327,8 @@ static int init_one_smi(int intf_num, struct smi_info **smi)
         new_smi->run_to_completion = 0;
 
         new_smi->interrupt_disabled = 0;
-        new_smi->timer_stopped = 0;
-        new_smi->stop_operation = 0;
+        atomic_set(&new_smi->stop_operation, 0);
+        new_smi->intf_num = intf_num;
 
         /* Start clearing the flags before we enable interrupts or the
            timer to avoid racing with the timer. */
@@ -2303,7 +2346,14 @@ static int init_one_smi(int intf_num, struct smi_info **smi)
         new_smi->si_timer.function = smi_timeout;
         new_smi->last_timeout_jiffies = jiffies;
         new_smi->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
+
         add_timer(&(new_smi->si_timer));
+        if (new_smi->si_type != SI_BT) {
+                init_completion(&(new_smi->exiting));
+                new_smi->thread_pid = kernel_thread(ipmi_thread, new_smi,
+                                                    CLONE_FS|CLONE_FILES|
+                                                    CLONE_SIGHAND);
+        }
 
         rv = ipmi_register_smi(&handlers,
                                new_smi,
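The thread is deliberately skipped for BT: as the commit message notes, the slow
cases are the character-based KCS and SMIC state machines, while BT (Block Transfer)
hands the controller a whole message per transaction, so the HZ-based timer remains
adequate there.  In the BT case thread_pid is presumably left zero from the initial
allocation, which the thread_pid > 0 guard relies on.  A belt-and-braces variant of
the thread start, illustrative only and not in the patch:

	new_smi->thread_pid = kernel_thread(ipmi_thread, new_smi,
	                                    CLONE_FS|CLONE_FILES|
	                                    CLONE_SIGHAND);
	if (new_smi->thread_pid < 0)
	        /* not fatal: the timer still polls, just slowly */
	        printk(KERN_WARNING
	               "ipmi_si: could not start kipmi%d thread\n",
	               new_smi->intf_num);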
@@ -2345,12 +2395,8 @@ static int init_one_smi(int intf_num, struct smi_info **smi)
         return 0;
 
  out_err_stop_timer:
-        new_smi->stop_operation = 1;
-
-        /* Wait for the timer to stop.  This avoids problems with race
-           conditions removing the timer here. */
-        while (!new_smi->timer_stopped)
-                schedule_timeout_uninterruptible(1);
+        atomic_inc(&new_smi->stop_operation);
+        wait_for_timer_and_thread(new_smi);
 
  out_err:
         if (new_smi->intf)
@@ -2456,8 +2502,7 @@ static void __exit cleanup_one_si(struct smi_info *to_clean)
         spin_lock_irqsave(&(to_clean->si_lock), flags);
         spin_lock(&(to_clean->msg_lock));
 
-        to_clean->stop_operation = 1;
-
+        atomic_inc(&to_clean->stop_operation);
         to_clean->irq_cleanup(to_clean);
 
         spin_unlock(&(to_clean->msg_lock));
@@ -2468,10 +2513,7 @@ static void __exit cleanup_one_si(struct smi_info *to_clean)
            interrupt. */
         synchronize_sched();
 
-        /* Wait for the timer to stop.  This avoids problems with race
-           conditions removing the timer here. */
-        while (!to_clean->timer_stopped)
-                schedule_timeout_uninterruptible(1);
+        wait_for_timer_and_thread(to_clean);
 
         /* Interrupts and timeouts are stopped, now make sure the
            interface is in a clean state. */
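With all the pieces in place, the teardown order in cleanup_one_si() is worth
restating, since each step depends on the one before it:

	/* cleanup_one_si() shutdown order, as of this patch:
	 *  1. atomic_inc(&to_clean->stop_operation), taken under
	 *     si_lock and msg_lock, so handlers see a consistent flag;
	 *  2. to_clean->irq_cleanup(to_clean): no new interrupts arrive;
	 *  3. synchronize_sched(): any handler that started before the
	 *     flag was set has finished;
	 *  4. wait_for_timer_and_thread(to_clean): the kipmi thread has
	 *     exited and the timer cannot fire again.
	 */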