diff options
Diffstat (limited to 'drivers/char/ipmi/ipmi_si_intf.c')
-rw-r--r-- | drivers/char/ipmi/ipmi_si_intf.c | 183 |
1 files changed, 152 insertions, 31 deletions
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index b6e5cbfb09f8..ea89dca3dbb5 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c | |||
@@ -51,6 +51,8 @@ | |||
51 | #include <linux/list.h> | 51 | #include <linux/list.h> |
52 | #include <linux/pci.h> | 52 | #include <linux/pci.h> |
53 | #include <linux/ioport.h> | 53 | #include <linux/ioport.h> |
54 | #include <linux/notifier.h> | ||
55 | #include <linux/kthread.h> | ||
54 | #include <asm/irq.h> | 56 | #include <asm/irq.h> |
55 | #ifdef CONFIG_HIGH_RES_TIMERS | 57 | #ifdef CONFIG_HIGH_RES_TIMERS |
56 | #include <linux/hrtime.h> | 58 | #include <linux/hrtime.h> |
@@ -125,6 +127,7 @@ struct ipmi_device_id { | |||
125 | 127 | ||
126 | struct smi_info | 128 | struct smi_info |
127 | { | 129 | { |
130 | int intf_num; | ||
128 | ipmi_smi_t intf; | 131 | ipmi_smi_t intf; |
129 | struct si_sm_data *si_sm; | 132 | struct si_sm_data *si_sm; |
130 | struct si_sm_handlers *handlers; | 133 | struct si_sm_handlers *handlers; |
@@ -192,8 +195,7 @@ struct smi_info | |||
192 | unsigned long last_timeout_jiffies; | 195 | unsigned long last_timeout_jiffies; |
193 | 196 | ||
194 | /* Used to gracefully stop the timer without race conditions. */ | 197 | /* Used to gracefully stop the timer without race conditions. */ |
195 | volatile int stop_operation; | 198 | atomic_t stop_operation; |
196 | volatile int timer_stopped; | ||
197 | 199 | ||
198 | /* The driver will disable interrupts when it gets into a | 200 | /* The driver will disable interrupts when it gets into a |
199 | situation where it cannot handle messages due to lack of | 201 | situation where it cannot handle messages due to lack of |
@@ -220,8 +222,16 @@ struct smi_info | |||
220 | unsigned long events; | 222 | unsigned long events; |
221 | unsigned long watchdog_pretimeouts; | 223 | unsigned long watchdog_pretimeouts; |
222 | unsigned long incoming_messages; | 224 | unsigned long incoming_messages; |
225 | |||
226 | struct task_struct *thread; | ||
223 | }; | 227 | }; |
224 | 228 | ||
229 | static struct notifier_block *xaction_notifier_list; | ||
230 | static int register_xaction_notifier(struct notifier_block * nb) | ||
231 | { | ||
232 | return notifier_chain_register(&xaction_notifier_list, nb); | ||
233 | } | ||
234 | |||
225 | static void si_restart_short_timer(struct smi_info *smi_info); | 235 | static void si_restart_short_timer(struct smi_info *smi_info); |
226 | 236 | ||
227 | static void deliver_recv_msg(struct smi_info *smi_info, | 237 | static void deliver_recv_msg(struct smi_info *smi_info, |
@@ -281,6 +291,11 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info) | |||
281 | do_gettimeofday(&t); | 291 | do_gettimeofday(&t); |
282 | printk("**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec); | 292 | printk("**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec); |
283 | #endif | 293 | #endif |
294 | err = notifier_call_chain(&xaction_notifier_list, 0, smi_info); | ||
295 | if (err & NOTIFY_STOP_MASK) { | ||
296 | rv = SI_SM_CALL_WITHOUT_DELAY; | ||
297 | goto out; | ||
298 | } | ||
284 | err = smi_info->handlers->start_transaction( | 299 | err = smi_info->handlers->start_transaction( |
285 | smi_info->si_sm, | 300 | smi_info->si_sm, |
286 | smi_info->curr_msg->data, | 301 | smi_info->curr_msg->data, |
@@ -291,6 +306,7 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info) | |||
291 | 306 | ||
292 | rv = SI_SM_CALL_WITHOUT_DELAY; | 307 | rv = SI_SM_CALL_WITHOUT_DELAY; |
293 | } | 308 | } |
309 | out: | ||
294 | spin_unlock(&(smi_info->msg_lock)); | 310 | spin_unlock(&(smi_info->msg_lock)); |
295 | 311 | ||
296 | return rv; | 312 | return rv; |
@@ -766,6 +782,29 @@ static void set_run_to_completion(void *send_info, int i_run_to_completion) | |||
766 | spin_unlock_irqrestore(&(smi_info->si_lock), flags); | 782 | spin_unlock_irqrestore(&(smi_info->si_lock), flags); |
767 | } | 783 | } |
768 | 784 | ||
785 | static int ipmi_thread(void *data) | ||
786 | { | ||
787 | struct smi_info *smi_info = data; | ||
788 | unsigned long flags; | ||
789 | enum si_sm_result smi_result; | ||
790 | |||
791 | set_user_nice(current, 19); | ||
792 | while (!kthread_should_stop()) { | ||
793 | spin_lock_irqsave(&(smi_info->si_lock), flags); | ||
794 | smi_result=smi_event_handler(smi_info, 0); | ||
795 | spin_unlock_irqrestore(&(smi_info->si_lock), flags); | ||
796 | if (smi_result == SI_SM_CALL_WITHOUT_DELAY) { | ||
797 | /* do nothing */ | ||
798 | } | ||
799 | else if (smi_result == SI_SM_CALL_WITH_DELAY) | ||
800 | udelay(1); | ||
801 | else | ||
802 | schedule_timeout_interruptible(1); | ||
803 | } | ||
804 | return 0; | ||
805 | } | ||
806 | |||
807 | |||
769 | static void poll(void *send_info) | 808 | static void poll(void *send_info) |
770 | { | 809 | { |
771 | struct smi_info *smi_info = send_info; | 810 | struct smi_info *smi_info = send_info; |
@@ -819,15 +858,13 @@ static void smi_timeout(unsigned long data) | |||
819 | enum si_sm_result smi_result; | 858 | enum si_sm_result smi_result; |
820 | unsigned long flags; | 859 | unsigned long flags; |
821 | unsigned long jiffies_now; | 860 | unsigned long jiffies_now; |
822 | unsigned long time_diff; | 861 | long time_diff; |
823 | #ifdef DEBUG_TIMING | 862 | #ifdef DEBUG_TIMING |
824 | struct timeval t; | 863 | struct timeval t; |
825 | #endif | 864 | #endif |
826 | 865 | ||
827 | if (smi_info->stop_operation) { | 866 | if (atomic_read(&smi_info->stop_operation)) |
828 | smi_info->timer_stopped = 1; | ||
829 | return; | 867 | return; |
830 | } | ||
831 | 868 | ||
832 | spin_lock_irqsave(&(smi_info->si_lock), flags); | 869 | spin_lock_irqsave(&(smi_info->si_lock), flags); |
833 | #ifdef DEBUG_TIMING | 870 | #ifdef DEBUG_TIMING |
@@ -835,7 +872,7 @@ static void smi_timeout(unsigned long data) | |||
835 | printk("**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec); | 872 | printk("**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec); |
836 | #endif | 873 | #endif |
837 | jiffies_now = jiffies; | 874 | jiffies_now = jiffies; |
838 | time_diff = ((jiffies_now - smi_info->last_timeout_jiffies) | 875 | time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies) |
839 | * SI_USEC_PER_JIFFY); | 876 | * SI_USEC_PER_JIFFY); |
840 | smi_result = smi_event_handler(smi_info, time_diff); | 877 | smi_result = smi_event_handler(smi_info, time_diff); |
841 | 878 | ||
@@ -900,7 +937,7 @@ static irqreturn_t si_irq_handler(int irq, void *data, struct pt_regs *regs) | |||
900 | smi_info->interrupts++; | 937 | smi_info->interrupts++; |
901 | spin_unlock(&smi_info->count_lock); | 938 | spin_unlock(&smi_info->count_lock); |
902 | 939 | ||
903 | if (smi_info->stop_operation) | 940 | if (atomic_read(&smi_info->stop_operation)) |
904 | goto out; | 941 | goto out; |
905 | 942 | ||
906 | #ifdef DEBUG_TIMING | 943 | #ifdef DEBUG_TIMING |
@@ -1419,7 +1456,7 @@ static u32 ipmi_acpi_gpe(void *context) | |||
1419 | smi_info->interrupts++; | 1456 | smi_info->interrupts++; |
1420 | spin_unlock(&smi_info->count_lock); | 1457 | spin_unlock(&smi_info->count_lock); |
1421 | 1458 | ||
1422 | if (smi_info->stop_operation) | 1459 | if (atomic_read(&smi_info->stop_operation)) |
1423 | goto out; | 1460 | goto out; |
1424 | 1461 | ||
1425 | #ifdef DEBUG_TIMING | 1462 | #ifdef DEBUG_TIMING |
@@ -1919,7 +1956,8 @@ static int try_get_dev_id(struct smi_info *smi_info) | |||
1919 | smi_result = smi_info->handlers->event(smi_info->si_sm, 0); | 1956 | smi_result = smi_info->handlers->event(smi_info->si_sm, 0); |
1920 | for (;;) | 1957 | for (;;) |
1921 | { | 1958 | { |
1922 | if (smi_result == SI_SM_CALL_WITH_DELAY) { | 1959 | if (smi_result == SI_SM_CALL_WITH_DELAY || |
1960 | smi_result == SI_SM_CALL_WITH_TICK_DELAY) { | ||
1923 | schedule_timeout_uninterruptible(1); | 1961 | schedule_timeout_uninterruptible(1); |
1924 | smi_result = smi_info->handlers->event( | 1962 | smi_result = smi_info->handlers->event( |
1925 | smi_info->si_sm, 100); | 1963 | smi_info->si_sm, 100); |
@@ -2052,6 +2090,9 @@ static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info) | |||
2052 | * IPMI Version = 0x51 IPMI 1.5 | 2090 | * IPMI Version = 0x51 IPMI 1.5 |
2053 | * Manufacturer ID = A2 02 00 Dell IANA | 2091 | * Manufacturer ID = A2 02 00 Dell IANA |
2054 | * | 2092 | * |
2093 | * Additionally, PowerEdge systems with IPMI < 1.5 may also assert | ||
2094 | * OEM0_DATA_AVAIL and need to be treated as RECEIVE_MSG_AVAIL. | ||
2095 | * | ||
2055 | */ | 2096 | */ |
2056 | #define DELL_POWEREDGE_8G_BMC_DEVICE_ID 0x20 | 2097 | #define DELL_POWEREDGE_8G_BMC_DEVICE_ID 0x20 |
2057 | #define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80 | 2098 | #define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80 |
@@ -2061,16 +2102,87 @@ static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info) | |||
2061 | { | 2102 | { |
2062 | struct ipmi_device_id *id = &smi_info->device_id; | 2103 | struct ipmi_device_id *id = &smi_info->device_id; |
2063 | const char mfr[3]=DELL_IANA_MFR_ID; | 2104 | const char mfr[3]=DELL_IANA_MFR_ID; |
2064 | if (! memcmp(mfr, id->manufacturer_id, sizeof(mfr)) | 2105 | if (! memcmp(mfr, id->manufacturer_id, sizeof(mfr))) { |
2065 | && (id->device_id == DELL_POWEREDGE_8G_BMC_DEVICE_ID) | 2106 | if (id->device_id == DELL_POWEREDGE_8G_BMC_DEVICE_ID && |
2066 | && (id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV) | 2107 | id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV && |
2067 | && (id->ipmi_version == DELL_POWEREDGE_8G_BMC_IPMI_VERSION)) | 2108 | id->ipmi_version == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) { |
2068 | { | 2109 | smi_info->oem_data_avail_handler = |
2069 | smi_info->oem_data_avail_handler = | 2110 | oem_data_avail_to_receive_msg_avail; |
2070 | oem_data_avail_to_receive_msg_avail; | 2111 | } |
2112 | else if (ipmi_version_major(id) < 1 || | ||
2113 | (ipmi_version_major(id) == 1 && | ||
2114 | ipmi_version_minor(id) < 5)) { | ||
2115 | smi_info->oem_data_avail_handler = | ||
2116 | oem_data_avail_to_receive_msg_avail; | ||
2117 | } | ||
2071 | } | 2118 | } |
2072 | } | 2119 | } |
2073 | 2120 | ||
2121 | #define CANNOT_RETURN_REQUESTED_LENGTH 0xCA | ||
2122 | static void return_hosed_msg_badsize(struct smi_info *smi_info) | ||
2123 | { | ||
2124 | struct ipmi_smi_msg *msg = smi_info->curr_msg; | ||
2125 | |||
2126 | /* Make it a response */ | ||
2127 | msg->rsp[0] = msg->data[0] | 4; | ||
2128 | msg->rsp[1] = msg->data[1]; | ||
2129 | msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH; | ||
2130 | msg->rsp_size = 3; | ||
2131 | smi_info->curr_msg = NULL; | ||
2132 | deliver_recv_msg(smi_info, msg); | ||
2133 | } | ||
2134 | |||
2135 | /* | ||
2136 | * dell_poweredge_bt_xaction_handler | ||
2137 | * @info - smi_info.device_id must be populated | ||
2138 | * | ||
2139 | * Dell PowerEdge servers with the BT interface (x6xx and 1750) will | ||
2140 | * not respond to a Get SDR command if the length of the data | ||
2141 | * requested is exactly 0x3A, which leads to command timeouts and no | ||
2142 | * data returned. This intercepts such commands, and causes userspace | ||
2143 | * callers to try again with a different-sized buffer, which succeeds. | ||
2144 | */ | ||
2145 | |||
2146 | #define STORAGE_NETFN 0x0A | ||
2147 | #define STORAGE_CMD_GET_SDR 0x23 | ||
2148 | static int dell_poweredge_bt_xaction_handler(struct notifier_block *self, | ||
2149 | unsigned long unused, | ||
2150 | void *in) | ||
2151 | { | ||
2152 | struct smi_info *smi_info = in; | ||
2153 | unsigned char *data = smi_info->curr_msg->data; | ||
2154 | unsigned int size = smi_info->curr_msg->data_size; | ||
2155 | if (size >= 8 && | ||
2156 | (data[0]>>2) == STORAGE_NETFN && | ||
2157 | data[1] == STORAGE_CMD_GET_SDR && | ||
2158 | data[7] == 0x3A) { | ||
2159 | return_hosed_msg_badsize(smi_info); | ||
2160 | return NOTIFY_STOP; | ||
2161 | } | ||
2162 | return NOTIFY_DONE; | ||
2163 | } | ||
2164 | |||
2165 | static struct notifier_block dell_poweredge_bt_xaction_notifier = { | ||
2166 | .notifier_call = dell_poweredge_bt_xaction_handler, | ||
2167 | }; | ||
2168 | |||
2169 | /* | ||
2170 | * setup_dell_poweredge_bt_xaction_handler | ||
2171 | * @info - smi_info.device_id must be filled in already | ||
2172 | * | ||
2173 | * Registers the Dell PowerEdge BT transaction notifier | ||
2174 | * when the BMC is Dell (IANA mfr ID) and the interface is BT. | ||
2175 | */ | ||
2176 | static void | ||
2177 | setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info) | ||
2178 | { | ||
2179 | struct ipmi_device_id *id = &smi_info->device_id; | ||
2180 | const char mfr[3]=DELL_IANA_MFR_ID; | ||
2181 | if (! memcmp(mfr, id->manufacturer_id, sizeof(mfr)) && | ||
2182 | smi_info->si_type == SI_BT) | ||
2183 | register_xaction_notifier(&dell_poweredge_bt_xaction_notifier); | ||
2184 | } | ||
2185 | |||
2074 | /* | 2186 | /* |
2075 | * setup_oem_data_handler | 2187 | * setup_oem_data_handler |
2076 | * @info - smi_info.device_id must be filled in already | 2188 | * @info - smi_info.device_id must be filled in already |
@@ -2084,6 +2196,18 @@ static void setup_oem_data_handler(struct smi_info *smi_info) | |||
2084 | setup_dell_poweredge_oem_data_handler(smi_info); | 2196 | setup_dell_poweredge_oem_data_handler(smi_info); |
2085 | } | 2197 | } |
2086 | 2198 | ||
2199 | static void setup_xaction_handlers(struct smi_info *smi_info) | ||
2200 | { | ||
2201 | setup_dell_poweredge_bt_xaction_handler(smi_info); | ||
2202 | } | ||
2203 | |||
2204 | static inline void wait_for_timer_and_thread(struct smi_info *smi_info) | ||
2205 | { | ||
2206 | if (smi_info->thread != ERR_PTR(-ENOMEM)) | ||
2207 | kthread_stop(smi_info->thread); | ||
2208 | del_timer_sync(&smi_info->si_timer); | ||
2209 | } | ||
2210 | |||
2087 | /* Returns 0 if initialized, or negative on an error. */ | 2211 | /* Returns 0 if initialized, or negative on an error. */ |
2088 | static int init_one_smi(int intf_num, struct smi_info **smi) | 2212 | static int init_one_smi(int intf_num, struct smi_info **smi) |
2089 | { | 2213 | { |
@@ -2179,6 +2303,7 @@ static int init_one_smi(int intf_num, struct smi_info **smi) | |||
2179 | goto out_err; | 2303 | goto out_err; |
2180 | 2304 | ||
2181 | setup_oem_data_handler(new_smi); | 2305 | setup_oem_data_handler(new_smi); |
2306 | setup_xaction_handlers(new_smi); | ||
2182 | 2307 | ||
2183 | /* Try to claim any interrupts. */ | 2308 | /* Try to claim any interrupts. */ |
2184 | new_smi->irq_setup(new_smi); | 2309 | new_smi->irq_setup(new_smi); |
@@ -2190,8 +2315,8 @@ static int init_one_smi(int intf_num, struct smi_info **smi) | |||
2190 | new_smi->run_to_completion = 0; | 2315 | new_smi->run_to_completion = 0; |
2191 | 2316 | ||
2192 | new_smi->interrupt_disabled = 0; | 2317 | new_smi->interrupt_disabled = 0; |
2193 | new_smi->timer_stopped = 0; | 2318 | atomic_set(&new_smi->stop_operation, 0); |
2194 | new_smi->stop_operation = 0; | 2319 | new_smi->intf_num = intf_num; |
2195 | 2320 | ||
2196 | /* Start clearing the flags before we enable interrupts or the | 2321 | /* Start clearing the flags before we enable interrupts or the |
2197 | timer to avoid racing with the timer. */ | 2322 | timer to avoid racing with the timer. */ |
@@ -2209,7 +2334,11 @@ static int init_one_smi(int intf_num, struct smi_info **smi) | |||
2209 | new_smi->si_timer.function = smi_timeout; | 2334 | new_smi->si_timer.function = smi_timeout; |
2210 | new_smi->last_timeout_jiffies = jiffies; | 2335 | new_smi->last_timeout_jiffies = jiffies; |
2211 | new_smi->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES; | 2336 | new_smi->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES; |
2337 | |||
2212 | add_timer(&(new_smi->si_timer)); | 2338 | add_timer(&(new_smi->si_timer)); |
2339 | if (new_smi->si_type != SI_BT) | ||
2340 | new_smi->thread = kthread_run(ipmi_thread, new_smi, | ||
2341 | "kipmi%d", new_smi->intf_num); | ||
2213 | 2342 | ||
2214 | rv = ipmi_register_smi(&handlers, | 2343 | rv = ipmi_register_smi(&handlers, |
2215 | new_smi, | 2344 | new_smi, |
@@ -2251,12 +2380,8 @@ static int init_one_smi(int intf_num, struct smi_info **smi) | |||
2251 | return 0; | 2380 | return 0; |
2252 | 2381 | ||
2253 | out_err_stop_timer: | 2382 | out_err_stop_timer: |
2254 | new_smi->stop_operation = 1; | 2383 | atomic_inc(&new_smi->stop_operation); |
2255 | 2384 | wait_for_timer_and_thread(new_smi); | |
2256 | /* Wait for the timer to stop. This avoids problems with race | ||
2257 | conditions removing the timer here. */ | ||
2258 | while (!new_smi->timer_stopped) | ||
2259 | schedule_timeout_uninterruptible(1); | ||
2260 | 2385 | ||
2261 | out_err: | 2386 | out_err: |
2262 | if (new_smi->intf) | 2387 | if (new_smi->intf) |
@@ -2362,8 +2487,7 @@ static void __exit cleanup_one_si(struct smi_info *to_clean) | |||
2362 | spin_lock_irqsave(&(to_clean->si_lock), flags); | 2487 | spin_lock_irqsave(&(to_clean->si_lock), flags); |
2363 | spin_lock(&(to_clean->msg_lock)); | 2488 | spin_lock(&(to_clean->msg_lock)); |
2364 | 2489 | ||
2365 | to_clean->stop_operation = 1; | 2490 | atomic_inc(&to_clean->stop_operation); |
2366 | |||
2367 | to_clean->irq_cleanup(to_clean); | 2491 | to_clean->irq_cleanup(to_clean); |
2368 | 2492 | ||
2369 | spin_unlock(&(to_clean->msg_lock)); | 2493 | spin_unlock(&(to_clean->msg_lock)); |
@@ -2374,10 +2498,7 @@ static void __exit cleanup_one_si(struct smi_info *to_clean) | |||
2374 | interrupt. */ | 2498 | interrupt. */ |
2375 | synchronize_sched(); | 2499 | synchronize_sched(); |
2376 | 2500 | ||
2377 | /* Wait for the timer to stop. This avoids problems with race | 2501 | wait_for_timer_and_thread(to_clean); |
2378 | conditions removing the timer here. */ | ||
2379 | while (!to_clean->timer_stopped) | ||
2380 | schedule_timeout_uninterruptible(1); | ||
2381 | 2502 | ||
2382 | /* Interrupts and timeouts are stopped, now make sure the | 2503 | /* Interrupts and timeouts are stopped, now make sure the |
2383 | interface is in a clean state. */ | 2504 | interface is in a clean state. */ |