diff options
-rw-r--r-- | Documentation/devicetree/bindings/ipmi/npcm7xx-kcs-bmc.txt | 39 | ||||
-rw-r--r-- | drivers/char/ipmi/Kconfig | 23 | ||||
-rw-r--r-- | drivers/char/ipmi/Makefile | 1 | ||||
-rw-r--r-- | drivers/char/ipmi/ipmi_bt_sm.c | 3 | ||||
-rw-r--r-- | drivers/char/ipmi/ipmi_devintf.c | 129 | ||||
-rw-r--r-- | drivers/char/ipmi/ipmi_msghandler.c | 2121 | ||||
-rw-r--r-- | drivers/char/ipmi/ipmi_poweroff.c | 32 | ||||
-rw-r--r-- | drivers/char/ipmi/ipmi_si_intf.c | 198 | ||||
-rw-r--r-- | drivers/char/ipmi/ipmi_ssif.c | 183 | ||||
-rw-r--r-- | drivers/char/ipmi/ipmi_watchdog.c | 407 | ||||
-rw-r--r-- | drivers/char/ipmi/kcs_bmc_npcm7xx.c | 215 | ||||
-rw-r--r-- | include/linux/ipmi.h | 153 | ||||
-rw-r--r-- | include/linux/ipmi_smi.h | 129 |
13 files changed, 1751 insertions, 1882 deletions
diff --git a/Documentation/devicetree/bindings/ipmi/npcm7xx-kcs-bmc.txt b/Documentation/devicetree/bindings/ipmi/npcm7xx-kcs-bmc.txt new file mode 100644 index 000000000000..3538a214fff1 --- /dev/null +++ b/Documentation/devicetree/bindings/ipmi/npcm7xx-kcs-bmc.txt | |||
@@ -0,0 +1,39 @@ | |||
1 | * Nuvoton NPCM7xx KCS (Keyboard Controller Style) IPMI interface | ||
2 | |||
3 | The Nuvoton SOCs (NPCM7xx) are commonly used as BMCs | ||
4 | (Baseboard Management Controllers) and the KCS interface can be | ||
5 | used to perform in-band IPMI communication with their host. | ||
6 | |||
7 | Required properties: | ||
8 | - compatible : should be one of | ||
9 | "nuvoton,npcm750-kcs-bmc" | ||
10 | - interrupts : interrupt generated by the controller | ||
11 | - kcs_chan : The KCS channel number in the controller | ||
12 | |||
13 | Example: | ||
14 | |||
15 | lpc_kcs: lpc_kcs@f0007000 { | ||
16 | compatible = "nuvoton,npcm750-lpc-kcs", "simple-mfd", "syscon"; | ||
17 | reg = <0xf0007000 0x40>; | ||
18 | reg-io-width = <1>; | ||
19 | |||
20 | #address-cells = <1>; | ||
21 | #size-cells = <1>; | ||
22 | ranges = <0x0 0xf0007000 0x40>; | ||
23 | |||
24 | kcs1: kcs1@0 { | ||
25 | compatible = "nuvoton,npcm750-kcs-bmc"; | ||
26 | reg = <0x0 0x40>; | ||
27 | interrupts = <0 9 4>; | ||
28 | kcs_chan = <1>; | ||
29 | status = "disabled"; | ||
30 | }; | ||
31 | |||
32 | kcs2: kcs2@0 { | ||
33 | compatible = "nuvoton,npcm750-kcs-bmc"; | ||
34 | reg = <0x0 0x40>; | ||
35 | interrupts = <0 9 4>; | ||
36 | kcs_chan = <2>; | ||
37 | status = "disabled"; | ||
38 | }; | ||
39 | }; \ No newline at end of file | ||
diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig index 3bda116c8aa0..c108441882cc 100644 --- a/drivers/char/ipmi/Kconfig +++ b/drivers/char/ipmi/Kconfig | |||
@@ -22,14 +22,6 @@ config IPMI_DMI_DECODE | |||
22 | 22 | ||
23 | if IPMI_HANDLER | 23 | if IPMI_HANDLER |
24 | 24 | ||
25 | config IPMI_PROC_INTERFACE | ||
26 | bool 'Provide an interface for IPMI stats in /proc (deprecated)' | ||
27 | depends on PROC_FS | ||
28 | default y | ||
29 | help | ||
30 | Do not use this any more, use sysfs for this info. It will be | ||
31 | removed in future kernel versions. | ||
32 | |||
33 | config IPMI_PANIC_EVENT | 25 | config IPMI_PANIC_EVENT |
34 | bool 'Generate a panic event to all BMCs on a panic' | 26 | bool 'Generate a panic event to all BMCs on a panic' |
35 | help | 27 | help |
@@ -111,6 +103,21 @@ config ASPEED_KCS_IPMI_BMC | |||
111 | The driver implements the BMC side of the KCS controller, it | 103 | The driver implements the BMC side of the KCS controller, it
112 | provides the access of KCS IO space for BMC side. | 104 | provides the access of KCS IO space for BMC side. |
113 | 105 | ||
106 | config NPCM7XX_KCS_IPMI_BMC | ||
107 | depends on ARCH_NPCM7XX || COMPILE_TEST | ||
108 | select IPMI_KCS_BMC | ||
109 | select REGMAP_MMIO | ||
110 | tristate "NPCM7xx KCS IPMI BMC driver" | ||
111 | help | ||
112 | Provides a driver for the KCS (Keyboard Controller Style) IPMI | ||
113 | interface found on Nuvoton NPCM7xx SOCs. | ||
114 | |||
115 | The driver implements the BMC side of the KCS controller, it | ||
116 | provides the access of KCS IO space for BMC side. | ||
117 | |||
118 | This support is also available as a module. If so, the module | ||
119 | will be called kcs_bmc_npcm7xx. | ||
120 | |||
114 | config ASPEED_BT_IPMI_BMC | 121 | config ASPEED_BT_IPMI_BMC |
115 | depends on ARCH_ASPEED || COMPILE_TEST | 122 | depends on ARCH_ASPEED || COMPILE_TEST |
116 | depends on REGMAP && REGMAP_MMIO && MFD_SYSCON | 123 | depends on REGMAP && REGMAP_MMIO && MFD_SYSCON |
diff --git a/drivers/char/ipmi/Makefile b/drivers/char/ipmi/Makefile index 21e9e872d973..7a3baf301a8f 100644 --- a/drivers/char/ipmi/Makefile +++ b/drivers/char/ipmi/Makefile | |||
@@ -24,3 +24,4 @@ obj-$(CONFIG_IPMI_POWEROFF) += ipmi_poweroff.o | |||
24 | obj-$(CONFIG_IPMI_KCS_BMC) += kcs_bmc.o | 24 | obj-$(CONFIG_IPMI_KCS_BMC) += kcs_bmc.o |
25 | obj-$(CONFIG_ASPEED_BT_IPMI_BMC) += bt-bmc.o | 25 | obj-$(CONFIG_ASPEED_BT_IPMI_BMC) += bt-bmc.o |
26 | obj-$(CONFIG_ASPEED_KCS_IPMI_BMC) += kcs_bmc_aspeed.o | 26 | obj-$(CONFIG_ASPEED_KCS_IPMI_BMC) += kcs_bmc_aspeed.o |
27 | obj-$(CONFIG_NPCM7XX_KCS_IPMI_BMC) += kcs_bmc_npcm7xx.o | ||
diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c index fd4ea8d87d4b..a3397664f800 100644 --- a/drivers/char/ipmi/ipmi_bt_sm.c +++ b/drivers/char/ipmi/ipmi_bt_sm.c | |||
@@ -504,11 +504,12 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time) | |||
504 | if (status & BT_H_BUSY) /* clear a leftover H_BUSY */ | 504 | if (status & BT_H_BUSY) /* clear a leftover H_BUSY */ |
505 | BT_CONTROL(BT_H_BUSY); | 505 | BT_CONTROL(BT_H_BUSY); |
506 | 506 | ||
507 | bt->timeout = bt->BT_CAP_req2rsp; | ||
508 | |||
507 | /* Read BT capabilities if it hasn't been done yet */ | 509 | /* Read BT capabilities if it hasn't been done yet */ |
508 | if (!bt->BT_CAP_outreqs) | 510 | if (!bt->BT_CAP_outreqs) |
509 | BT_STATE_CHANGE(BT_STATE_CAPABILITIES_BEGIN, | 511 | BT_STATE_CHANGE(BT_STATE_CAPABILITIES_BEGIN, |
510 | SI_SM_CALL_WITHOUT_DELAY); | 512 | SI_SM_CALL_WITHOUT_DELAY); |
511 | bt->timeout = bt->BT_CAP_req2rsp; | ||
512 | BT_SI_SM_RETURN(SI_SM_IDLE); | 513 | BT_SI_SM_RETURN(SI_SM_IDLE); |
513 | 514 | ||
514 | case BT_STATE_XACTION_START: | 515 | case BT_STATE_XACTION_START: |
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c index 8ecfd47806fa..1a486aec99b6 100644 --- a/drivers/char/ipmi/ipmi_devintf.c +++ b/drivers/char/ipmi/ipmi_devintf.c | |||
@@ -26,7 +26,7 @@ | |||
26 | 26 | ||
27 | struct ipmi_file_private | 27 | struct ipmi_file_private |
28 | { | 28 | { |
29 | ipmi_user_t user; | 29 | struct ipmi_user *user; |
30 | spinlock_t recv_msg_lock; | 30 | spinlock_t recv_msg_lock; |
31 | struct list_head recv_msgs; | 31 | struct list_head recv_msgs; |
32 | struct file *file; | 32 | struct file *file; |
@@ -37,7 +37,6 @@ struct ipmi_file_private | |||
37 | unsigned int default_retry_time_ms; | 37 | unsigned int default_retry_time_ms; |
38 | }; | 38 | }; |
39 | 39 | ||
40 | static DEFINE_MUTEX(ipmi_mutex); | ||
41 | static void file_receive_handler(struct ipmi_recv_msg *msg, | 40 | static void file_receive_handler(struct ipmi_recv_msg *msg, |
42 | void *handler_data) | 41 | void *handler_data) |
43 | { | 42 | { |
@@ -45,17 +44,15 @@ static void file_receive_handler(struct ipmi_recv_msg *msg, | |||
45 | int was_empty; | 44 | int was_empty; |
46 | unsigned long flags; | 45 | unsigned long flags; |
47 | 46 | ||
48 | spin_lock_irqsave(&(priv->recv_msg_lock), flags); | 47 | spin_lock_irqsave(&priv->recv_msg_lock, flags); |
49 | 48 | was_empty = list_empty(&priv->recv_msgs); | |
50 | was_empty = list_empty(&(priv->recv_msgs)); | 49 | list_add_tail(&msg->link, &priv->recv_msgs); |
51 | list_add_tail(&(msg->link), &(priv->recv_msgs)); | 50 | spin_unlock_irqrestore(&priv->recv_msg_lock, flags); |
52 | 51 | ||
53 | if (was_empty) { | 52 | if (was_empty) { |
54 | wake_up_interruptible(&priv->wait); | 53 | wake_up_interruptible(&priv->wait); |
55 | kill_fasync(&priv->fasync_queue, SIGIO, POLL_IN); | 54 | kill_fasync(&priv->fasync_queue, SIGIO, POLL_IN); |
56 | } | 55 | } |
57 | |||
58 | spin_unlock_irqrestore(&(priv->recv_msg_lock), flags); | ||
59 | } | 56 | } |
60 | 57 | ||
61 | static __poll_t ipmi_poll(struct file *file, poll_table *wait) | 58 | static __poll_t ipmi_poll(struct file *file, poll_table *wait) |
@@ -68,7 +65,7 @@ static __poll_t ipmi_poll(struct file *file, poll_table *wait) | |||
68 | 65 | ||
69 | spin_lock_irqsave(&priv->recv_msg_lock, flags); | 66 | spin_lock_irqsave(&priv->recv_msg_lock, flags); |
70 | 67 | ||
71 | if (!list_empty(&(priv->recv_msgs))) | 68 | if (!list_empty(&priv->recv_msgs)) |
72 | mask |= (EPOLLIN | EPOLLRDNORM); | 69 | mask |= (EPOLLIN | EPOLLRDNORM); |
73 | 70 | ||
74 | spin_unlock_irqrestore(&priv->recv_msg_lock, flags); | 71 | spin_unlock_irqrestore(&priv->recv_msg_lock, flags); |
@@ -79,13 +76,8 @@ static __poll_t ipmi_poll(struct file *file, poll_table *wait) | |||
79 | static int ipmi_fasync(int fd, struct file *file, int on) | 76 | static int ipmi_fasync(int fd, struct file *file, int on) |
80 | { | 77 | { |
81 | struct ipmi_file_private *priv = file->private_data; | 78 | struct ipmi_file_private *priv = file->private_data; |
82 | int result; | ||
83 | 79 | ||
84 | mutex_lock(&ipmi_mutex); /* could race against open() otherwise */ | 80 | return fasync_helper(fd, file, on, &priv->fasync_queue); |
85 | result = fasync_helper(fd, file, on, &priv->fasync_queue); | ||
86 | mutex_unlock(&ipmi_mutex); | ||
87 | |||
88 | return (result); | ||
89 | } | 81 | } |
90 | 82 | ||
91 | static const struct ipmi_user_hndl ipmi_hndlrs = | 83 | static const struct ipmi_user_hndl ipmi_hndlrs = |
@@ -99,18 +91,16 @@ static int ipmi_open(struct inode *inode, struct file *file) | |||
99 | int rv; | 91 | int rv; |
100 | struct ipmi_file_private *priv; | 92 | struct ipmi_file_private *priv; |
101 | 93 | ||
102 | |||
103 | priv = kmalloc(sizeof(*priv), GFP_KERNEL); | 94 | priv = kmalloc(sizeof(*priv), GFP_KERNEL); |
104 | if (!priv) | 95 | if (!priv) |
105 | return -ENOMEM; | 96 | return -ENOMEM; |
106 | 97 | ||
107 | mutex_lock(&ipmi_mutex); | ||
108 | priv->file = file; | 98 | priv->file = file; |
109 | 99 | ||
110 | rv = ipmi_create_user(if_num, | 100 | rv = ipmi_create_user(if_num, |
111 | &ipmi_hndlrs, | 101 | &ipmi_hndlrs, |
112 | priv, | 102 | priv, |
113 | &(priv->user)); | 103 | &priv->user); |
114 | if (rv) { | 104 | if (rv) { |
115 | kfree(priv); | 105 | kfree(priv); |
116 | goto out; | 106 | goto out; |
@@ -118,8 +108,8 @@ static int ipmi_open(struct inode *inode, struct file *file) | |||
118 | 108 | ||
119 | file->private_data = priv; | 109 | file->private_data = priv; |
120 | 110 | ||
121 | spin_lock_init(&(priv->recv_msg_lock)); | 111 | spin_lock_init(&priv->recv_msg_lock); |
122 | INIT_LIST_HEAD(&(priv->recv_msgs)); | 112 | INIT_LIST_HEAD(&priv->recv_msgs); |
123 | init_waitqueue_head(&priv->wait); | 113 | init_waitqueue_head(&priv->wait); |
124 | priv->fasync_queue = NULL; | 114 | priv->fasync_queue = NULL; |
125 | mutex_init(&priv->recv_mutex); | 115 | mutex_init(&priv->recv_mutex); |
@@ -129,7 +119,6 @@ static int ipmi_open(struct inode *inode, struct file *file) | |||
129 | priv->default_retry_time_ms = 0; | 119 | priv->default_retry_time_ms = 0; |
130 | 120 | ||
131 | out: | 121 | out: |
132 | mutex_unlock(&ipmi_mutex); | ||
133 | return rv; | 122 | return rv; |
134 | } | 123 | } |
135 | 124 | ||
@@ -137,7 +126,7 @@ static int ipmi_release(struct inode *inode, struct file *file) | |||
137 | { | 126 | { |
138 | struct ipmi_file_private *priv = file->private_data; | 127 | struct ipmi_file_private *priv = file->private_data; |
139 | int rv; | 128 | int rv; |
140 | struct ipmi_recv_msg *msg, *next; | 129 | struct ipmi_recv_msg *msg, *next; |
141 | 130 | ||
142 | rv = ipmi_destroy_user(priv->user); | 131 | rv = ipmi_destroy_user(priv->user); |
143 | if (rv) | 132 | if (rv) |
@@ -146,13 +135,12 @@ static int ipmi_release(struct inode *inode, struct file *file) | |||
146 | list_for_each_entry_safe(msg, next, &priv->recv_msgs, link) | 135 | list_for_each_entry_safe(msg, next, &priv->recv_msgs, link) |
147 | ipmi_free_recv_msg(msg); | 136 | ipmi_free_recv_msg(msg); |
148 | 137 | ||
149 | |||
150 | kfree(priv); | 138 | kfree(priv); |
151 | 139 | ||
152 | return 0; | 140 | return 0; |
153 | } | 141 | } |
154 | 142 | ||
155 | static int handle_send_req(ipmi_user_t user, | 143 | static int handle_send_req(struct ipmi_user *user, |
156 | struct ipmi_req *req, | 144 | struct ipmi_req *req, |
157 | int retries, | 145 | int retries, |
158 | unsigned int retry_time_ms) | 146 | unsigned int retry_time_ms) |
@@ -189,8 +177,7 @@ static int handle_send_req(ipmi_user_t user, | |||
189 | 177 | ||
190 | if (copy_from_user(msg.data, | 178 | if (copy_from_user(msg.data, |
191 | req->msg.data, | 179 | req->msg.data, |
192 | req->msg.data_len)) | 180 | req->msg.data_len)) { |
193 | { | ||
194 | rv = -EFAULT; | 181 | rv = -EFAULT; |
195 | goto out; | 182 | goto out; |
196 | } | 183 | } |
@@ -233,25 +220,24 @@ static int handle_recv(struct ipmi_file_private *priv, | |||
233 | mutex_lock(&priv->recv_mutex); | 220 | mutex_lock(&priv->recv_mutex); |
234 | 221 | ||
235 | /* Grab the message off the list. */ | 222 | /* Grab the message off the list. */ |
236 | spin_lock_irqsave(&(priv->recv_msg_lock), flags); | 223 | spin_lock_irqsave(&priv->recv_msg_lock, flags); |
237 | if (list_empty(&(priv->recv_msgs))) { | 224 | if (list_empty(&(priv->recv_msgs))) { |
238 | spin_unlock_irqrestore(&(priv->recv_msg_lock), flags); | 225 | spin_unlock_irqrestore(&priv->recv_msg_lock, flags); |
239 | rv = -EAGAIN; | 226 | rv = -EAGAIN; |
240 | goto recv_err; | 227 | goto recv_err; |
241 | } | 228 | } |
242 | entry = priv->recv_msgs.next; | 229 | entry = priv->recv_msgs.next; |
243 | msg = list_entry(entry, struct ipmi_recv_msg, link); | 230 | msg = list_entry(entry, struct ipmi_recv_msg, link); |
244 | list_del(entry); | 231 | list_del(entry); |
245 | spin_unlock_irqrestore(&(priv->recv_msg_lock), flags); | 232 | spin_unlock_irqrestore(&priv->recv_msg_lock, flags); |
246 | 233 | ||
247 | addr_len = ipmi_addr_length(msg->addr.addr_type); | 234 | addr_len = ipmi_addr_length(msg->addr.addr_type); |
248 | if (rsp->addr_len < addr_len) | 235 | if (rsp->addr_len < addr_len) { |
249 | { | ||
250 | rv = -EINVAL; | 236 | rv = -EINVAL; |
251 | goto recv_putback_on_err; | 237 | goto recv_putback_on_err; |
252 | } | 238 | } |
253 | 239 | ||
254 | if (copy_to_user(rsp->addr, &(msg->addr), addr_len)) { | 240 | if (copy_to_user(rsp->addr, &msg->addr, addr_len)) { |
255 | rv = -EFAULT; | 241 | rv = -EFAULT; |
256 | goto recv_putback_on_err; | 242 | goto recv_putback_on_err; |
257 | } | 243 | } |
@@ -273,8 +259,7 @@ static int handle_recv(struct ipmi_file_private *priv, | |||
273 | 259 | ||
274 | if (copy_to_user(rsp->msg.data, | 260 | if (copy_to_user(rsp->msg.data, |
275 | msg->msg.data, | 261 | msg->msg.data, |
276 | msg->msg.data_len)) | 262 | msg->msg.data_len)) { |
277 | { | ||
278 | rv = -EFAULT; | 263 | rv = -EFAULT; |
279 | goto recv_putback_on_err; | 264 | goto recv_putback_on_err; |
280 | } | 265 | } |
@@ -294,9 +279,9 @@ static int handle_recv(struct ipmi_file_private *priv, | |||
294 | recv_putback_on_err: | 279 | recv_putback_on_err: |
295 | /* If we got an error, put the message back onto | 280 | /* If we got an error, put the message back onto |
296 | the head of the queue. */ | 281 | the head of the queue. */ |
297 | spin_lock_irqsave(&(priv->recv_msg_lock), flags); | 282 | spin_lock_irqsave(&priv->recv_msg_lock, flags); |
298 | list_add(entry, &(priv->recv_msgs)); | 283 | list_add(entry, &priv->recv_msgs); |
299 | spin_unlock_irqrestore(&(priv->recv_msg_lock), flags); | 284 | spin_unlock_irqrestore(&priv->recv_msg_lock, flags); |
300 | recv_err: | 285 | recv_err: |
301 | mutex_unlock(&priv->recv_mutex); | 286 | mutex_unlock(&priv->recv_mutex); |
302 | return rv; | 287 | return rv; |
@@ -307,9 +292,9 @@ static int copyout_recv(struct ipmi_recv *rsp, void __user *to) | |||
307 | return copy_to_user(to, rsp, sizeof(struct ipmi_recv)) ? -EFAULT : 0; | 292 | return copy_to_user(to, rsp, sizeof(struct ipmi_recv)) ? -EFAULT : 0; |
308 | } | 293 | } |
309 | 294 | ||
310 | static int ipmi_ioctl(struct file *file, | 295 | static long ipmi_ioctl(struct file *file, |
311 | unsigned int cmd, | 296 | unsigned int cmd, |
312 | unsigned long data) | 297 | unsigned long data) |
313 | { | 298 | { |
314 | int rv = -EINVAL; | 299 | int rv = -EINVAL; |
315 | struct ipmi_file_private *priv = file->private_data; | 300 | struct ipmi_file_private *priv = file->private_data; |
@@ -320,16 +305,20 @@ static int ipmi_ioctl(struct file *file, | |||
320 | case IPMICTL_SEND_COMMAND: | 305 | case IPMICTL_SEND_COMMAND: |
321 | { | 306 | { |
322 | struct ipmi_req req; | 307 | struct ipmi_req req; |
308 | int retries; | ||
309 | unsigned int retry_time_ms; | ||
323 | 310 | ||
324 | if (copy_from_user(&req, arg, sizeof(req))) { | 311 | if (copy_from_user(&req, arg, sizeof(req))) { |
325 | rv = -EFAULT; | 312 | rv = -EFAULT; |
326 | break; | 313 | break; |
327 | } | 314 | } |
328 | 315 | ||
329 | rv = handle_send_req(priv->user, | 316 | mutex_lock(&priv->recv_mutex); |
330 | &req, | 317 | retries = priv->default_retries; |
331 | priv->default_retries, | 318 | retry_time_ms = priv->default_retry_time_ms; |
332 | priv->default_retry_time_ms); | 319 | mutex_unlock(&priv->recv_mutex); |
320 | |||
321 | rv = handle_send_req(priv->user, &req, retries, retry_time_ms); | ||
333 | break; | 322 | break; |
334 | } | 323 | } |
335 | 324 | ||
@@ -569,8 +558,10 @@ static int ipmi_ioctl(struct file *file, | |||
569 | break; | 558 | break; |
570 | } | 559 | } |
571 | 560 | ||
561 | mutex_lock(&priv->recv_mutex); | ||
572 | priv->default_retries = parms.retries; | 562 | priv->default_retries = parms.retries; |
573 | priv->default_retry_time_ms = parms.retry_time_ms; | 563 | priv->default_retry_time_ms = parms.retry_time_ms; |
564 | mutex_unlock(&priv->recv_mutex); | ||
574 | rv = 0; | 565 | rv = 0; |
575 | break; | 566 | break; |
576 | } | 567 | } |
@@ -579,8 +570,10 @@ static int ipmi_ioctl(struct file *file, | |||
579 | { | 570 | { |
580 | struct ipmi_timing_parms parms; | 571 | struct ipmi_timing_parms parms; |
581 | 572 | ||
573 | mutex_lock(&priv->recv_mutex); | ||
582 | parms.retries = priv->default_retries; | 574 | parms.retries = priv->default_retries; |
583 | parms.retry_time_ms = priv->default_retry_time_ms; | 575 | parms.retry_time_ms = priv->default_retry_time_ms; |
576 | mutex_unlock(&priv->recv_mutex); | ||
584 | 577 | ||
585 | if (copy_to_user(arg, &parms, sizeof(parms))) { | 578 | if (copy_to_user(arg, &parms, sizeof(parms))) { |
586 | rv = -EFAULT; | 579 | rv = -EFAULT; |
@@ -615,30 +608,16 @@ static int ipmi_ioctl(struct file *file, | |||
615 | rv = ipmi_set_maintenance_mode(priv->user, mode); | 608 | rv = ipmi_set_maintenance_mode(priv->user, mode); |
616 | break; | 609 | break; |
617 | } | 610 | } |
611 | |||
612 | default: | ||
613 | rv = -ENOTTY; | ||
614 | break; | ||
618 | } | 615 | } |
619 | 616 | ||
620 | return rv; | 617 | return rv; |
621 | } | 618 | } |
622 | 619 | ||
623 | /* | ||
624 | * Note: it doesn't make sense to take the BKL here but | ||
625 | * not in compat_ipmi_ioctl. -arnd | ||
626 | */ | ||
627 | static long ipmi_unlocked_ioctl(struct file *file, | ||
628 | unsigned int cmd, | ||
629 | unsigned long data) | ||
630 | { | ||
631 | int ret; | ||
632 | |||
633 | mutex_lock(&ipmi_mutex); | ||
634 | ret = ipmi_ioctl(file, cmd, data); | ||
635 | mutex_unlock(&ipmi_mutex); | ||
636 | |||
637 | return ret; | ||
638 | } | ||
639 | |||
640 | #ifdef CONFIG_COMPAT | 620 | #ifdef CONFIG_COMPAT |
641 | |||
642 | /* | 621 | /* |
643 | * The following code contains code for supporting 32-bit compatible | 622 | * The following code contains code for supporting 32-bit compatible |
644 | * ioctls on 64-bit kernels. This allows running 32-bit apps on the | 623 | * ioctls on 64-bit kernels. This allows running 32-bit apps on the |
@@ -749,15 +728,21 @@ static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd, | |||
749 | { | 728 | { |
750 | struct ipmi_req rp; | 729 | struct ipmi_req rp; |
751 | struct compat_ipmi_req r32; | 730 | struct compat_ipmi_req r32; |
731 | int retries; | ||
732 | unsigned int retry_time_ms; | ||
752 | 733 | ||
753 | if (copy_from_user(&r32, compat_ptr(arg), sizeof(r32))) | 734 | if (copy_from_user(&r32, compat_ptr(arg), sizeof(r32))) |
754 | return -EFAULT; | 735 | return -EFAULT; |
755 | 736 | ||
756 | get_compat_ipmi_req(&rp, &r32); | 737 | get_compat_ipmi_req(&rp, &r32); |
757 | 738 | ||
739 | mutex_lock(&priv->recv_mutex); | ||
740 | retries = priv->default_retries; | ||
741 | retry_time_ms = priv->default_retry_time_ms; | ||
742 | mutex_unlock(&priv->recv_mutex); | ||
743 | |||
758 | return handle_send_req(priv->user, &rp, | 744 | return handle_send_req(priv->user, &rp, |
759 | priv->default_retries, | 745 | retries, retry_time_ms); |
760 | priv->default_retry_time_ms); | ||
761 | } | 746 | } |
762 | case COMPAT_IPMICTL_SEND_COMMAND_SETTIME: | 747 | case COMPAT_IPMICTL_SEND_COMMAND_SETTIME: |
763 | { | 748 | { |
@@ -791,25 +776,13 @@ static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd, | |||
791 | return ipmi_ioctl(filep, cmd, arg); | 776 | return ipmi_ioctl(filep, cmd, arg); |
792 | } | 777 | } |
793 | } | 778 | } |
794 | |||
795 | static long unlocked_compat_ipmi_ioctl(struct file *filep, unsigned int cmd, | ||
796 | unsigned long arg) | ||
797 | { | ||
798 | int ret; | ||
799 | |||
800 | mutex_lock(&ipmi_mutex); | ||
801 | ret = compat_ipmi_ioctl(filep, cmd, arg); | ||
802 | mutex_unlock(&ipmi_mutex); | ||
803 | |||
804 | return ret; | ||
805 | } | ||
806 | #endif | 779 | #endif |
807 | 780 | ||
808 | static const struct file_operations ipmi_fops = { | 781 | static const struct file_operations ipmi_fops = { |
809 | .owner = THIS_MODULE, | 782 | .owner = THIS_MODULE, |
810 | .unlocked_ioctl = ipmi_unlocked_ioctl, | 783 | .unlocked_ioctl = ipmi_ioctl, |
811 | #ifdef CONFIG_COMPAT | 784 | #ifdef CONFIG_COMPAT |
812 | .compat_ioctl = unlocked_compat_ipmi_ioctl, | 785 | .compat_ioctl = compat_ipmi_ioctl, |
813 | #endif | 786 | #endif |
814 | .open = ipmi_open, | 787 | .open = ipmi_open, |
815 | .release = ipmi_release, | 788 | .release = ipmi_release, |
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index 361148938801..51832b8a2c62 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c | |||
@@ -37,11 +37,30 @@ | |||
37 | static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void); | 37 | static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void); |
38 | static int ipmi_init_msghandler(void); | 38 | static int ipmi_init_msghandler(void); |
39 | static void smi_recv_tasklet(unsigned long); | 39 | static void smi_recv_tasklet(unsigned long); |
40 | static void handle_new_recv_msgs(ipmi_smi_t intf); | 40 | static void handle_new_recv_msgs(struct ipmi_smi *intf); |
41 | static void need_waiter(ipmi_smi_t intf); | 41 | static void need_waiter(struct ipmi_smi *intf); |
42 | static int handle_one_recv_msg(ipmi_smi_t intf, | 42 | static int handle_one_recv_msg(struct ipmi_smi *intf, |
43 | struct ipmi_smi_msg *msg); | 43 | struct ipmi_smi_msg *msg); |
44 | 44 | ||
45 | #ifdef DEBUG | ||
46 | static void ipmi_debug_msg(const char *title, unsigned char *data, | ||
47 | unsigned int len) | ||
48 | { | ||
49 | int i, pos; | ||
50 | char buf[100]; | ||
51 | |||
52 | pos = snprintf(buf, sizeof(buf), "%s: ", title); | ||
53 | for (i = 0; i < len; i++) | ||
54 | pos += snprintf(buf + pos, sizeof(buf) - pos, | ||
55 | " %2.2x", data[i]); | ||
56 | pr_debug("%s\n", buf); | ||
57 | } | ||
58 | #else | ||
59 | static void ipmi_debug_msg(const char *title, unsigned char *data, | ||
60 | unsigned int len) | ||
61 | { } | ||
62 | #endif | ||
63 | |||
45 | static int initialized; | 64 | static int initialized; |
46 | 65 | ||
47 | enum ipmi_panic_event_op { | 66 | enum ipmi_panic_event_op { |
@@ -112,14 +131,13 @@ module_param_cb(panic_op, &panic_op_ops, NULL, 0600); | |||
112 | MODULE_PARM_DESC(panic_op, "Sets if the IPMI driver will attempt to store panic information in the event log in the event of a panic. Set to 'none' for no, 'event' for a single event, or 'string' for a generic event and the panic string in IPMI OEM events."); | 131 | MODULE_PARM_DESC(panic_op, "Sets if the IPMI driver will attempt to store panic information in the event log in the event of a panic. Set to 'none' for no, 'event' for a single event, or 'string' for a generic event and the panic string in IPMI OEM events."); |
113 | 132 | ||
114 | 133 | ||
115 | #ifdef CONFIG_IPMI_PROC_INTERFACE | 134 | #define MAX_EVENTS_IN_QUEUE 25 |
116 | static struct proc_dir_entry *proc_ipmi_root; | ||
117 | #endif /* CONFIG_IPMI_PROC_INTERFACE */ | ||
118 | 135 | ||
119 | /* Remain in auto-maintenance mode for this amount of time (in ms). */ | 136 | /* Remain in auto-maintenance mode for this amount of time (in ms). */ |
120 | #define IPMI_MAINTENANCE_MODE_TIMEOUT 30000 | 137 | static unsigned long maintenance_mode_timeout_ms = 30000; |
121 | 138 | module_param(maintenance_mode_timeout_ms, ulong, 0644); | |
122 | #define MAX_EVENTS_IN_QUEUE 25 | 139 | MODULE_PARM_DESC(maintenance_mode_timeout_ms, |
140 | "The time (milliseconds) after the last maintenance message that the connection stays in maintenance mode."); | ||
123 | 141 | ||
124 | /* | 142 | /* |
125 | * Don't let a message sit in a queue forever, always time it with at least | 143 |
@@ -127,6 +145,31 @@ static struct proc_dir_entry *proc_ipmi_root; | |||
127 | */ | 145 | */ |
128 | #define MAX_MSG_TIMEOUT 60000 | 146 | #define MAX_MSG_TIMEOUT 60000 |
129 | 147 | ||
148 | /* | ||
149 | * Timeout times below are in milliseconds, and are done off a 1 | ||
150 | * second timer. So setting the value to 1000 would mean anything | ||
151 | * between 0 and 1000ms. So really the only reasonable minimum | ||
152 | * setting is 2000ms, which is between 1 and 2 seconds. | ||
153 | */ | ||
154 | |||
155 | /* The default timeout for message retries. */ | ||
156 | static unsigned long default_retry_ms = 2000; | ||
157 | module_param(default_retry_ms, ulong, 0644); | ||
158 | MODULE_PARM_DESC(default_retry_ms, | ||
159 | "The time (milliseconds) between retry sends"); | ||
160 | |||
161 | /* The default timeout for maintenance mode message retries. */ | ||
162 | static unsigned long default_maintenance_retry_ms = 3000; | ||
163 | module_param(default_maintenance_retry_ms, ulong, 0644); | ||
164 | MODULE_PARM_DESC(default_maintenance_retry_ms, | ||
165 | "The time (milliseconds) between retry sends in maintenance mode"); | ||
166 | |||
167 | /* The default maximum number of retries */ | ||
168 | static unsigned int default_max_retries = 4; | ||
169 | module_param(default_max_retries, uint, 0644); | ||
170 | MODULE_PARM_DESC(default_max_retries, | ||
171 | "The time (milliseconds) between retry sends in maintenance mode"); | ||
172 | |||
130 | /* Call every ~1000 ms. */ | 173 | /* Call every ~1000 ms. */ |
131 | #define IPMI_TIMEOUT_TIME 1000 | 174 | #define IPMI_TIMEOUT_TIME 1000 |
132 | 175 | ||
@@ -150,8 +193,12 @@ static struct proc_dir_entry *proc_ipmi_root; | |||
150 | struct ipmi_user { | 193 | struct ipmi_user { |
151 | struct list_head link; | 194 | struct list_head link; |
152 | 195 | ||
153 | /* Set to false when the user is destroyed. */ | 196 | /* |
154 | bool valid; | 197 | * Set to NULL when the user is destroyed, a pointer to myself |
198 | * so srcu_dereference can be used on it. | ||
199 | */ | ||
200 | struct ipmi_user *self; | ||
201 | struct srcu_struct release_barrier; | ||
155 | 202 | ||
156 | struct kref refcount; | 203 | struct kref refcount; |
157 | 204 | ||
@@ -160,16 +207,33 @@ struct ipmi_user { | |||
160 | void *handler_data; | 207 | void *handler_data; |
161 | 208 | ||
162 | /* The interface this user is bound to. */ | 209 | /* The interface this user is bound to. */ |
163 | ipmi_smi_t intf; | 210 | struct ipmi_smi *intf; |
164 | 211 | ||
165 | /* Does this interface receive IPMI events? */ | 212 | /* Does this interface receive IPMI events? */ |
166 | bool gets_events; | 213 | bool gets_events; |
167 | }; | 214 | }; |
168 | 215 | ||
216 | static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index) | ||
217 | __acquires(user->release_barrier) | ||
218 | { | ||
219 | struct ipmi_user *ruser; | ||
220 | |||
221 | *index = srcu_read_lock(&user->release_barrier); | ||
222 | ruser = srcu_dereference(user->self, &user->release_barrier); | ||
223 | if (!ruser) | ||
224 | srcu_read_unlock(&user->release_barrier, *index); | ||
225 | return ruser; | ||
226 | } | ||
227 | |||
228 | static void release_ipmi_user(struct ipmi_user *user, int index) | ||
229 | { | ||
230 | srcu_read_unlock(&user->release_barrier, index); | ||
231 | } | ||
232 | |||
169 | struct cmd_rcvr { | 233 | struct cmd_rcvr { |
170 | struct list_head link; | 234 | struct list_head link; |
171 | 235 | ||
172 | ipmi_user_t user; | 236 | struct ipmi_user *user; |
173 | unsigned char netfn; | 237 | unsigned char netfn; |
174 | unsigned char cmd; | 238 | unsigned char cmd; |
175 | unsigned int chans; | 239 | unsigned int chans; |
@@ -247,13 +311,6 @@ struct ipmi_my_addrinfo { | |||
247 | unsigned char lun; | 311 | unsigned char lun; |
248 | }; | 312 | }; |
249 | 313 | ||
250 | #ifdef CONFIG_IPMI_PROC_INTERFACE | ||
251 | struct ipmi_proc_entry { | ||
252 | char *name; | ||
253 | struct ipmi_proc_entry *next; | ||
254 | }; | ||
255 | #endif | ||
256 | |||
257 | /* | 314 | /* |
258 | * Note that the product id, manufacturer id, guid, and device id are | 315 | * Note that the product id, manufacturer id, guid, and device id are |
259 | * immutable in this structure, so dyn_mutex is not required for | 316 | * immutable in this structure, so dyn_mutex is not required for |
@@ -275,7 +332,7 @@ struct bmc_device { | |||
275 | }; | 332 | }; |
276 | #define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev) | 333 | #define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev) |
277 | 334 | ||
278 | static int bmc_get_device_id(ipmi_smi_t intf, struct bmc_device *bmc, | 335 | static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc, |
279 | struct ipmi_device_id *id, | 336 | struct ipmi_device_id *id, |
280 | bool *guid_set, guid_t *guid); | 337 | bool *guid_set, guid_t *guid); |
281 | 338 | ||
@@ -397,10 +454,11 @@ struct ipmi_smi { | |||
397 | struct list_head link; | 454 | struct list_head link; |
398 | 455 | ||
399 | /* | 456 | /* |
400 | * The list of upper layers that are using me. seq_lock | 457 | * The list of upper layers that are using me. seq_lock write |
401 | * protects this. | 458 | * protects this. Read protection is with srcu. |
402 | */ | 459 | */ |
403 | struct list_head users; | 460 | struct list_head users; |
461 | struct srcu_struct users_srcu; | ||
404 | 462 | ||
405 | /* Used for wake ups at startup. */ | 463 | /* Used for wake ups at startup. */ |
406 | wait_queue_head_t waitq; | 464 | wait_queue_head_t waitq; |
@@ -420,24 +478,9 @@ struct ipmi_smi { | |||
420 | bool in_bmc_register; /* Handle recursive situations. Yuck. */ | 478 | bool in_bmc_register; /* Handle recursive situations. Yuck. */ |
421 | struct work_struct bmc_reg_work; | 479 | struct work_struct bmc_reg_work; |
422 | 480 | ||
423 | /* | ||
424 | * This is the lower-layer's sender routine. Note that you | ||
425 | * must either be holding the ipmi_interfaces_mutex or be in | ||
426 | * an umpreemptible region to use this. You must fetch the | ||
427 | * value into a local variable and make sure it is not NULL. | ||
428 | */ | ||
429 | const struct ipmi_smi_handlers *handlers; | 481 | const struct ipmi_smi_handlers *handlers; |
430 | void *send_info; | 482 | void *send_info; |
431 | 483 | ||
432 | #ifdef CONFIG_IPMI_PROC_INTERFACE | ||
433 | /* A list of proc entries for this interface. */ | ||
434 | struct mutex proc_entry_lock; | ||
435 | struct ipmi_proc_entry *proc_entries; | ||
436 | |||
437 | struct proc_dir_entry *proc_dir; | ||
438 | char proc_dir_name[10]; | ||
439 | #endif | ||
440 | |||
441 | /* Driver-model device for the system interface. */ | 484 | /* Driver-model device for the system interface. */ |
442 | struct device *si_dev; | 485 | struct device *si_dev; |
443 | 486 | ||
@@ -503,6 +546,13 @@ struct ipmi_smi { | |||
503 | spinlock_t maintenance_mode_lock; /* Used in a timer... */ | 546 | spinlock_t maintenance_mode_lock; /* Used in a timer... */ |
504 | 547 | ||
505 | /* | 548 | /* |
549 | * If we are doing maintenance on something on IPMB, extend | ||
550 | * the timeout time to avoid timeouts writing firmware and | ||
551 | * such. | ||
552 | */ | ||
553 | int ipmb_maintenance_mode_timeout; | ||
554 | |||
555 | /* | ||
506 | * A cheap hack, if this is non-null and a message to an | 556 | * A cheap hack, if this is non-null and a message to an |
507 | * interface comes in with a NULL user, call this routine with | 557 | * interface comes in with a NULL user, call this routine with |
508 | * it. Note that the message will still be freed by the | 558 | * it. Note that the message will still be freed by the |
@@ -510,7 +560,8 @@ struct ipmi_smi { | |||
510 | * | 560 | * |
511 | * Protected by bmc_reg_mutex. | 561 | * Protected by bmc_reg_mutex. |
512 | */ | 562 | */ |
513 | void (*null_user_handler)(ipmi_smi_t intf, struct ipmi_recv_msg *msg); | 563 | void (*null_user_handler)(struct ipmi_smi *intf, |
564 | struct ipmi_recv_msg *msg); | ||
514 | 565 | ||
515 | /* | 566 | /* |
516 | * When we are scanning the channels for an SMI, this will | 567 | * When we are scanning the channels for an SMI, this will |
@@ -536,12 +587,12 @@ struct ipmi_smi { | |||
536 | }; | 587 | }; |
537 | #define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev) | 588 | #define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev) |
538 | 589 | ||
539 | static void __get_guid(ipmi_smi_t intf); | 590 | static void __get_guid(struct ipmi_smi *intf); |
540 | static void __ipmi_bmc_unregister(ipmi_smi_t intf); | 591 | static void __ipmi_bmc_unregister(struct ipmi_smi *intf); |
541 | static int __ipmi_bmc_register(ipmi_smi_t intf, | 592 | static int __ipmi_bmc_register(struct ipmi_smi *intf, |
542 | struct ipmi_device_id *id, | 593 | struct ipmi_device_id *id, |
543 | bool guid_set, guid_t *guid, int intf_num); | 594 | bool guid_set, guid_t *guid, int intf_num); |
544 | static int __scan_channels(ipmi_smi_t intf, struct ipmi_device_id *id); | 595 | static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id); |
545 | 596 | ||
546 | 597 | ||
547 | /** | 598 | /** |
@@ -560,6 +611,7 @@ static DEFINE_MUTEX(ipmidriver_mutex); | |||
560 | 611 | ||
561 | static LIST_HEAD(ipmi_interfaces); | 612 | static LIST_HEAD(ipmi_interfaces); |
562 | static DEFINE_MUTEX(ipmi_interfaces_mutex); | 613 | static DEFINE_MUTEX(ipmi_interfaces_mutex); |
614 | DEFINE_STATIC_SRCU(ipmi_interfaces_srcu); | ||
563 | 615 | ||
564 | /* | 616 | /* |
565 | * List of watchers that want to know when smi's are added and deleted. | 617 | * List of watchers that want to know when smi's are added and deleted. |
@@ -620,7 +672,7 @@ static void free_smi_msg_list(struct list_head *q) | |||
620 | } | 672 | } |
621 | } | 673 | } |
622 | 674 | ||
623 | static void clean_up_interface_data(ipmi_smi_t intf) | 675 | static void clean_up_interface_data(struct ipmi_smi *intf) |
624 | { | 676 | { |
625 | int i; | 677 | int i; |
626 | struct cmd_rcvr *rcvr, *rcvr2; | 678 | struct cmd_rcvr *rcvr, *rcvr2; |
@@ -652,7 +704,7 @@ static void clean_up_interface_data(ipmi_smi_t intf) | |||
652 | 704 | ||
653 | static void intf_free(struct kref *ref) | 705 | static void intf_free(struct kref *ref) |
654 | { | 706 | { |
655 | ipmi_smi_t intf = container_of(ref, struct ipmi_smi, refcount); | 707 | struct ipmi_smi *intf = container_of(ref, struct ipmi_smi, refcount); |
656 | 708 | ||
657 | clean_up_interface_data(intf); | 709 | clean_up_interface_data(intf); |
658 | kfree(intf); | 710 | kfree(intf); |
@@ -660,65 +712,39 @@ static void intf_free(struct kref *ref) | |||
660 | 712 | ||
661 | struct watcher_entry { | 713 | struct watcher_entry { |
662 | int intf_num; | 714 | int intf_num; |
663 | ipmi_smi_t intf; | 715 | struct ipmi_smi *intf; |
664 | struct list_head link; | 716 | struct list_head link; |
665 | }; | 717 | }; |
666 | 718 | ||
667 | int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher) | 719 | int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher) |
668 | { | 720 | { |
669 | ipmi_smi_t intf; | 721 | struct ipmi_smi *intf; |
670 | LIST_HEAD(to_deliver); | 722 | int index; |
671 | struct watcher_entry *e, *e2; | ||
672 | 723 | ||
673 | mutex_lock(&smi_watchers_mutex); | 724 | mutex_lock(&smi_watchers_mutex); |
674 | 725 | ||
675 | mutex_lock(&ipmi_interfaces_mutex); | ||
676 | |||
677 | /* Build a list of things to deliver. */ | ||
678 | list_for_each_entry(intf, &ipmi_interfaces, link) { | ||
679 | if (intf->intf_num == -1) | ||
680 | continue; | ||
681 | e = kmalloc(sizeof(*e), GFP_KERNEL); | ||
682 | if (!e) | ||
683 | goto out_err; | ||
684 | kref_get(&intf->refcount); | ||
685 | e->intf = intf; | ||
686 | e->intf_num = intf->intf_num; | ||
687 | list_add_tail(&e->link, &to_deliver); | ||
688 | } | ||
689 | |||
690 | /* We will succeed, so add it to the list. */ | ||
691 | list_add(&watcher->link, &smi_watchers); | 726 | list_add(&watcher->link, &smi_watchers); |
692 | 727 | ||
693 | mutex_unlock(&ipmi_interfaces_mutex); | 728 | index = srcu_read_lock(&ipmi_interfaces_srcu); |
729 | list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { | ||
730 | int intf_num = READ_ONCE(intf->intf_num); | ||
694 | 731 | ||
695 | list_for_each_entry_safe(e, e2, &to_deliver, link) { | 732 | if (intf_num == -1) |
696 | list_del(&e->link); | 733 | continue; |
697 | watcher->new_smi(e->intf_num, e->intf->si_dev); | 734 | watcher->new_smi(intf_num, intf->si_dev); |
698 | kref_put(&e->intf->refcount, intf_free); | ||
699 | kfree(e); | ||
700 | } | 735 | } |
736 | srcu_read_unlock(&ipmi_interfaces_srcu, index); | ||
701 | 737 | ||
702 | mutex_unlock(&smi_watchers_mutex); | 738 | mutex_unlock(&smi_watchers_mutex); |
703 | 739 | ||
704 | return 0; | 740 | return 0; |
705 | |||
706 | out_err: | ||
707 | mutex_unlock(&ipmi_interfaces_mutex); | ||
708 | mutex_unlock(&smi_watchers_mutex); | ||
709 | list_for_each_entry_safe(e, e2, &to_deliver, link) { | ||
710 | list_del(&e->link); | ||
711 | kref_put(&e->intf->refcount, intf_free); | ||
712 | kfree(e); | ||
713 | } | ||
714 | return -ENOMEM; | ||
715 | } | 741 | } |
716 | EXPORT_SYMBOL(ipmi_smi_watcher_register); | 742 | EXPORT_SYMBOL(ipmi_smi_watcher_register); |
717 | 743 | ||
718 | int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher) | 744 | int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher) |
719 | { | 745 | { |
720 | mutex_lock(&smi_watchers_mutex); | 746 | mutex_lock(&smi_watchers_mutex); |
721 | list_del(&(watcher->link)); | 747 | list_del(&watcher->link); |
722 | mutex_unlock(&smi_watchers_mutex); | 748 | mutex_unlock(&smi_watchers_mutex); |
723 | return 0; | 749 | return 0; |
724 | } | 750 | } |
@@ -732,12 +758,14 @@ call_smi_watchers(int i, struct device *dev) | |||
732 | { | 758 | { |
733 | struct ipmi_smi_watcher *w; | 759 | struct ipmi_smi_watcher *w; |
734 | 760 | ||
761 | mutex_lock(&smi_watchers_mutex); | ||
735 | list_for_each_entry(w, &smi_watchers, link) { | 762 | list_for_each_entry(w, &smi_watchers, link) { |
736 | if (try_module_get(w->owner)) { | 763 | if (try_module_get(w->owner)) { |
737 | w->new_smi(i, dev); | 764 | w->new_smi(i, dev); |
738 | module_put(w->owner); | 765 | module_put(w->owner); |
739 | } | 766 | } |
740 | } | 767 | } |
768 | mutex_unlock(&smi_watchers_mutex); | ||
741 | } | 769 | } |
742 | 770 | ||
743 | static int | 771 | static int |
@@ -831,18 +859,17 @@ unsigned int ipmi_addr_length(int addr_type) | |||
831 | } | 859 | } |
832 | EXPORT_SYMBOL(ipmi_addr_length); | 860 | EXPORT_SYMBOL(ipmi_addr_length); |
833 | 861 | ||
834 | static void deliver_response(struct ipmi_recv_msg *msg) | 862 | static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) |
835 | { | 863 | { |
836 | if (!msg->user) { | 864 | int rv = 0; |
837 | ipmi_smi_t intf = msg->user_msg_data; | ||
838 | 865 | ||
866 | if (!msg->user) { | ||
839 | /* Special handling for NULL users. */ | 867 | /* Special handling for NULL users. */ |
840 | if (intf->null_user_handler) { | 868 | if (intf->null_user_handler) { |
841 | intf->null_user_handler(intf, msg); | 869 | intf->null_user_handler(intf, msg); |
842 | ipmi_inc_stat(intf, handled_local_responses); | ||
843 | } else { | 870 | } else { |
844 | /* No handler, so give up. */ | 871 | /* No handler, so give up. */ |
845 | ipmi_inc_stat(intf, unhandled_local_responses); | 872 | rv = -EINVAL; |
846 | } | 873 | } |
847 | ipmi_free_recv_msg(msg); | 874 | ipmi_free_recv_msg(msg); |
848 | } else if (!oops_in_progress) { | 875 | } else if (!oops_in_progress) { |
@@ -851,21 +878,40 @@ static void deliver_response(struct ipmi_recv_msg *msg) | |||
851 | * receive handler doesn't much meaning and has a deadlock | 878 | * receive handler doesn't much meaning and has a deadlock |
852 | * risk. At this moment, simply skip it in that case. | 879 | * risk. At this moment, simply skip it in that case. |
853 | */ | 880 | */ |
881 | int index; | ||
882 | struct ipmi_user *user = acquire_ipmi_user(msg->user, &index); | ||
854 | 883 | ||
855 | ipmi_user_t user = msg->user; | 884 | if (user) { |
856 | user->handler->ipmi_recv_hndl(msg, user->handler_data); | 885 | user->handler->ipmi_recv_hndl(msg, user->handler_data); |
886 | release_ipmi_user(msg->user, index); | ||
887 | } else { | ||
888 | /* User went away, give up. */ | ||
889 | ipmi_free_recv_msg(msg); | ||
890 | rv = -EINVAL; | ||
891 | } | ||
857 | } | 892 | } |
893 | |||
894 | return rv; | ||
858 | } | 895 | } |
859 | 896 | ||
860 | static void | 897 | static void deliver_local_response(struct ipmi_smi *intf, |
861 | deliver_err_response(struct ipmi_recv_msg *msg, int err) | 898 | struct ipmi_recv_msg *msg) |
899 | { | ||
900 | if (deliver_response(intf, msg)) | ||
901 | ipmi_inc_stat(intf, unhandled_local_responses); | ||
902 | else | ||
903 | ipmi_inc_stat(intf, handled_local_responses); | ||
904 | } | ||
905 | |||
906 | static void deliver_err_response(struct ipmi_smi *intf, | ||
907 | struct ipmi_recv_msg *msg, int err) | ||
862 | { | 908 | { |
863 | msg->recv_type = IPMI_RESPONSE_RECV_TYPE; | 909 | msg->recv_type = IPMI_RESPONSE_RECV_TYPE; |
864 | msg->msg_data[0] = err; | 910 | msg->msg_data[0] = err; |
865 | msg->msg.netfn |= 1; /* Convert to a response. */ | 911 | msg->msg.netfn |= 1; /* Convert to a response. */ |
866 | msg->msg.data_len = 1; | 912 | msg->msg.data_len = 1; |
867 | msg->msg.data = msg->msg_data; | 913 | msg->msg.data = msg->msg_data; |
868 | deliver_response(msg); | 914 | deliver_local_response(intf, msg); |
869 | } | 915 | } |
870 | 916 | ||
871 | /* | 917 | /* |
@@ -873,7 +919,7 @@ deliver_err_response(struct ipmi_recv_msg *msg, int err) | |||
873 | * message with the given timeout to the sequence table. This must be | 919 | * message with the given timeout to the sequence table. This must be |
874 | * called with the interface's seq_lock held. | 920 | * called with the interface's seq_lock held. |
875 | */ | 921 | */ |
876 | static int intf_next_seq(ipmi_smi_t intf, | 922 | static int intf_next_seq(struct ipmi_smi *intf, |
877 | struct ipmi_recv_msg *recv_msg, | 923 | struct ipmi_recv_msg *recv_msg, |
878 | unsigned long timeout, | 924 | unsigned long timeout, |
879 | int retries, | 925 | int retries, |
@@ -884,6 +930,11 @@ static int intf_next_seq(ipmi_smi_t intf, | |||
884 | int rv = 0; | 930 | int rv = 0; |
885 | unsigned int i; | 931 | unsigned int i; |
886 | 932 | ||
933 | if (timeout == 0) | ||
934 | timeout = default_retry_ms; | ||
935 | if (retries < 0) | ||
936 | retries = default_max_retries; | ||
937 | |||
887 | for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq; | 938 | for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq; |
888 | i = (i+1)%IPMI_IPMB_NUM_SEQ) { | 939 | i = (i+1)%IPMI_IPMB_NUM_SEQ) { |
889 | if (!intf->seq_table[i].inuse) | 940 | if (!intf->seq_table[i].inuse) |
@@ -921,7 +972,7 @@ static int intf_next_seq(ipmi_smi_t intf, | |||
921 | * guard against message coming in after their timeout and the | 972 | * guard against message coming in after their timeout and the |
922 | * sequence number being reused). | 973 | * sequence number being reused). |
923 | */ | 974 | */ |
924 | static int intf_find_seq(ipmi_smi_t intf, | 975 | static int intf_find_seq(struct ipmi_smi *intf, |
925 | unsigned char seq, | 976 | unsigned char seq, |
926 | short channel, | 977 | short channel, |
927 | unsigned char cmd, | 978 | unsigned char cmd, |
@@ -935,26 +986,26 @@ static int intf_find_seq(ipmi_smi_t intf, | |||
935 | if (seq >= IPMI_IPMB_NUM_SEQ) | 986 | if (seq >= IPMI_IPMB_NUM_SEQ) |
936 | return -EINVAL; | 987 | return -EINVAL; |
937 | 988 | ||
938 | spin_lock_irqsave(&(intf->seq_lock), flags); | 989 | spin_lock_irqsave(&intf->seq_lock, flags); |
939 | if (intf->seq_table[seq].inuse) { | 990 | if (intf->seq_table[seq].inuse) { |
940 | struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg; | 991 | struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg; |
941 | 992 | ||
942 | if ((msg->addr.channel == channel) && (msg->msg.cmd == cmd) | 993 | if ((msg->addr.channel == channel) && (msg->msg.cmd == cmd) |
943 | && (msg->msg.netfn == netfn) | 994 | && (msg->msg.netfn == netfn) |
944 | && (ipmi_addr_equal(addr, &(msg->addr)))) { | 995 | && (ipmi_addr_equal(addr, &msg->addr))) { |
945 | *recv_msg = msg; | 996 | *recv_msg = msg; |
946 | intf->seq_table[seq].inuse = 0; | 997 | intf->seq_table[seq].inuse = 0; |
947 | rv = 0; | 998 | rv = 0; |
948 | } | 999 | } |
949 | } | 1000 | } |
950 | spin_unlock_irqrestore(&(intf->seq_lock), flags); | 1001 | spin_unlock_irqrestore(&intf->seq_lock, flags); |
951 | 1002 | ||
952 | return rv; | 1003 | return rv; |
953 | } | 1004 | } |
954 | 1005 | ||
955 | 1006 | ||
956 | /* Start the timer for a specific sequence table entry. */ | 1007 | /* Start the timer for a specific sequence table entry. */ |
957 | static int intf_start_seq_timer(ipmi_smi_t intf, | 1008 | static int intf_start_seq_timer(struct ipmi_smi *intf, |
958 | long msgid) | 1009 | long msgid) |
959 | { | 1010 | { |
960 | int rv = -ENODEV; | 1011 | int rv = -ENODEV; |
@@ -965,24 +1016,24 @@ static int intf_start_seq_timer(ipmi_smi_t intf, | |||
965 | 1016 | ||
966 | GET_SEQ_FROM_MSGID(msgid, seq, seqid); | 1017 | GET_SEQ_FROM_MSGID(msgid, seq, seqid); |
967 | 1018 | ||
968 | spin_lock_irqsave(&(intf->seq_lock), flags); | 1019 | spin_lock_irqsave(&intf->seq_lock, flags); |
969 | /* | 1020 | /* |
970 | * We do this verification because the user can be deleted | 1021 | * We do this verification because the user can be deleted |
971 | * while a message is outstanding. | 1022 | * while a message is outstanding. |
972 | */ | 1023 | */ |
973 | if ((intf->seq_table[seq].inuse) | 1024 | if ((intf->seq_table[seq].inuse) |
974 | && (intf->seq_table[seq].seqid == seqid)) { | 1025 | && (intf->seq_table[seq].seqid == seqid)) { |
975 | struct seq_table *ent = &(intf->seq_table[seq]); | 1026 | struct seq_table *ent = &intf->seq_table[seq]; |
976 | ent->timeout = ent->orig_timeout; | 1027 | ent->timeout = ent->orig_timeout; |
977 | rv = 0; | 1028 | rv = 0; |
978 | } | 1029 | } |
979 | spin_unlock_irqrestore(&(intf->seq_lock), flags); | 1030 | spin_unlock_irqrestore(&intf->seq_lock, flags); |
980 | 1031 | ||
981 | return rv; | 1032 | return rv; |
982 | } | 1033 | } |
983 | 1034 | ||
984 | /* Got an error for the send message for a specific sequence number. */ | 1035 | /* Got an error for the send message for a specific sequence number. */ |
985 | static int intf_err_seq(ipmi_smi_t intf, | 1036 | static int intf_err_seq(struct ipmi_smi *intf, |
986 | long msgid, | 1037 | long msgid, |
987 | unsigned int err) | 1038 | unsigned int err) |
988 | { | 1039 | { |
@@ -995,23 +1046,23 @@ static int intf_err_seq(ipmi_smi_t intf, | |||
995 | 1046 | ||
996 | GET_SEQ_FROM_MSGID(msgid, seq, seqid); | 1047 | GET_SEQ_FROM_MSGID(msgid, seq, seqid); |
997 | 1048 | ||
998 | spin_lock_irqsave(&(intf->seq_lock), flags); | 1049 | spin_lock_irqsave(&intf->seq_lock, flags); |
999 | /* | 1050 | /* |
1000 | * We do this verification because the user can be deleted | 1051 | * We do this verification because the user can be deleted |
1001 | * while a message is outstanding. | 1052 | * while a message is outstanding. |
1002 | */ | 1053 | */ |
1003 | if ((intf->seq_table[seq].inuse) | 1054 | if ((intf->seq_table[seq].inuse) |
1004 | && (intf->seq_table[seq].seqid == seqid)) { | 1055 | && (intf->seq_table[seq].seqid == seqid)) { |
1005 | struct seq_table *ent = &(intf->seq_table[seq]); | 1056 | struct seq_table *ent = &intf->seq_table[seq]; |
1006 | 1057 | ||
1007 | ent->inuse = 0; | 1058 | ent->inuse = 0; |
1008 | msg = ent->recv_msg; | 1059 | msg = ent->recv_msg; |
1009 | rv = 0; | 1060 | rv = 0; |
1010 | } | 1061 | } |
1011 | spin_unlock_irqrestore(&(intf->seq_lock), flags); | 1062 | spin_unlock_irqrestore(&intf->seq_lock, flags); |
1012 | 1063 | ||
1013 | if (msg) | 1064 | if (msg) |
1014 | deliver_err_response(msg, err); | 1065 | deliver_err_response(intf, msg, err); |
1015 | 1066 | ||
1016 | return rv; | 1067 | return rv; |
1017 | } | 1068 | } |
@@ -1020,12 +1071,12 @@ static int intf_err_seq(ipmi_smi_t intf, | |||
1020 | int ipmi_create_user(unsigned int if_num, | 1071 | int ipmi_create_user(unsigned int if_num, |
1021 | const struct ipmi_user_hndl *handler, | 1072 | const struct ipmi_user_hndl *handler, |
1022 | void *handler_data, | 1073 | void *handler_data, |
1023 | ipmi_user_t *user) | 1074 | struct ipmi_user **user) |
1024 | { | 1075 | { |
1025 | unsigned long flags; | 1076 | unsigned long flags; |
1026 | ipmi_user_t new_user; | 1077 | struct ipmi_user *new_user; |
1027 | int rv = 0; | 1078 | int rv = 0, index; |
1028 | ipmi_smi_t intf; | 1079 | struct ipmi_smi *intf; |
1029 | 1080 | ||
1030 | /* | 1081 | /* |
1031 | * There is no module usecount here, because it's not | 1082 | * There is no module usecount here, because it's not |
@@ -1059,7 +1110,7 @@ int ipmi_create_user(unsigned int if_num, | |||
1059 | if (!new_user) | 1110 | if (!new_user) |
1060 | return -ENOMEM; | 1111 | return -ENOMEM; |
1061 | 1112 | ||
1062 | mutex_lock(&ipmi_interfaces_mutex); | 1113 | index = srcu_read_lock(&ipmi_interfaces_srcu); |
1063 | list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { | 1114 | list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { |
1064 | if (intf->intf_num == if_num) | 1115 | if (intf->intf_num == if_num) |
1065 | goto found; | 1116 | goto found; |
@@ -1069,6 +1120,10 @@ int ipmi_create_user(unsigned int if_num, | |||
1069 | goto out_kfree; | 1120 | goto out_kfree; |
1070 | 1121 | ||
1071 | found: | 1122 | found: |
1123 | rv = init_srcu_struct(&new_user->release_barrier); | ||
1124 | if (rv) | ||
1125 | goto out_kfree; | ||
1126 | |||
1072 | /* Note that each existing user holds a refcount to the interface. */ | 1127 | /* Note that each existing user holds a refcount to the interface. */ |
1073 | kref_get(&intf->refcount); | 1128 | kref_get(&intf->refcount); |
1074 | 1129 | ||
@@ -1078,26 +1133,7 @@ int ipmi_create_user(unsigned int if_num, | |||
1078 | new_user->intf = intf; | 1133 | new_user->intf = intf; |
1079 | new_user->gets_events = false; | 1134 | new_user->gets_events = false; |
1080 | 1135 | ||
1081 | if (!try_module_get(intf->handlers->owner)) { | 1136 | rcu_assign_pointer(new_user->self, new_user); |
1082 | rv = -ENODEV; | ||
1083 | goto out_kref; | ||
1084 | } | ||
1085 | |||
1086 | if (intf->handlers->inc_usecount) { | ||
1087 | rv = intf->handlers->inc_usecount(intf->send_info); | ||
1088 | if (rv) { | ||
1089 | module_put(intf->handlers->owner); | ||
1090 | goto out_kref; | ||
1091 | } | ||
1092 | } | ||
1093 | |||
1094 | /* | ||
1095 | * Hold the lock so intf->handlers is guaranteed to be good | ||
1096 | * until now | ||
1097 | */ | ||
1098 | mutex_unlock(&ipmi_interfaces_mutex); | ||
1099 | |||
1100 | new_user->valid = true; | ||
1101 | spin_lock_irqsave(&intf->seq_lock, flags); | 1137 | spin_lock_irqsave(&intf->seq_lock, flags); |
1102 | list_add_rcu(&new_user->link, &intf->users); | 1138 | list_add_rcu(&new_user->link, &intf->users); |
1103 | spin_unlock_irqrestore(&intf->seq_lock, flags); | 1139 | spin_unlock_irqrestore(&intf->seq_lock, flags); |
@@ -1106,13 +1142,12 @@ int ipmi_create_user(unsigned int if_num, | |||
1106 | if (atomic_inc_return(&intf->event_waiters) == 1) | 1142 | if (atomic_inc_return(&intf->event_waiters) == 1) |
1107 | need_waiter(intf); | 1143 | need_waiter(intf); |
1108 | } | 1144 | } |
1145 | srcu_read_unlock(&ipmi_interfaces_srcu, index); | ||
1109 | *user = new_user; | 1146 | *user = new_user; |
1110 | return 0; | 1147 | return 0; |
1111 | 1148 | ||
1112 | out_kref: | ||
1113 | kref_put(&intf->refcount, intf_free); | ||
1114 | out_kfree: | 1149 | out_kfree: |
1115 | mutex_unlock(&ipmi_interfaces_mutex); | 1150 | srcu_read_unlock(&ipmi_interfaces_srcu, index); |
1116 | kfree(new_user); | 1151 | kfree(new_user); |
1117 | return rv; | 1152 | return rv; |
1118 | } | 1153 | } |
@@ -1120,26 +1155,25 @@ EXPORT_SYMBOL(ipmi_create_user); | |||
1120 | 1155 | ||
1121 | int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data) | 1156 | int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data) |
1122 | { | 1157 | { |
1123 | int rv = 0; | 1158 | int rv, index; |
1124 | ipmi_smi_t intf; | 1159 | struct ipmi_smi *intf; |
1125 | const struct ipmi_smi_handlers *handlers; | ||
1126 | 1160 | ||
1127 | mutex_lock(&ipmi_interfaces_mutex); | 1161 | index = srcu_read_lock(&ipmi_interfaces_srcu); |
1128 | list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { | 1162 | list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { |
1129 | if (intf->intf_num == if_num) | 1163 | if (intf->intf_num == if_num) |
1130 | goto found; | 1164 | goto found; |
1131 | } | 1165 | } |
1166 | srcu_read_unlock(&ipmi_interfaces_srcu, index); | ||
1167 | |||
1132 | /* Not found, return an error */ | 1168 | /* Not found, return an error */ |
1133 | rv = -EINVAL; | 1169 | return -EINVAL; |
1134 | mutex_unlock(&ipmi_interfaces_mutex); | ||
1135 | return rv; | ||
1136 | 1170 | ||
1137 | found: | 1171 | found: |
1138 | handlers = intf->handlers; | 1172 | if (!intf->handlers->get_smi_info) |
1139 | rv = -ENOSYS; | 1173 | rv = -ENOTTY; |
1140 | if (handlers->get_smi_info) | 1174 | else |
1141 | rv = handlers->get_smi_info(intf->send_info, data); | 1175 | rv = intf->handlers->get_smi_info(intf->send_info, data); |
1142 | mutex_unlock(&ipmi_interfaces_mutex); | 1176 | srcu_read_unlock(&ipmi_interfaces_srcu, index); |
1143 | 1177 | ||
1144 | return rv; | 1178 | return rv; |
1145 | } | 1179 | } |
@@ -1147,19 +1181,34 @@ EXPORT_SYMBOL(ipmi_get_smi_info); | |||
1147 | 1181 | ||
1148 | static void free_user(struct kref *ref) | 1182 | static void free_user(struct kref *ref) |
1149 | { | 1183 | { |
1150 | ipmi_user_t user = container_of(ref, struct ipmi_user, refcount); | 1184 | struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount); |
1151 | kfree(user); | 1185 | kfree(user); |
1152 | } | 1186 | } |
1153 | 1187 | ||
1154 | int ipmi_destroy_user(ipmi_user_t user) | 1188 | static void _ipmi_destroy_user(struct ipmi_user *user) |
1155 | { | 1189 | { |
1156 | ipmi_smi_t intf = user->intf; | 1190 | struct ipmi_smi *intf = user->intf; |
1157 | int i; | 1191 | int i; |
1158 | unsigned long flags; | 1192 | unsigned long flags; |
1159 | struct cmd_rcvr *rcvr; | 1193 | struct cmd_rcvr *rcvr; |
1160 | struct cmd_rcvr *rcvrs = NULL; | 1194 | struct cmd_rcvr *rcvrs = NULL; |
1161 | 1195 | ||
1162 | user->valid = false; | 1196 | if (!acquire_ipmi_user(user, &i)) { |
1197 | /* | ||
1198 | * The user has already been cleaned up, just make sure | ||
1199 | * nothing is using it and return. | ||
1200 | */ | ||
1201 | synchronize_srcu(&user->release_barrier); | ||
1202 | return; | ||
1203 | } | ||
1204 | |||
1205 | rcu_assign_pointer(user->self, NULL); | ||
1206 | release_ipmi_user(user, i); | ||
1207 | |||
1208 | synchronize_srcu(&user->release_barrier); | ||
1209 | |||
1210 | if (user->handler->shutdown) | ||
1211 | user->handler->shutdown(user->handler_data); | ||
1163 | 1212 | ||
1164 | if (user->handler->ipmi_watchdog_pretimeout) | 1213 | if (user->handler->ipmi_watchdog_pretimeout) |
1165 | atomic_dec(&intf->event_waiters); | 1214 | atomic_dec(&intf->event_waiters); |
@@ -1184,7 +1233,7 @@ int ipmi_destroy_user(ipmi_user_t user) | |||
1184 | * Remove the user from the command receiver's table. First | 1233 | * Remove the user from the command receiver's table. First |
1185 | * we build a list of everything (not using the standard link, | 1234 | * we build a list of everything (not using the standard link, |
1186 | * since other things may be using it till we do | 1235 | * since other things may be using it till we do |
1187 | * synchronize_rcu()) then free everything in that list. | 1236 | * synchronize_srcu()) then free everything in that list. |
1188 | */ | 1237 | */ |
1189 | mutex_lock(&intf->cmd_rcvrs_mutex); | 1238 | mutex_lock(&intf->cmd_rcvrs_mutex); |
1190 | list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) { | 1239 | list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) { |
@@ -1202,109 +1251,156 @@ int ipmi_destroy_user(ipmi_user_t user) | |||
1202 | kfree(rcvr); | 1251 | kfree(rcvr); |
1203 | } | 1252 | } |
1204 | 1253 | ||
1205 | mutex_lock(&ipmi_interfaces_mutex); | ||
1206 | if (intf->handlers) { | ||
1207 | module_put(intf->handlers->owner); | ||
1208 | if (intf->handlers->dec_usecount) | ||
1209 | intf->handlers->dec_usecount(intf->send_info); | ||
1210 | } | ||
1211 | mutex_unlock(&ipmi_interfaces_mutex); | ||
1212 | |||
1213 | kref_put(&intf->refcount, intf_free); | 1254 | kref_put(&intf->refcount, intf_free); |
1255 | } | ||
1214 | 1256 | ||
1257 | int ipmi_destroy_user(struct ipmi_user *user) | ||
1258 | { | ||
1259 | _ipmi_destroy_user(user); | ||
1260 | |||
1261 | cleanup_srcu_struct(&user->release_barrier); | ||
1215 | kref_put(&user->refcount, free_user); | 1262 | kref_put(&user->refcount, free_user); |
1216 | 1263 | ||
1217 | return 0; | 1264 | return 0; |
1218 | } | 1265 | } |
1219 | EXPORT_SYMBOL(ipmi_destroy_user); | 1266 | EXPORT_SYMBOL(ipmi_destroy_user); |
1220 | 1267 | ||
1221 | int ipmi_get_version(ipmi_user_t user, | 1268 | int ipmi_get_version(struct ipmi_user *user, |
1222 | unsigned char *major, | 1269 | unsigned char *major, |
1223 | unsigned char *minor) | 1270 | unsigned char *minor) |
1224 | { | 1271 | { |
1225 | struct ipmi_device_id id; | 1272 | struct ipmi_device_id id; |
1226 | int rv; | 1273 | int rv, index; |
1227 | 1274 | ||
1228 | rv = bmc_get_device_id(user->intf, NULL, &id, NULL, NULL); | 1275 | user = acquire_ipmi_user(user, &index); |
1229 | if (rv) | 1276 | if (!user) |
1230 | return rv; | 1277 | return -ENODEV; |
1231 | 1278 | ||
1232 | *major = ipmi_version_major(&id); | 1279 | rv = bmc_get_device_id(user->intf, NULL, &id, NULL, NULL); |
1233 | *minor = ipmi_version_minor(&id); | 1280 | if (!rv) { |
1281 | *major = ipmi_version_major(&id); | ||
1282 | *minor = ipmi_version_minor(&id); | ||
1283 | } | ||
1284 | release_ipmi_user(user, index); | ||
1234 | 1285 | ||
1235 | return 0; | 1286 | return rv; |
1236 | } | 1287 | } |
1237 | EXPORT_SYMBOL(ipmi_get_version); | 1288 | EXPORT_SYMBOL(ipmi_get_version); |
1238 | 1289 | ||
1239 | int ipmi_set_my_address(ipmi_user_t user, | 1290 | int ipmi_set_my_address(struct ipmi_user *user, |
1240 | unsigned int channel, | 1291 | unsigned int channel, |
1241 | unsigned char address) | 1292 | unsigned char address) |
1242 | { | 1293 | { |
1294 | int index, rv = 0; | ||
1295 | |||
1296 | user = acquire_ipmi_user(user, &index); | ||
1297 | if (!user) | ||
1298 | return -ENODEV; | ||
1299 | |||
1243 | if (channel >= IPMI_MAX_CHANNELS) | 1300 | if (channel >= IPMI_MAX_CHANNELS) |
1244 | return -EINVAL; | 1301 | rv = -EINVAL; |
1245 | user->intf->addrinfo[channel].address = address; | 1302 | else |
1246 | return 0; | 1303 | user->intf->addrinfo[channel].address = address; |
1304 | release_ipmi_user(user, index); | ||
1305 | |||
1306 | return rv; | ||
1247 | } | 1307 | } |
1248 | EXPORT_SYMBOL(ipmi_set_my_address); | 1308 | EXPORT_SYMBOL(ipmi_set_my_address); |
1249 | 1309 | ||
1250 | int ipmi_get_my_address(ipmi_user_t user, | 1310 | int ipmi_get_my_address(struct ipmi_user *user, |
1251 | unsigned int channel, | 1311 | unsigned int channel, |
1252 | unsigned char *address) | 1312 | unsigned char *address) |
1253 | { | 1313 | { |
1314 | int index, rv = 0; | ||
1315 | |||
1316 | user = acquire_ipmi_user(user, &index); | ||
1317 | if (!user) | ||
1318 | return -ENODEV; | ||
1319 | |||
1254 | if (channel >= IPMI_MAX_CHANNELS) | 1320 | if (channel >= IPMI_MAX_CHANNELS) |
1255 | return -EINVAL; | 1321 | rv = -EINVAL; |
1256 | *address = user->intf->addrinfo[channel].address; | 1322 | else |
1257 | return 0; | 1323 | *address = user->intf->addrinfo[channel].address; |
1324 | release_ipmi_user(user, index); | ||
1325 | |||
1326 | return rv; | ||
1258 | } | 1327 | } |
1259 | EXPORT_SYMBOL(ipmi_get_my_address); | 1328 | EXPORT_SYMBOL(ipmi_get_my_address); |
1260 | 1329 | ||
1261 | int ipmi_set_my_LUN(ipmi_user_t user, | 1330 | int ipmi_set_my_LUN(struct ipmi_user *user, |
1262 | unsigned int channel, | 1331 | unsigned int channel, |
1263 | unsigned char LUN) | 1332 | unsigned char LUN) |
1264 | { | 1333 | { |
1334 | int index, rv = 0; | ||
1335 | |||
1336 | user = acquire_ipmi_user(user, &index); | ||
1337 | if (!user) | ||
1338 | return -ENODEV; | ||
1339 | |||
1265 | if (channel >= IPMI_MAX_CHANNELS) | 1340 | if (channel >= IPMI_MAX_CHANNELS) |
1266 | return -EINVAL; | 1341 | rv = -EINVAL; |
1267 | user->intf->addrinfo[channel].lun = LUN & 0x3; | 1342 | else |
1343 | user->intf->addrinfo[channel].lun = LUN & 0x3; | ||
1344 | release_ipmi_user(user, index); | ||
1345 | |||
1268 | return 0; | 1346 | return 0; |
1269 | } | 1347 | } |
1270 | EXPORT_SYMBOL(ipmi_set_my_LUN); | 1348 | EXPORT_SYMBOL(ipmi_set_my_LUN); |
1271 | 1349 | ||
1272 | int ipmi_get_my_LUN(ipmi_user_t user, | 1350 | int ipmi_get_my_LUN(struct ipmi_user *user, |
1273 | unsigned int channel, | 1351 | unsigned int channel, |
1274 | unsigned char *address) | 1352 | unsigned char *address) |
1275 | { | 1353 | { |
1354 | int index, rv = 0; | ||
1355 | |||
1356 | user = acquire_ipmi_user(user, &index); | ||
1357 | if (!user) | ||
1358 | return -ENODEV; | ||
1359 | |||
1276 | if (channel >= IPMI_MAX_CHANNELS) | 1360 | if (channel >= IPMI_MAX_CHANNELS) |
1277 | return -EINVAL; | 1361 | rv = -EINVAL; |
1278 | *address = user->intf->addrinfo[channel].lun; | 1362 | else |
1279 | return 0; | 1363 | *address = user->intf->addrinfo[channel].lun; |
1364 | release_ipmi_user(user, index); | ||
1365 | |||
1366 | return rv; | ||
1280 | } | 1367 | } |
1281 | EXPORT_SYMBOL(ipmi_get_my_LUN); | 1368 | EXPORT_SYMBOL(ipmi_get_my_LUN); |
1282 | 1369 | ||
1283 | int ipmi_get_maintenance_mode(ipmi_user_t user) | 1370 | int ipmi_get_maintenance_mode(struct ipmi_user *user) |
1284 | { | 1371 | { |
1285 | int mode; | 1372 | int mode, index; |
1286 | unsigned long flags; | 1373 | unsigned long flags; |
1287 | 1374 | ||
1375 | user = acquire_ipmi_user(user, &index); | ||
1376 | if (!user) | ||
1377 | return -ENODEV; | ||
1378 | |||
1288 | spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags); | 1379 | spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags); |
1289 | mode = user->intf->maintenance_mode; | 1380 | mode = user->intf->maintenance_mode; |
1290 | spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags); | 1381 | spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags); |
1382 | release_ipmi_user(user, index); | ||
1291 | 1383 | ||
1292 | return mode; | 1384 | return mode; |
1293 | } | 1385 | } |
1294 | EXPORT_SYMBOL(ipmi_get_maintenance_mode); | 1386 | EXPORT_SYMBOL(ipmi_get_maintenance_mode); |
1295 | 1387 | ||
1296 | static void maintenance_mode_update(ipmi_smi_t intf) | 1388 | static void maintenance_mode_update(struct ipmi_smi *intf) |
1297 | { | 1389 | { |
1298 | if (intf->handlers->set_maintenance_mode) | 1390 | if (intf->handlers->set_maintenance_mode) |
1299 | intf->handlers->set_maintenance_mode( | 1391 | intf->handlers->set_maintenance_mode( |
1300 | intf->send_info, intf->maintenance_mode_enable); | 1392 | intf->send_info, intf->maintenance_mode_enable); |
1301 | } | 1393 | } |
1302 | 1394 | ||
1303 | int ipmi_set_maintenance_mode(ipmi_user_t user, int mode) | 1395 | int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode) |
1304 | { | 1396 | { |
1305 | int rv = 0; | 1397 | int rv = 0, index; |
1306 | unsigned long flags; | 1398 | unsigned long flags; |
1307 | ipmi_smi_t intf = user->intf; | 1399 | struct ipmi_smi *intf = user->intf; |
1400 | |||
1401 | user = acquire_ipmi_user(user, &index); | ||
1402 | if (!user) | ||
1403 | return -ENODEV; | ||
1308 | 1404 | ||
1309 | spin_lock_irqsave(&intf->maintenance_mode_lock, flags); | 1405 | spin_lock_irqsave(&intf->maintenance_mode_lock, flags); |
1310 | if (intf->maintenance_mode != mode) { | 1406 | if (intf->maintenance_mode != mode) { |
@@ -1332,17 +1428,23 @@ int ipmi_set_maintenance_mode(ipmi_user_t user, int mode) | |||
1332 | } | 1428 | } |
1333 | out_unlock: | 1429 | out_unlock: |
1334 | spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags); | 1430 | spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags); |
1431 | release_ipmi_user(user, index); | ||
1335 | 1432 | ||
1336 | return rv; | 1433 | return rv; |
1337 | } | 1434 | } |
1338 | EXPORT_SYMBOL(ipmi_set_maintenance_mode); | 1435 | EXPORT_SYMBOL(ipmi_set_maintenance_mode); |
1339 | 1436 | ||
1340 | int ipmi_set_gets_events(ipmi_user_t user, bool val) | 1437 | int ipmi_set_gets_events(struct ipmi_user *user, bool val) |
1341 | { | 1438 | { |
1342 | unsigned long flags; | 1439 | unsigned long flags; |
1343 | ipmi_smi_t intf = user->intf; | 1440 | struct ipmi_smi *intf = user->intf; |
1344 | struct ipmi_recv_msg *msg, *msg2; | 1441 | struct ipmi_recv_msg *msg, *msg2; |
1345 | struct list_head msgs; | 1442 | struct list_head msgs; |
1443 | int index; | ||
1444 | |||
1445 | user = acquire_ipmi_user(user, &index); | ||
1446 | if (!user) | ||
1447 | return -ENODEV; | ||
1346 | 1448 | ||
1347 | INIT_LIST_HEAD(&msgs); | 1449 | INIT_LIST_HEAD(&msgs); |
1348 | 1450 | ||
@@ -1383,7 +1485,7 @@ int ipmi_set_gets_events(ipmi_user_t user, bool val) | |||
1383 | list_for_each_entry_safe(msg, msg2, &msgs, link) { | 1485 | list_for_each_entry_safe(msg, msg2, &msgs, link) { |
1384 | msg->user = user; | 1486 | msg->user = user; |
1385 | kref_get(&user->refcount); | 1487 | kref_get(&user->refcount); |
1386 | deliver_response(msg); | 1488 | deliver_local_response(intf, msg); |
1387 | } | 1489 | } |
1388 | 1490 | ||
1389 | spin_lock_irqsave(&intf->events_lock, flags); | 1491 | spin_lock_irqsave(&intf->events_lock, flags); |
@@ -1392,12 +1494,13 @@ int ipmi_set_gets_events(ipmi_user_t user, bool val) | |||
1392 | 1494 | ||
1393 | out: | 1495 | out: |
1394 | spin_unlock_irqrestore(&intf->events_lock, flags); | 1496 | spin_unlock_irqrestore(&intf->events_lock, flags); |
1497 | release_ipmi_user(user, index); | ||
1395 | 1498 | ||
1396 | return 0; | 1499 | return 0; |
1397 | } | 1500 | } |
1398 | EXPORT_SYMBOL(ipmi_set_gets_events); | 1501 | EXPORT_SYMBOL(ipmi_set_gets_events); |
1399 | 1502 | ||
1400 | static struct cmd_rcvr *find_cmd_rcvr(ipmi_smi_t intf, | 1503 | static struct cmd_rcvr *find_cmd_rcvr(struct ipmi_smi *intf, |
1401 | unsigned char netfn, | 1504 | unsigned char netfn, |
1402 | unsigned char cmd, | 1505 | unsigned char cmd, |
1403 | unsigned char chan) | 1506 | unsigned char chan) |
@@ -1412,7 +1515,7 @@ static struct cmd_rcvr *find_cmd_rcvr(ipmi_smi_t intf, | |||
1412 | return NULL; | 1515 | return NULL; |
1413 | } | 1516 | } |
1414 | 1517 | ||
1415 | static int is_cmd_rcvr_exclusive(ipmi_smi_t intf, | 1518 | static int is_cmd_rcvr_exclusive(struct ipmi_smi *intf, |
1416 | unsigned char netfn, | 1519 | unsigned char netfn, |
1417 | unsigned char cmd, | 1520 | unsigned char cmd, |
1418 | unsigned int chans) | 1521 | unsigned int chans) |
@@ -1427,19 +1530,24 @@ static int is_cmd_rcvr_exclusive(ipmi_smi_t intf, | |||
1427 | return 1; | 1530 | return 1; |
1428 | } | 1531 | } |
1429 | 1532 | ||
1430 | int ipmi_register_for_cmd(ipmi_user_t user, | 1533 | int ipmi_register_for_cmd(struct ipmi_user *user, |
1431 | unsigned char netfn, | 1534 | unsigned char netfn, |
1432 | unsigned char cmd, | 1535 | unsigned char cmd, |
1433 | unsigned int chans) | 1536 | unsigned int chans) |
1434 | { | 1537 | { |
1435 | ipmi_smi_t intf = user->intf; | 1538 | struct ipmi_smi *intf = user->intf; |
1436 | struct cmd_rcvr *rcvr; | 1539 | struct cmd_rcvr *rcvr; |
1437 | int rv = 0; | 1540 | int rv = 0, index; |
1438 | 1541 | ||
1542 | user = acquire_ipmi_user(user, &index); | ||
1543 | if (!user) | ||
1544 | return -ENODEV; | ||
1439 | 1545 | ||
1440 | rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL); | 1546 | rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL); |
1441 | if (!rcvr) | 1547 | if (!rcvr) { |
1442 | return -ENOMEM; | 1548 | rv = -ENOMEM; |
1549 | goto out_release; | ||
1550 | } | ||
1443 | rcvr->cmd = cmd; | 1551 | rcvr->cmd = cmd; |
1444 | rcvr->netfn = netfn; | 1552 | rcvr->netfn = netfn; |
1445 | rcvr->chans = chans; | 1553 | rcvr->chans = chans; |
@@ -1457,24 +1565,30 @@ int ipmi_register_for_cmd(ipmi_user_t user, | |||
1457 | 1565 | ||
1458 | list_add_rcu(&rcvr->link, &intf->cmd_rcvrs); | 1566 | list_add_rcu(&rcvr->link, &intf->cmd_rcvrs); |
1459 | 1567 | ||
1460 | out_unlock: | 1568 | out_unlock: |
1461 | mutex_unlock(&intf->cmd_rcvrs_mutex); | 1569 | mutex_unlock(&intf->cmd_rcvrs_mutex); |
1462 | if (rv) | 1570 | if (rv) |
1463 | kfree(rcvr); | 1571 | kfree(rcvr); |
1572 | out_release: | ||
1573 | release_ipmi_user(user, index); | ||
1464 | 1574 | ||
1465 | return rv; | 1575 | return rv; |
1466 | } | 1576 | } |
1467 | EXPORT_SYMBOL(ipmi_register_for_cmd); | 1577 | EXPORT_SYMBOL(ipmi_register_for_cmd); |
1468 | 1578 | ||
1469 | int ipmi_unregister_for_cmd(ipmi_user_t user, | 1579 | int ipmi_unregister_for_cmd(struct ipmi_user *user, |
1470 | unsigned char netfn, | 1580 | unsigned char netfn, |
1471 | unsigned char cmd, | 1581 | unsigned char cmd, |
1472 | unsigned int chans) | 1582 | unsigned int chans) |
1473 | { | 1583 | { |
1474 | ipmi_smi_t intf = user->intf; | 1584 | struct ipmi_smi *intf = user->intf; |
1475 | struct cmd_rcvr *rcvr; | 1585 | struct cmd_rcvr *rcvr; |
1476 | struct cmd_rcvr *rcvrs = NULL; | 1586 | struct cmd_rcvr *rcvrs = NULL; |
1477 | int i, rv = -ENOENT; | 1587 | int i, rv = -ENOENT, index; |
1588 | |||
1589 | user = acquire_ipmi_user(user, &index); | ||
1590 | if (!user) | ||
1591 | return -ENODEV; | ||
1478 | 1592 | ||
1479 | mutex_lock(&intf->cmd_rcvrs_mutex); | 1593 | mutex_lock(&intf->cmd_rcvrs_mutex); |
1480 | for (i = 0; i < IPMI_NUM_CHANNELS; i++) { | 1594 | for (i = 0; i < IPMI_NUM_CHANNELS; i++) { |
@@ -1495,12 +1609,14 @@ int ipmi_unregister_for_cmd(ipmi_user_t user, | |||
1495 | } | 1609 | } |
1496 | mutex_unlock(&intf->cmd_rcvrs_mutex); | 1610 | mutex_unlock(&intf->cmd_rcvrs_mutex); |
1497 | synchronize_rcu(); | 1611 | synchronize_rcu(); |
1612 | release_ipmi_user(user, index); | ||
1498 | while (rcvrs) { | 1613 | while (rcvrs) { |
1499 | atomic_dec(&intf->event_waiters); | 1614 | atomic_dec(&intf->event_waiters); |
1500 | rcvr = rcvrs; | 1615 | rcvr = rcvrs; |
1501 | rcvrs = rcvr->next; | 1616 | rcvrs = rcvr->next; |
1502 | kfree(rcvr); | 1617 | kfree(rcvr); |
1503 | } | 1618 | } |
1619 | |||
1504 | return rv; | 1620 | return rv; |
1505 | } | 1621 | } |
1506 | EXPORT_SYMBOL(ipmi_unregister_for_cmd); | 1622 | EXPORT_SYMBOL(ipmi_unregister_for_cmd); |
@@ -1535,21 +1651,19 @@ static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg, | |||
1535 | smi_msg->data[3] = 0; | 1651 | smi_msg->data[3] = 0; |
1536 | smi_msg->data[i+3] = ipmb_addr->slave_addr; | 1652 | smi_msg->data[i+3] = ipmb_addr->slave_addr; |
1537 | smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3); | 1653 | smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3); |
1538 | smi_msg->data[i+5] = ipmb_checksum(&(smi_msg->data[i+3]), 2); | 1654 | smi_msg->data[i+5] = ipmb_checksum(&smi_msg->data[i + 3], 2); |
1539 | smi_msg->data[i+6] = source_address; | 1655 | smi_msg->data[i+6] = source_address; |
1540 | smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun; | 1656 | smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun; |
1541 | smi_msg->data[i+8] = msg->cmd; | 1657 | smi_msg->data[i+8] = msg->cmd; |
1542 | 1658 | ||
1543 | /* Now tack on the data to the message. */ | 1659 | /* Now tack on the data to the message. */ |
1544 | if (msg->data_len > 0) | 1660 | if (msg->data_len > 0) |
1545 | memcpy(&(smi_msg->data[i+9]), msg->data, | 1661 | memcpy(&smi_msg->data[i + 9], msg->data, msg->data_len); |
1546 | msg->data_len); | ||
1547 | smi_msg->data_size = msg->data_len + 9; | 1662 | smi_msg->data_size = msg->data_len + 9; |
1548 | 1663 | ||
1549 | /* Now calculate the checksum and tack it on. */ | 1664 | /* Now calculate the checksum and tack it on. */ |
1550 | smi_msg->data[i+smi_msg->data_size] | 1665 | smi_msg->data[i+smi_msg->data_size] |
1551 | = ipmb_checksum(&(smi_msg->data[i+6]), | 1666 | = ipmb_checksum(&smi_msg->data[i + 6], smi_msg->data_size - 6); |
1552 | smi_msg->data_size-6); | ||
1553 | 1667 | ||
1554 | /* | 1668 | /* |
1555 | * Add on the checksum size and the offset from the | 1669 | * Add on the checksum size and the offset from the |
@@ -1574,21 +1688,19 @@ static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg, | |||
1574 | smi_msg->data[3] = lan_addr->session_handle; | 1688 | smi_msg->data[3] = lan_addr->session_handle; |
1575 | smi_msg->data[4] = lan_addr->remote_SWID; | 1689 | smi_msg->data[4] = lan_addr->remote_SWID; |
1576 | smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3); | 1690 | smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3); |
1577 | smi_msg->data[6] = ipmb_checksum(&(smi_msg->data[4]), 2); | 1691 | smi_msg->data[6] = ipmb_checksum(&smi_msg->data[4], 2); |
1578 | smi_msg->data[7] = lan_addr->local_SWID; | 1692 | smi_msg->data[7] = lan_addr->local_SWID; |
1579 | smi_msg->data[8] = (ipmb_seq << 2) | source_lun; | 1693 | smi_msg->data[8] = (ipmb_seq << 2) | source_lun; |
1580 | smi_msg->data[9] = msg->cmd; | 1694 | smi_msg->data[9] = msg->cmd; |
1581 | 1695 | ||
1582 | /* Now tack on the data to the message. */ | 1696 | /* Now tack on the data to the message. */ |
1583 | if (msg->data_len > 0) | 1697 | if (msg->data_len > 0) |
1584 | memcpy(&(smi_msg->data[10]), msg->data, | 1698 | memcpy(&smi_msg->data[10], msg->data, msg->data_len); |
1585 | msg->data_len); | ||
1586 | smi_msg->data_size = msg->data_len + 10; | 1699 | smi_msg->data_size = msg->data_len + 10; |
1587 | 1700 | ||
1588 | /* Now calculate the checksum and tack it on. */ | 1701 | /* Now calculate the checksum and tack it on. */ |
1589 | smi_msg->data[smi_msg->data_size] | 1702 | smi_msg->data[smi_msg->data_size] |
1590 | = ipmb_checksum(&(smi_msg->data[7]), | 1703 | = ipmb_checksum(&smi_msg->data[7], smi_msg->data_size - 7); |
1591 | smi_msg->data_size-7); | ||
1592 | 1704 | ||
1593 | /* | 1705 | /* |
1594 | * Add on the checksum size and the offset from the | 1706 | * Add on the checksum size and the offset from the |
@@ -1599,7 +1711,7 @@ static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg, | |||
1599 | smi_msg->msgid = msgid; | 1711 | smi_msg->msgid = msgid; |
1600 | } | 1712 | } |
1601 | 1713 | ||
1602 | static struct ipmi_smi_msg *smi_add_send_msg(ipmi_smi_t intf, | 1714 | static struct ipmi_smi_msg *smi_add_send_msg(struct ipmi_smi *intf, |
1603 | struct ipmi_smi_msg *smi_msg, | 1715 | struct ipmi_smi_msg *smi_msg, |
1604 | int priority) | 1716 | int priority) |
1605 | { | 1717 | { |
@@ -1617,7 +1729,8 @@ static struct ipmi_smi_msg *smi_add_send_msg(ipmi_smi_t intf, | |||
1617 | } | 1729 | } |
1618 | 1730 | ||
1619 | 1731 | ||
1620 | static void smi_send(ipmi_smi_t intf, const struct ipmi_smi_handlers *handlers, | 1732 | static void smi_send(struct ipmi_smi *intf, |
1733 | const struct ipmi_smi_handlers *handlers, | ||
1621 | struct ipmi_smi_msg *smi_msg, int priority) | 1734 | struct ipmi_smi_msg *smi_msg, int priority) |
1622 | { | 1735 | { |
1623 | int run_to_completion = intf->run_to_completion; | 1736 | int run_to_completion = intf->run_to_completion; |
@@ -1636,405 +1749,435 @@ static void smi_send(ipmi_smi_t intf, const struct ipmi_smi_handlers *handlers, | |||
1636 | handlers->sender(intf->send_info, smi_msg); | 1749 | handlers->sender(intf->send_info, smi_msg); |
1637 | } | 1750 | } |
1638 | 1751 | ||
1639 | /* | 1752 | static bool is_maintenance_mode_cmd(struct kernel_ipmi_msg *msg) |
1640 | * Separate from ipmi_request so that the user does not have to be | ||
1641 | * supplied in certain circumstances (mainly at panic time). If | ||
1642 | * messages are supplied, they will be freed, even if an error | ||
1643 | * occurs. | ||
1644 | */ | ||
1645 | static int i_ipmi_request(ipmi_user_t user, | ||
1646 | ipmi_smi_t intf, | ||
1647 | struct ipmi_addr *addr, | ||
1648 | long msgid, | ||
1649 | struct kernel_ipmi_msg *msg, | ||
1650 | void *user_msg_data, | ||
1651 | void *supplied_smi, | ||
1652 | struct ipmi_recv_msg *supplied_recv, | ||
1653 | int priority, | ||
1654 | unsigned char source_address, | ||
1655 | unsigned char source_lun, | ||
1656 | int retries, | ||
1657 | unsigned int retry_time_ms) | ||
1658 | { | 1753 | { |
1659 | int rv = 0; | 1754 | return (((msg->netfn == IPMI_NETFN_APP_REQUEST) |
1660 | struct ipmi_smi_msg *smi_msg; | 1755 | && ((msg->cmd == IPMI_COLD_RESET_CMD) |
1661 | struct ipmi_recv_msg *recv_msg; | 1756 | || (msg->cmd == IPMI_WARM_RESET_CMD))) |
1662 | unsigned long flags; | 1757 | || (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST)); |
1758 | } | ||
1663 | 1759 | ||
1760 | static int i_ipmi_req_sysintf(struct ipmi_smi *intf, | ||
1761 | struct ipmi_addr *addr, | ||
1762 | long msgid, | ||
1763 | struct kernel_ipmi_msg *msg, | ||
1764 | struct ipmi_smi_msg *smi_msg, | ||
1765 | struct ipmi_recv_msg *recv_msg, | ||
1766 | int retries, | ||
1767 | unsigned int retry_time_ms) | ||
1768 | { | ||
1769 | struct ipmi_system_interface_addr *smi_addr; | ||
1664 | 1770 | ||
1665 | if (supplied_recv) | 1771 | if (msg->netfn & 1) |
1666 | recv_msg = supplied_recv; | 1772 | /* Responses are not allowed to the SMI. */ |
1667 | else { | 1773 | return -EINVAL; |
1668 | recv_msg = ipmi_alloc_recv_msg(); | ||
1669 | if (recv_msg == NULL) | ||
1670 | return -ENOMEM; | ||
1671 | } | ||
1672 | recv_msg->user_msg_data = user_msg_data; | ||
1673 | 1774 | ||
1674 | if (supplied_smi) | 1775 | smi_addr = (struct ipmi_system_interface_addr *) addr; |
1675 | smi_msg = (struct ipmi_smi_msg *) supplied_smi; | 1776 | if (smi_addr->lun > 3) { |
1676 | else { | 1777 | ipmi_inc_stat(intf, sent_invalid_commands); |
1677 | smi_msg = ipmi_alloc_smi_msg(); | 1778 | return -EINVAL; |
1678 | if (smi_msg == NULL) { | ||
1679 | ipmi_free_recv_msg(recv_msg); | ||
1680 | return -ENOMEM; | ||
1681 | } | ||
1682 | } | 1779 | } |
1683 | 1780 | ||
1684 | rcu_read_lock(); | 1781 | memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr)); |
1685 | if (intf->in_shutdown) { | ||
1686 | rv = -ENODEV; | ||
1687 | goto out_err; | ||
1688 | } | ||
1689 | 1782 | ||
1690 | recv_msg->user = user; | 1783 | if ((msg->netfn == IPMI_NETFN_APP_REQUEST) |
1691 | if (user) | 1784 | && ((msg->cmd == IPMI_SEND_MSG_CMD) |
1692 | kref_get(&user->refcount); | 1785 | || (msg->cmd == IPMI_GET_MSG_CMD) |
1693 | recv_msg->msgid = msgid; | 1786 | || (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) { |
1694 | /* | 1787 | /* |
1695 | * Store the message to send in the receive message so timeout | 1788 | * We don't let the user do these, since we manage |
1696 | * responses can get the proper response data. | 1789 | * the sequence numbers. |
1697 | */ | 1790 | */ |
1698 | recv_msg->msg = *msg; | 1791 | ipmi_inc_stat(intf, sent_invalid_commands); |
1792 | return -EINVAL; | ||
1793 | } | ||
1699 | 1794 | ||
1700 | if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) { | 1795 | if (is_maintenance_mode_cmd(msg)) { |
1701 | struct ipmi_system_interface_addr *smi_addr; | 1796 | unsigned long flags; |
1702 | 1797 | ||
1703 | if (msg->netfn & 1) { | 1798 | spin_lock_irqsave(&intf->maintenance_mode_lock, flags); |
1704 | /* Responses are not allowed to the SMI. */ | 1799 | intf->auto_maintenance_timeout |
1705 | rv = -EINVAL; | 1800 | = maintenance_mode_timeout_ms; |
1706 | goto out_err; | 1801 | if (!intf->maintenance_mode |
1802 | && !intf->maintenance_mode_enable) { | ||
1803 | intf->maintenance_mode_enable = true; | ||
1804 | maintenance_mode_update(intf); | ||
1707 | } | 1805 | } |
1806 | spin_unlock_irqrestore(&intf->maintenance_mode_lock, | ||
1807 | flags); | ||
1808 | } | ||
1708 | 1809 | ||
1709 | smi_addr = (struct ipmi_system_interface_addr *) addr; | 1810 | if (msg->data_len + 2 > IPMI_MAX_MSG_LENGTH) { |
1710 | if (smi_addr->lun > 3) { | 1811 | ipmi_inc_stat(intf, sent_invalid_commands); |
1711 | ipmi_inc_stat(intf, sent_invalid_commands); | 1812 | return -EMSGSIZE; |
1712 | rv = -EINVAL; | 1813 | } |
1713 | goto out_err; | ||
1714 | } | ||
1715 | 1814 | ||
1716 | memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr)); | 1815 | smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3); |
1816 | smi_msg->data[1] = msg->cmd; | ||
1817 | smi_msg->msgid = msgid; | ||
1818 | smi_msg->user_data = recv_msg; | ||
1819 | if (msg->data_len > 0) | ||
1820 | memcpy(&smi_msg->data[2], msg->data, msg->data_len); | ||
1821 | smi_msg->data_size = msg->data_len + 2; | ||
1822 | ipmi_inc_stat(intf, sent_local_commands); | ||
1717 | 1823 | ||
1718 | if ((msg->netfn == IPMI_NETFN_APP_REQUEST) | 1824 | return 0; |
1719 | && ((msg->cmd == IPMI_SEND_MSG_CMD) | 1825 | } |
1720 | || (msg->cmd == IPMI_GET_MSG_CMD) | ||
1721 | || (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) { | ||
1722 | /* | ||
1723 | * We don't let the user do these, since we manage | ||
1724 | * the sequence numbers. | ||
1725 | */ | ||
1726 | ipmi_inc_stat(intf, sent_invalid_commands); | ||
1727 | rv = -EINVAL; | ||
1728 | goto out_err; | ||
1729 | } | ||
1730 | 1826 | ||
1731 | if (((msg->netfn == IPMI_NETFN_APP_REQUEST) | 1827 | static int i_ipmi_req_ipmb(struct ipmi_smi *intf, |
1732 | && ((msg->cmd == IPMI_COLD_RESET_CMD) | 1828 | struct ipmi_addr *addr, |
1733 | || (msg->cmd == IPMI_WARM_RESET_CMD))) | 1829 | long msgid, |
1734 | || (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST)) { | 1830 | struct kernel_ipmi_msg *msg, |
1735 | spin_lock_irqsave(&intf->maintenance_mode_lock, flags); | 1831 | struct ipmi_smi_msg *smi_msg, |
1736 | intf->auto_maintenance_timeout | 1832 | struct ipmi_recv_msg *recv_msg, |
1737 | = IPMI_MAINTENANCE_MODE_TIMEOUT; | 1833 | unsigned char source_address, |
1738 | if (!intf->maintenance_mode | 1834 | unsigned char source_lun, |
1739 | && !intf->maintenance_mode_enable) { | 1835 | int retries, |
1740 | intf->maintenance_mode_enable = true; | 1836 | unsigned int retry_time_ms) |
1741 | maintenance_mode_update(intf); | 1837 | { |
1742 | } | 1838 | struct ipmi_ipmb_addr *ipmb_addr; |
1743 | spin_unlock_irqrestore(&intf->maintenance_mode_lock, | 1839 | unsigned char ipmb_seq; |
1744 | flags); | 1840 | long seqid; |
1745 | } | 1841 | int broadcast = 0; |
1842 | struct ipmi_channel *chans; | ||
1843 | int rv = 0; | ||
1746 | 1844 | ||
1747 | if ((msg->data_len + 2) > IPMI_MAX_MSG_LENGTH) { | 1845 | if (addr->channel >= IPMI_MAX_CHANNELS) { |
1748 | ipmi_inc_stat(intf, sent_invalid_commands); | 1846 | ipmi_inc_stat(intf, sent_invalid_commands); |
1749 | rv = -EMSGSIZE; | 1847 | return -EINVAL; |
1750 | goto out_err; | 1848 | } |
1751 | } | ||
1752 | 1849 | ||
1753 | smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3); | 1850 | chans = READ_ONCE(intf->channel_list)->c; |
1754 | smi_msg->data[1] = msg->cmd; | ||
1755 | smi_msg->msgid = msgid; | ||
1756 | smi_msg->user_data = recv_msg; | ||
1757 | if (msg->data_len > 0) | ||
1758 | memcpy(&(smi_msg->data[2]), msg->data, msg->data_len); | ||
1759 | smi_msg->data_size = msg->data_len + 2; | ||
1760 | ipmi_inc_stat(intf, sent_local_commands); | ||
1761 | } else if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) { | ||
1762 | struct ipmi_ipmb_addr *ipmb_addr; | ||
1763 | unsigned char ipmb_seq; | ||
1764 | long seqid; | ||
1765 | int broadcast = 0; | ||
1766 | struct ipmi_channel *chans; | ||
1767 | 1851 | ||
1768 | if (addr->channel >= IPMI_MAX_CHANNELS) { | 1852 | if (chans[addr->channel].medium != IPMI_CHANNEL_MEDIUM_IPMB) { |
1769 | ipmi_inc_stat(intf, sent_invalid_commands); | 1853 | ipmi_inc_stat(intf, sent_invalid_commands); |
1770 | rv = -EINVAL; | 1854 | return -EINVAL; |
1771 | goto out_err; | 1855 | } |
1772 | } | ||
1773 | 1856 | ||
1774 | chans = READ_ONCE(intf->channel_list)->c; | 1857 | if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) { |
1858 | /* | ||
1859 | * Broadcasts add a zero at the beginning of the | ||
1860 | * message, but otherwise is the same as an IPMB | ||
1861 | * address. | ||
1862 | */ | ||
1863 | addr->addr_type = IPMI_IPMB_ADDR_TYPE; | ||
1864 | broadcast = 1; | ||
1865 | retries = 0; /* Don't retry broadcasts. */ | ||
1866 | } | ||
1775 | 1867 | ||
1776 | if (chans[addr->channel].medium != IPMI_CHANNEL_MEDIUM_IPMB) { | 1868 | /* |
1777 | ipmi_inc_stat(intf, sent_invalid_commands); | 1869 | * 9 for the header and 1 for the checksum, plus |
1778 | rv = -EINVAL; | 1870 | * possibly one for the broadcast. |
1779 | goto out_err; | 1871 | */ |
1780 | } | 1872 | if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) { |
1873 | ipmi_inc_stat(intf, sent_invalid_commands); | ||
1874 | return -EMSGSIZE; | ||
1875 | } | ||
1781 | 1876 | ||
1782 | if (retries < 0) { | 1877 | ipmb_addr = (struct ipmi_ipmb_addr *) addr; |
1783 | if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) | 1878 | if (ipmb_addr->lun > 3) { |
1784 | retries = 0; /* Don't retry broadcasts. */ | 1879 | ipmi_inc_stat(intf, sent_invalid_commands); |
1785 | else | 1880 | return -EINVAL; |
1786 | retries = 4; | 1881 | } |
1787 | } | ||
1788 | if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) { | ||
1789 | /* | ||
1790 | * Broadcasts add a zero at the beginning of the | ||
1791 | * message, but otherwise is the same as an IPMB | ||
1792 | * address. | ||
1793 | */ | ||
1794 | addr->addr_type = IPMI_IPMB_ADDR_TYPE; | ||
1795 | broadcast = 1; | ||
1796 | } | ||
1797 | 1882 | ||
1883 | memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr)); | ||
1798 | 1884 | ||
1799 | /* Default to 1 second retries. */ | 1885 | if (recv_msg->msg.netfn & 0x1) { |
1800 | if (retry_time_ms == 0) | 1886 | /* |
1801 | retry_time_ms = 1000; | 1887 | * It's a response, so use the user's sequence |
1888 | * from msgid. | ||
1889 | */ | ||
1890 | ipmi_inc_stat(intf, sent_ipmb_responses); | ||
1891 | format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid, | ||
1892 | msgid, broadcast, | ||
1893 | source_address, source_lun); | ||
1802 | 1894 | ||
1803 | /* | 1895 | /* |
1804 | * 9 for the header and 1 for the checksum, plus | 1896 | * Save the receive message so we can use it |
1805 | * possibly one for the broadcast. | 1897 | * to deliver the response. |
1806 | */ | 1898 | */ |
1807 | if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) { | 1899 | smi_msg->user_data = recv_msg; |
1808 | ipmi_inc_stat(intf, sent_invalid_commands); | 1900 | } else { |
1809 | rv = -EMSGSIZE; | 1901 | /* It's a command, so get a sequence for it. */ |
1810 | goto out_err; | 1902 | unsigned long flags; |
1811 | } | ||
1812 | 1903 | ||
1813 | ipmb_addr = (struct ipmi_ipmb_addr *) addr; | 1904 | spin_lock_irqsave(&intf->seq_lock, flags); |
1814 | if (ipmb_addr->lun > 3) { | ||
1815 | ipmi_inc_stat(intf, sent_invalid_commands); | ||
1816 | rv = -EINVAL; | ||
1817 | goto out_err; | ||
1818 | } | ||
1819 | 1905 | ||
1820 | memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr)); | 1906 | if (is_maintenance_mode_cmd(msg)) |
1907 | intf->ipmb_maintenance_mode_timeout = | ||
1908 | maintenance_mode_timeout_ms; | ||
1821 | 1909 | ||
1822 | if (recv_msg->msg.netfn & 0x1) { | 1910 | if (intf->ipmb_maintenance_mode_timeout && retry_time_ms == 0) |
1823 | /* | 1911 | /* Different default in maintenance mode */ |
1824 | * It's a response, so use the user's sequence | 1912 | retry_time_ms = default_maintenance_retry_ms; |
1825 | * from msgid. | ||
1826 | */ | ||
1827 | ipmi_inc_stat(intf, sent_ipmb_responses); | ||
1828 | format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid, | ||
1829 | msgid, broadcast, | ||
1830 | source_address, source_lun); | ||
1831 | 1913 | ||
1914 | /* | ||
1915 | * Create a sequence number with a 1 second | ||
1916 | * timeout and 4 retries. | ||
1917 | */ | ||
1918 | rv = intf_next_seq(intf, | ||
1919 | recv_msg, | ||
1920 | retry_time_ms, | ||
1921 | retries, | ||
1922 | broadcast, | ||
1923 | &ipmb_seq, | ||
1924 | &seqid); | ||
1925 | if (rv) | ||
1832 | /* | 1926 | /* |
1833 | * Save the receive message so we can use it | 1927 | * We have used up all the sequence numbers, |
1834 | * to deliver the response. | 1928 | * probably, so abort. |
1835 | */ | 1929 | */ |
1836 | smi_msg->user_data = recv_msg; | 1930 | goto out_err; |
1837 | } else { | ||
1838 | /* It's a command, so get a sequence for it. */ | ||
1839 | 1931 | ||
1840 | spin_lock_irqsave(&(intf->seq_lock), flags); | 1932 | ipmi_inc_stat(intf, sent_ipmb_commands); |
1841 | 1933 | ||
1842 | /* | 1934 | /* |
1843 | * Create a sequence number with a 1 second | 1935 | * Store the sequence number in the message, |
1844 | * timeout and 4 retries. | 1936 | * so that when the send message response |
1845 | */ | 1937 | * comes back we can start the timer. |
1846 | rv = intf_next_seq(intf, | 1938 | */ |
1847 | recv_msg, | 1939 | format_ipmb_msg(smi_msg, msg, ipmb_addr, |
1848 | retry_time_ms, | 1940 | STORE_SEQ_IN_MSGID(ipmb_seq, seqid), |
1849 | retries, | 1941 | ipmb_seq, broadcast, |
1850 | broadcast, | 1942 | source_address, source_lun); |
1851 | &ipmb_seq, | ||
1852 | &seqid); | ||
1853 | if (rv) { | ||
1854 | /* | ||
1855 | * We have used up all the sequence numbers, | ||
1856 | * probably, so abort. | ||
1857 | */ | ||
1858 | spin_unlock_irqrestore(&(intf->seq_lock), | ||
1859 | flags); | ||
1860 | goto out_err; | ||
1861 | } | ||
1862 | 1943 | ||
1863 | ipmi_inc_stat(intf, sent_ipmb_commands); | 1944 | /* |
1945 | * Copy the message into the recv message data, so we | ||
1946 | * can retransmit it later if necessary. | ||
1947 | */ | ||
1948 | memcpy(recv_msg->msg_data, smi_msg->data, | ||
1949 | smi_msg->data_size); | ||
1950 | recv_msg->msg.data = recv_msg->msg_data; | ||
1951 | recv_msg->msg.data_len = smi_msg->data_size; | ||
1864 | 1952 | ||
1865 | /* | 1953 | /* |
1866 | * Store the sequence number in the message, | 1954 | * We don't unlock until here, because we need |
1867 | * so that when the send message response | 1955 | * to copy the completed message into the |
1868 | * comes back we can start the timer. | 1956 | * recv_msg before we release the lock. |
1869 | */ | 1957 | * Otherwise, race conditions may bite us. I |
1870 | format_ipmb_msg(smi_msg, msg, ipmb_addr, | 1958 | * know that's pretty paranoid, but I prefer |
1871 | STORE_SEQ_IN_MSGID(ipmb_seq, seqid), | 1959 | * to be correct. |
1872 | ipmb_seq, broadcast, | 1960 | */ |
1873 | source_address, source_lun); | 1961 | out_err: |
1962 | spin_unlock_irqrestore(&intf->seq_lock, flags); | ||
1963 | } | ||
1874 | 1964 | ||
1875 | /* | 1965 | return rv; |
1876 | * Copy the message into the recv message data, so we | 1966 | } |
1877 | * can retransmit it later if necessary. | ||
1878 | */ | ||
1879 | memcpy(recv_msg->msg_data, smi_msg->data, | ||
1880 | smi_msg->data_size); | ||
1881 | recv_msg->msg.data = recv_msg->msg_data; | ||
1882 | recv_msg->msg.data_len = smi_msg->data_size; | ||
1883 | 1967 | ||
1884 | /* | 1968 | static int i_ipmi_req_lan(struct ipmi_smi *intf, |
1885 | * We don't unlock until here, because we need | 1969 | struct ipmi_addr *addr, |
1886 | * to copy the completed message into the | 1970 | long msgid, |
1887 | * recv_msg before we release the lock. | 1971 | struct kernel_ipmi_msg *msg, |
1888 | * Otherwise, race conditions may bite us. I | 1972 | struct ipmi_smi_msg *smi_msg, |
1889 | * know that's pretty paranoid, but I prefer | 1973 | struct ipmi_recv_msg *recv_msg, |
1890 | * to be correct. | 1974 | unsigned char source_lun, |
1891 | */ | 1975 | int retries, |
1892 | spin_unlock_irqrestore(&(intf->seq_lock), flags); | 1976 | unsigned int retry_time_ms) |
1893 | } | 1977 | { |
1894 | } else if (is_lan_addr(addr)) { | 1978 | struct ipmi_lan_addr *lan_addr; |
1895 | struct ipmi_lan_addr *lan_addr; | 1979 | unsigned char ipmb_seq; |
1896 | unsigned char ipmb_seq; | 1980 | long seqid; |
1897 | long seqid; | 1981 | struct ipmi_channel *chans; |
1898 | struct ipmi_channel *chans; | 1982 | int rv = 0; |
1899 | 1983 | ||
1900 | if (addr->channel >= IPMI_MAX_CHANNELS) { | 1984 | if (addr->channel >= IPMI_MAX_CHANNELS) { |
1901 | ipmi_inc_stat(intf, sent_invalid_commands); | 1985 | ipmi_inc_stat(intf, sent_invalid_commands); |
1902 | rv = -EINVAL; | 1986 | return -EINVAL; |
1903 | goto out_err; | 1987 | } |
1904 | } | ||
1905 | 1988 | ||
1906 | chans = READ_ONCE(intf->channel_list)->c; | 1989 | chans = READ_ONCE(intf->channel_list)->c; |
1907 | 1990 | ||
1908 | if ((chans[addr->channel].medium | 1991 | if ((chans[addr->channel].medium |
1909 | != IPMI_CHANNEL_MEDIUM_8023LAN) | 1992 | != IPMI_CHANNEL_MEDIUM_8023LAN) |
1910 | && (chans[addr->channel].medium | 1993 | && (chans[addr->channel].medium |
1911 | != IPMI_CHANNEL_MEDIUM_ASYNC)) { | 1994 | != IPMI_CHANNEL_MEDIUM_ASYNC)) { |
1912 | ipmi_inc_stat(intf, sent_invalid_commands); | 1995 | ipmi_inc_stat(intf, sent_invalid_commands); |
1913 | rv = -EINVAL; | 1996 | return -EINVAL; |
1914 | goto out_err; | 1997 | } |
1915 | } | ||
1916 | 1998 | ||
1917 | retries = 4; | 1999 | /* 11 for the header and 1 for the checksum. */ |
2000 | if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) { | ||
2001 | ipmi_inc_stat(intf, sent_invalid_commands); | ||
2002 | return -EMSGSIZE; | ||
2003 | } | ||
1918 | 2004 | ||
1919 | /* Default to 1 second retries. */ | 2005 | lan_addr = (struct ipmi_lan_addr *) addr; |
1920 | if (retry_time_ms == 0) | 2006 | if (lan_addr->lun > 3) { |
1921 | retry_time_ms = 1000; | 2007 | ipmi_inc_stat(intf, sent_invalid_commands); |
2008 | return -EINVAL; | ||
2009 | } | ||
1922 | 2010 | ||
1923 | /* 11 for the header and 1 for the checksum. */ | 2011 | memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr)); |
1924 | if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) { | ||
1925 | ipmi_inc_stat(intf, sent_invalid_commands); | ||
1926 | rv = -EMSGSIZE; | ||
1927 | goto out_err; | ||
1928 | } | ||
1929 | 2012 | ||
1930 | lan_addr = (struct ipmi_lan_addr *) addr; | 2013 | if (recv_msg->msg.netfn & 0x1) { |
1931 | if (lan_addr->lun > 3) { | 2014 | /* |
1932 | ipmi_inc_stat(intf, sent_invalid_commands); | 2015 | * It's a response, so use the user's sequence |
1933 | rv = -EINVAL; | 2016 | * from msgid. |
1934 | goto out_err; | 2017 | */ |
1935 | } | 2018 | ipmi_inc_stat(intf, sent_lan_responses); |
2019 | format_lan_msg(smi_msg, msg, lan_addr, msgid, | ||
2020 | msgid, source_lun); | ||
1936 | 2021 | ||
1937 | memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr)); | 2022 | /* |
2023 | * Save the receive message so we can use it | ||
2024 | * to deliver the response. | ||
2025 | */ | ||
2026 | smi_msg->user_data = recv_msg; | ||
2027 | } else { | ||
2028 | /* It's a command, so get a sequence for it. */ | ||
2029 | unsigned long flags; | ||
1938 | 2030 | ||
1939 | if (recv_msg->msg.netfn & 0x1) { | 2031 | spin_lock_irqsave(&intf->seq_lock, flags); |
1940 | /* | ||
1941 | * It's a response, so use the user's sequence | ||
1942 | * from msgid. | ||
1943 | */ | ||
1944 | ipmi_inc_stat(intf, sent_lan_responses); | ||
1945 | format_lan_msg(smi_msg, msg, lan_addr, msgid, | ||
1946 | msgid, source_lun); | ||
1947 | 2032 | ||
2033 | /* | ||
2034 | * Create a sequence number with a 1 second | ||
2035 | * timeout and 4 retries. | ||
2036 | */ | ||
2037 | rv = intf_next_seq(intf, | ||
2038 | recv_msg, | ||
2039 | retry_time_ms, | ||
2040 | retries, | ||
2041 | 0, | ||
2042 | &ipmb_seq, | ||
2043 | &seqid); | ||
2044 | if (rv) | ||
1948 | /* | 2045 | /* |
1949 | * Save the receive message so we can use it | 2046 | * We have used up all the sequence numbers, |
1950 | * to deliver the response. | 2047 | * probably, so abort. |
1951 | */ | 2048 | */ |
1952 | smi_msg->user_data = recv_msg; | 2049 | goto out_err; |
1953 | } else { | ||
1954 | /* It's a command, so get a sequence for it. */ | ||
1955 | 2050 | ||
1956 | spin_lock_irqsave(&(intf->seq_lock), flags); | 2051 | ipmi_inc_stat(intf, sent_lan_commands); |
1957 | 2052 | ||
1958 | /* | 2053 | /* |
1959 | * Create a sequence number with a 1 second | 2054 | * Store the sequence number in the message, |
1960 | * timeout and 4 retries. | 2055 | * so that when the send message response |
1961 | */ | 2056 | * comes back we can start the timer. |
1962 | rv = intf_next_seq(intf, | 2057 | */ |
1963 | recv_msg, | 2058 | format_lan_msg(smi_msg, msg, lan_addr, |
1964 | retry_time_ms, | 2059 | STORE_SEQ_IN_MSGID(ipmb_seq, seqid), |
1965 | retries, | 2060 | ipmb_seq, source_lun); |
1966 | 0, | ||
1967 | &ipmb_seq, | ||
1968 | &seqid); | ||
1969 | if (rv) { | ||
1970 | /* | ||
1971 | * We have used up all the sequence numbers, | ||
1972 | * probably, so abort. | ||
1973 | */ | ||
1974 | spin_unlock_irqrestore(&(intf->seq_lock), | ||
1975 | flags); | ||
1976 | goto out_err; | ||
1977 | } | ||
1978 | 2061 | ||
1979 | ipmi_inc_stat(intf, sent_lan_commands); | 2062 | /* |
2063 | * Copy the message into the recv message data, so we | ||
2064 | * can retransmit it later if necessary. | ||
2065 | */ | ||
2066 | memcpy(recv_msg->msg_data, smi_msg->data, | ||
2067 | smi_msg->data_size); | ||
2068 | recv_msg->msg.data = recv_msg->msg_data; | ||
2069 | recv_msg->msg.data_len = smi_msg->data_size; | ||
1980 | 2070 | ||
1981 | /* | 2071 | /* |
1982 | * Store the sequence number in the message, | 2072 | * We don't unlock until here, because we need |
1983 | * so that when the send message response | 2073 | * to copy the completed message into the |
1984 | * comes back we can start the timer. | 2074 | * recv_msg before we release the lock. |
1985 | */ | 2075 | * Otherwise, race conditions may bite us. I |
1986 | format_lan_msg(smi_msg, msg, lan_addr, | 2076 | * know that's pretty paranoid, but I prefer |
1987 | STORE_SEQ_IN_MSGID(ipmb_seq, seqid), | 2077 | * to be correct. |
1988 | ipmb_seq, source_lun); | 2078 | */ |
2079 | out_err: | ||
2080 | spin_unlock_irqrestore(&intf->seq_lock, flags); | ||
2081 | } | ||
1989 | 2082 | ||
1990 | /* | 2083 | return rv; |
1991 | * Copy the message into the recv message data, so we | 2084 | } |
1992 | * can retransmit it later if necessary. | ||
1993 | */ | ||
1994 | memcpy(recv_msg->msg_data, smi_msg->data, | ||
1995 | smi_msg->data_size); | ||
1996 | recv_msg->msg.data = recv_msg->msg_data; | ||
1997 | recv_msg->msg.data_len = smi_msg->data_size; | ||
1998 | 2085 | ||
1999 | /* | 2086 | /* |
2000 | * We don't unlock until here, because we need | 2087 | * Separate from ipmi_request so that the user does not have to be |
2001 | * to copy the completed message into the | 2088 | * supplied in certain circumstances (mainly at panic time). If |
2002 | * recv_msg before we release the lock. | 2089 | * messages are supplied, they will be freed, even if an error |
2003 | * Otherwise, race conditions may bite us. I | 2090 | * occurs. |
2004 | * know that's pretty paranoid, but I prefer | 2091 | */ |
2005 | * to be correct. | 2092 | static int i_ipmi_request(struct ipmi_user *user, |
2006 | */ | 2093 | struct ipmi_smi *intf, |
2007 | spin_unlock_irqrestore(&(intf->seq_lock), flags); | 2094 | struct ipmi_addr *addr, |
2095 | long msgid, | ||
2096 | struct kernel_ipmi_msg *msg, | ||
2097 | void *user_msg_data, | ||
2098 | void *supplied_smi, | ||
2099 | struct ipmi_recv_msg *supplied_recv, | ||
2100 | int priority, | ||
2101 | unsigned char source_address, | ||
2102 | unsigned char source_lun, | ||
2103 | int retries, | ||
2104 | unsigned int retry_time_ms) | ||
2105 | { | ||
2106 | struct ipmi_smi_msg *smi_msg; | ||
2107 | struct ipmi_recv_msg *recv_msg; | ||
2108 | int rv = 0; | ||
2109 | |||
2110 | if (supplied_recv) | ||
2111 | recv_msg = supplied_recv; | ||
2112 | else { | ||
2113 | recv_msg = ipmi_alloc_recv_msg(); | ||
2114 | if (recv_msg == NULL) { | ||
2115 | rv = -ENOMEM; | ||
2116 | goto out; | ||
2008 | } | 2117 | } |
2118 | } | ||
2119 | recv_msg->user_msg_data = user_msg_data; | ||
2120 | |||
2121 | if (supplied_smi) | ||
2122 | smi_msg = (struct ipmi_smi_msg *) supplied_smi; | ||
2123 | else { | ||
2124 | smi_msg = ipmi_alloc_smi_msg(); | ||
2125 | if (smi_msg == NULL) { | ||
2126 | ipmi_free_recv_msg(recv_msg); | ||
2127 | rv = -ENOMEM; | ||
2128 | goto out; | ||
2129 | } | ||
2130 | } | ||
2131 | |||
2132 | rcu_read_lock(); | ||
2133 | if (intf->in_shutdown) { | ||
2134 | rv = -ENODEV; | ||
2135 | goto out_err; | ||
2136 | } | ||
2137 | |||
2138 | recv_msg->user = user; | ||
2139 | if (user) | ||
2140 | /* The put happens when the message is freed. */ | ||
2141 | kref_get(&user->refcount); | ||
2142 | recv_msg->msgid = msgid; | ||
2143 | /* | ||
2144 | * Store the message to send in the receive message so timeout | ||
2145 | * responses can get the proper response data. | ||
2146 | */ | ||
2147 | recv_msg->msg = *msg; | ||
2148 | |||
2149 | if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) { | ||
2150 | rv = i_ipmi_req_sysintf(intf, addr, msgid, msg, smi_msg, | ||
2151 | recv_msg, retries, retry_time_ms); | ||
2152 | } else if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) { | ||
2153 | rv = i_ipmi_req_ipmb(intf, addr, msgid, msg, smi_msg, recv_msg, | ||
2154 | source_address, source_lun, | ||
2155 | retries, retry_time_ms); | ||
2156 | } else if (is_lan_addr(addr)) { | ||
2157 | rv = i_ipmi_req_lan(intf, addr, msgid, msg, smi_msg, recv_msg, | ||
2158 | source_lun, retries, retry_time_ms); | ||
2009 | } else { | 2159 | } else { |
2010 | /* Unknown address type. */ | 2160 | /* Unknown address type. */ |
2011 | ipmi_inc_stat(intf, sent_invalid_commands); | 2161 | ipmi_inc_stat(intf, sent_invalid_commands); |
2012 | rv = -EINVAL; | 2162 | rv = -EINVAL; |
2013 | goto out_err; | ||
2014 | } | 2163 | } |
2015 | 2164 | ||
2016 | #ifdef DEBUG_MSGING | 2165 | if (rv) { |
2017 | { | 2166 | out_err: |
2018 | int m; | 2167 | ipmi_free_smi_msg(smi_msg); |
2019 | for (m = 0; m < smi_msg->data_size; m++) | 2168 | ipmi_free_recv_msg(recv_msg); |
2020 | printk(" %2.2x", smi_msg->data[m]); | 2169 | } else { |
2021 | printk("\n"); | 2170 | ipmi_debug_msg("Send", smi_msg->data, smi_msg->data_size); |
2022 | } | ||
2023 | #endif | ||
2024 | 2171 | ||
2025 | smi_send(intf, intf->handlers, smi_msg, priority); | 2172 | smi_send(intf, intf->handlers, smi_msg, priority); |
2173 | } | ||
2026 | rcu_read_unlock(); | 2174 | rcu_read_unlock(); |
2027 | 2175 | ||
2028 | return 0; | 2176 | out: |
2029 | |||
2030 | out_err: | ||
2031 | rcu_read_unlock(); | ||
2032 | ipmi_free_smi_msg(smi_msg); | ||
2033 | ipmi_free_recv_msg(recv_msg); | ||
2034 | return rv; | 2177 | return rv; |
2035 | } | 2178 | } |
2036 | 2179 | ||
2037 | static int check_addr(ipmi_smi_t intf, | 2180 | static int check_addr(struct ipmi_smi *intf, |
2038 | struct ipmi_addr *addr, | 2181 | struct ipmi_addr *addr, |
2039 | unsigned char *saddr, | 2182 | unsigned char *saddr, |
2040 | unsigned char *lun) | 2183 | unsigned char *lun) |
@@ -2046,7 +2189,7 @@ static int check_addr(ipmi_smi_t intf, | |||
2046 | return 0; | 2189 | return 0; |
2047 | } | 2190 | } |
2048 | 2191 | ||
2049 | int ipmi_request_settime(ipmi_user_t user, | 2192 | int ipmi_request_settime(struct ipmi_user *user, |
2050 | struct ipmi_addr *addr, | 2193 | struct ipmi_addr *addr, |
2051 | long msgid, | 2194 | long msgid, |
2052 | struct kernel_ipmi_msg *msg, | 2195 | struct kernel_ipmi_msg *msg, |
@@ -2056,29 +2199,36 @@ int ipmi_request_settime(ipmi_user_t user, | |||
2056 | unsigned int retry_time_ms) | 2199 | unsigned int retry_time_ms) |
2057 | { | 2200 | { |
2058 | unsigned char saddr = 0, lun = 0; | 2201 | unsigned char saddr = 0, lun = 0; |
2059 | int rv; | 2202 | int rv, index; |
2060 | 2203 | ||
2061 | if (!user) | 2204 | if (!user) |
2062 | return -EINVAL; | 2205 | return -EINVAL; |
2206 | |||
2207 | user = acquire_ipmi_user(user, &index); | ||
2208 | if (!user) | ||
2209 | return -ENODEV; | ||
2210 | |||
2063 | rv = check_addr(user->intf, addr, &saddr, &lun); | 2211 | rv = check_addr(user->intf, addr, &saddr, &lun); |
2064 | if (rv) | 2212 | if (!rv) |
2065 | return rv; | 2213 | rv = i_ipmi_request(user, |
2066 | return i_ipmi_request(user, | 2214 | user->intf, |
2067 | user->intf, | 2215 | addr, |
2068 | addr, | 2216 | msgid, |
2069 | msgid, | 2217 | msg, |
2070 | msg, | 2218 | user_msg_data, |
2071 | user_msg_data, | 2219 | NULL, NULL, |
2072 | NULL, NULL, | 2220 | priority, |
2073 | priority, | 2221 | saddr, |
2074 | saddr, | 2222 | lun, |
2075 | lun, | 2223 | retries, |
2076 | retries, | 2224 | retry_time_ms); |
2077 | retry_time_ms); | 2225 | |
2226 | release_ipmi_user(user, index); | ||
2227 | return rv; | ||
2078 | } | 2228 | } |
2079 | EXPORT_SYMBOL(ipmi_request_settime); | 2229 | EXPORT_SYMBOL(ipmi_request_settime); |
2080 | 2230 | ||
2081 | int ipmi_request_supply_msgs(ipmi_user_t user, | 2231 | int ipmi_request_supply_msgs(struct ipmi_user *user, |
2082 | struct ipmi_addr *addr, | 2232 | struct ipmi_addr *addr, |
2083 | long msgid, | 2233 | long msgid, |
2084 | struct kernel_ipmi_msg *msg, | 2234 | struct kernel_ipmi_msg *msg, |
@@ -2088,29 +2238,37 @@ int ipmi_request_supply_msgs(ipmi_user_t user, | |||
2088 | int priority) | 2238 | int priority) |
2089 | { | 2239 | { |
2090 | unsigned char saddr = 0, lun = 0; | 2240 | unsigned char saddr = 0, lun = 0; |
2091 | int rv; | 2241 | int rv, index; |
2092 | 2242 | ||
2093 | if (!user) | 2243 | if (!user) |
2094 | return -EINVAL; | 2244 | return -EINVAL; |
2245 | |||
2246 | user = acquire_ipmi_user(user, &index); | ||
2247 | if (!user) | ||
2248 | return -ENODEV; | ||
2249 | |||
2095 | rv = check_addr(user->intf, addr, &saddr, &lun); | 2250 | rv = check_addr(user->intf, addr, &saddr, &lun); |
2096 | if (rv) | 2251 | if (!rv) |
2097 | return rv; | 2252 | rv = i_ipmi_request(user, |
2098 | return i_ipmi_request(user, | 2253 | user->intf, |
2099 | user->intf, | 2254 | addr, |
2100 | addr, | 2255 | msgid, |
2101 | msgid, | 2256 | msg, |
2102 | msg, | 2257 | user_msg_data, |
2103 | user_msg_data, | 2258 | supplied_smi, |
2104 | supplied_smi, | 2259 | supplied_recv, |
2105 | supplied_recv, | 2260 | priority, |
2106 | priority, | 2261 | saddr, |
2107 | saddr, | 2262 | lun, |
2108 | lun, | 2263 | -1, 0); |
2109 | -1, 0); | 2264 | |
2265 | release_ipmi_user(user, index); | ||
2266 | return rv; | ||
2110 | } | 2267 | } |
2111 | EXPORT_SYMBOL(ipmi_request_supply_msgs); | 2268 | EXPORT_SYMBOL(ipmi_request_supply_msgs); |
2112 | 2269 | ||
2113 | static void bmc_device_id_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg) | 2270 | static void bmc_device_id_handler(struct ipmi_smi *intf, |
2271 | struct ipmi_recv_msg *msg) | ||
2114 | { | 2272 | { |
2115 | int rv; | 2273 | int rv; |
2116 | 2274 | ||
@@ -2142,7 +2300,7 @@ static void bmc_device_id_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg) | |||
2142 | } | 2300 | } |
2143 | 2301 | ||
2144 | static int | 2302 | static int |
2145 | send_get_device_id_cmd(ipmi_smi_t intf) | 2303 | send_get_device_id_cmd(struct ipmi_smi *intf) |
2146 | { | 2304 | { |
2147 | struct ipmi_system_interface_addr si; | 2305 | struct ipmi_system_interface_addr si; |
2148 | struct kernel_ipmi_msg msg; | 2306 | struct kernel_ipmi_msg msg; |
@@ -2170,7 +2328,7 @@ send_get_device_id_cmd(ipmi_smi_t intf) | |||
2170 | -1, 0); | 2328 | -1, 0); |
2171 | } | 2329 | } |
2172 | 2330 | ||
2173 | static int __get_device_id(ipmi_smi_t intf, struct bmc_device *bmc) | 2331 | static int __get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc) |
2174 | { | 2332 | { |
2175 | int rv; | 2333 | int rv; |
2176 | 2334 | ||
@@ -2204,7 +2362,7 @@ static int __get_device_id(ipmi_smi_t intf, struct bmc_device *bmc) | |||
2204 | * Except for the first time this is called (in ipmi_register_smi()), | 2362 | * Except for the first time this is called (in ipmi_register_smi()), |
2205 | * this will always return good data; | 2363 | * this will always return good data; |
2206 | */ | 2364 | */ |
2207 | static int __bmc_get_device_id(ipmi_smi_t intf, struct bmc_device *bmc, | 2365 | static int __bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc, |
2208 | struct ipmi_device_id *id, | 2366 | struct ipmi_device_id *id, |
2209 | bool *guid_set, guid_t *guid, int intf_num) | 2367 | bool *guid_set, guid_t *guid, int intf_num) |
2210 | { | 2368 | { |
@@ -2337,223 +2495,13 @@ out_noprocessing: | |||
2337 | return rv; | 2495 | return rv; |
2338 | } | 2496 | } |
2339 | 2497 | ||
2340 | static int bmc_get_device_id(ipmi_smi_t intf, struct bmc_device *bmc, | 2498 | static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc, |
2341 | struct ipmi_device_id *id, | 2499 | struct ipmi_device_id *id, |
2342 | bool *guid_set, guid_t *guid) | 2500 | bool *guid_set, guid_t *guid) |
2343 | { | 2501 | { |
2344 | return __bmc_get_device_id(intf, bmc, id, guid_set, guid, -1); | 2502 | return __bmc_get_device_id(intf, bmc, id, guid_set, guid, -1); |
2345 | } | 2503 | } |
2346 | 2504 | ||
2347 | #ifdef CONFIG_IPMI_PROC_INTERFACE | ||
2348 | static int smi_ipmb_proc_show(struct seq_file *m, void *v) | ||
2349 | { | ||
2350 | ipmi_smi_t intf = m->private; | ||
2351 | int i; | ||
2352 | |||
2353 | seq_printf(m, "%x", intf->addrinfo[0].address); | ||
2354 | for (i = 1; i < IPMI_MAX_CHANNELS; i++) | ||
2355 | seq_printf(m, " %x", intf->addrinfo[i].address); | ||
2356 | seq_putc(m, '\n'); | ||
2357 | |||
2358 | return 0; | ||
2359 | } | ||
2360 | |||
2361 | static int smi_ipmb_proc_open(struct inode *inode, struct file *file) | ||
2362 | { | ||
2363 | return single_open(file, smi_ipmb_proc_show, PDE_DATA(inode)); | ||
2364 | } | ||
2365 | |||
2366 | static const struct file_operations smi_ipmb_proc_ops = { | ||
2367 | .open = smi_ipmb_proc_open, | ||
2368 | .read = seq_read, | ||
2369 | .llseek = seq_lseek, | ||
2370 | .release = single_release, | ||
2371 | }; | ||
2372 | |||
2373 | static int smi_version_proc_show(struct seq_file *m, void *v) | ||
2374 | { | ||
2375 | ipmi_smi_t intf = m->private; | ||
2376 | struct ipmi_device_id id; | ||
2377 | int rv; | ||
2378 | |||
2379 | rv = bmc_get_device_id(intf, NULL, &id, NULL, NULL); | ||
2380 | if (rv) | ||
2381 | return rv; | ||
2382 | |||
2383 | seq_printf(m, "%u.%u\n", | ||
2384 | ipmi_version_major(&id), | ||
2385 | ipmi_version_minor(&id)); | ||
2386 | |||
2387 | return 0; | ||
2388 | } | ||
2389 | |||
2390 | static int smi_version_proc_open(struct inode *inode, struct file *file) | ||
2391 | { | ||
2392 | return single_open(file, smi_version_proc_show, PDE_DATA(inode)); | ||
2393 | } | ||
2394 | |||
2395 | static const struct file_operations smi_version_proc_ops = { | ||
2396 | .open = smi_version_proc_open, | ||
2397 | .read = seq_read, | ||
2398 | .llseek = seq_lseek, | ||
2399 | .release = single_release, | ||
2400 | }; | ||
2401 | |||
2402 | static int smi_stats_proc_show(struct seq_file *m, void *v) | ||
2403 | { | ||
2404 | ipmi_smi_t intf = m->private; | ||
2405 | |||
2406 | seq_printf(m, "sent_invalid_commands: %u\n", | ||
2407 | ipmi_get_stat(intf, sent_invalid_commands)); | ||
2408 | seq_printf(m, "sent_local_commands: %u\n", | ||
2409 | ipmi_get_stat(intf, sent_local_commands)); | ||
2410 | seq_printf(m, "handled_local_responses: %u\n", | ||
2411 | ipmi_get_stat(intf, handled_local_responses)); | ||
2412 | seq_printf(m, "unhandled_local_responses: %u\n", | ||
2413 | ipmi_get_stat(intf, unhandled_local_responses)); | ||
2414 | seq_printf(m, "sent_ipmb_commands: %u\n", | ||
2415 | ipmi_get_stat(intf, sent_ipmb_commands)); | ||
2416 | seq_printf(m, "sent_ipmb_command_errs: %u\n", | ||
2417 | ipmi_get_stat(intf, sent_ipmb_command_errs)); | ||
2418 | seq_printf(m, "retransmitted_ipmb_commands: %u\n", | ||
2419 | ipmi_get_stat(intf, retransmitted_ipmb_commands)); | ||
2420 | seq_printf(m, "timed_out_ipmb_commands: %u\n", | ||
2421 | ipmi_get_stat(intf, timed_out_ipmb_commands)); | ||
2422 | seq_printf(m, "timed_out_ipmb_broadcasts: %u\n", | ||
2423 | ipmi_get_stat(intf, timed_out_ipmb_broadcasts)); | ||
2424 | seq_printf(m, "sent_ipmb_responses: %u\n", | ||
2425 | ipmi_get_stat(intf, sent_ipmb_responses)); | ||
2426 | seq_printf(m, "handled_ipmb_responses: %u\n", | ||
2427 | ipmi_get_stat(intf, handled_ipmb_responses)); | ||
2428 | seq_printf(m, "invalid_ipmb_responses: %u\n", | ||
2429 | ipmi_get_stat(intf, invalid_ipmb_responses)); | ||
2430 | seq_printf(m, "unhandled_ipmb_responses: %u\n", | ||
2431 | ipmi_get_stat(intf, unhandled_ipmb_responses)); | ||
2432 | seq_printf(m, "sent_lan_commands: %u\n", | ||
2433 | ipmi_get_stat(intf, sent_lan_commands)); | ||
2434 | seq_printf(m, "sent_lan_command_errs: %u\n", | ||
2435 | ipmi_get_stat(intf, sent_lan_command_errs)); | ||
2436 | seq_printf(m, "retransmitted_lan_commands: %u\n", | ||
2437 | ipmi_get_stat(intf, retransmitted_lan_commands)); | ||
2438 | seq_printf(m, "timed_out_lan_commands: %u\n", | ||
2439 | ipmi_get_stat(intf, timed_out_lan_commands)); | ||
2440 | seq_printf(m, "sent_lan_responses: %u\n", | ||
2441 | ipmi_get_stat(intf, sent_lan_responses)); | ||
2442 | seq_printf(m, "handled_lan_responses: %u\n", | ||
2443 | ipmi_get_stat(intf, handled_lan_responses)); | ||
2444 | seq_printf(m, "invalid_lan_responses: %u\n", | ||
2445 | ipmi_get_stat(intf, invalid_lan_responses)); | ||
2446 | seq_printf(m, "unhandled_lan_responses: %u\n", | ||
2447 | ipmi_get_stat(intf, unhandled_lan_responses)); | ||
2448 | seq_printf(m, "handled_commands: %u\n", | ||
2449 | ipmi_get_stat(intf, handled_commands)); | ||
2450 | seq_printf(m, "invalid_commands: %u\n", | ||
2451 | ipmi_get_stat(intf, invalid_commands)); | ||
2452 | seq_printf(m, "unhandled_commands: %u\n", | ||
2453 | ipmi_get_stat(intf, unhandled_commands)); | ||
2454 | seq_printf(m, "invalid_events: %u\n", | ||
2455 | ipmi_get_stat(intf, invalid_events)); | ||
2456 | seq_printf(m, "events: %u\n", | ||
2457 | ipmi_get_stat(intf, events)); | ||
2458 | seq_printf(m, "failed rexmit LAN msgs: %u\n", | ||
2459 | ipmi_get_stat(intf, dropped_rexmit_lan_commands)); | ||
2460 | seq_printf(m, "failed rexmit IPMB msgs: %u\n", | ||
2461 | ipmi_get_stat(intf, dropped_rexmit_ipmb_commands)); | ||
2462 | return 0; | ||
2463 | } | ||
2464 | |||
2465 | static int smi_stats_proc_open(struct inode *inode, struct file *file) | ||
2466 | { | ||
2467 | return single_open(file, smi_stats_proc_show, PDE_DATA(inode)); | ||
2468 | } | ||
2469 | |||
2470 | static const struct file_operations smi_stats_proc_ops = { | ||
2471 | .open = smi_stats_proc_open, | ||
2472 | .read = seq_read, | ||
2473 | .llseek = seq_lseek, | ||
2474 | .release = single_release, | ||
2475 | }; | ||
2476 | |||
2477 | int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name, | ||
2478 | const struct file_operations *proc_ops, | ||
2479 | void *data) | ||
2480 | { | ||
2481 | int rv = 0; | ||
2482 | struct proc_dir_entry *file; | ||
2483 | struct ipmi_proc_entry *entry; | ||
2484 | |||
2485 | /* Create a list element. */ | ||
2486 | entry = kmalloc(sizeof(*entry), GFP_KERNEL); | ||
2487 | if (!entry) | ||
2488 | return -ENOMEM; | ||
2489 | entry->name = kstrdup(name, GFP_KERNEL); | ||
2490 | if (!entry->name) { | ||
2491 | kfree(entry); | ||
2492 | return -ENOMEM; | ||
2493 | } | ||
2494 | |||
2495 | file = proc_create_data(name, 0, smi->proc_dir, proc_ops, data); | ||
2496 | if (!file) { | ||
2497 | kfree(entry->name); | ||
2498 | kfree(entry); | ||
2499 | rv = -ENOMEM; | ||
2500 | } else { | ||
2501 | mutex_lock(&smi->proc_entry_lock); | ||
2502 | /* Stick it on the list. */ | ||
2503 | entry->next = smi->proc_entries; | ||
2504 | smi->proc_entries = entry; | ||
2505 | mutex_unlock(&smi->proc_entry_lock); | ||
2506 | } | ||
2507 | |||
2508 | return rv; | ||
2509 | } | ||
2510 | EXPORT_SYMBOL(ipmi_smi_add_proc_entry); | ||
2511 | |||
2512 | static int add_proc_entries(ipmi_smi_t smi, int num) | ||
2513 | { | ||
2514 | int rv = 0; | ||
2515 | |||
2516 | sprintf(smi->proc_dir_name, "%d", num); | ||
2517 | smi->proc_dir = proc_mkdir(smi->proc_dir_name, proc_ipmi_root); | ||
2518 | if (!smi->proc_dir) | ||
2519 | rv = -ENOMEM; | ||
2520 | |||
2521 | if (rv == 0) | ||
2522 | rv = ipmi_smi_add_proc_entry(smi, "stats", | ||
2523 | &smi_stats_proc_ops, | ||
2524 | smi); | ||
2525 | |||
2526 | if (rv == 0) | ||
2527 | rv = ipmi_smi_add_proc_entry(smi, "ipmb", | ||
2528 | &smi_ipmb_proc_ops, | ||
2529 | smi); | ||
2530 | |||
2531 | if (rv == 0) | ||
2532 | rv = ipmi_smi_add_proc_entry(smi, "version", | ||
2533 | &smi_version_proc_ops, | ||
2534 | smi); | ||
2535 | |||
2536 | return rv; | ||
2537 | } | ||
2538 | |||
2539 | static void remove_proc_entries(ipmi_smi_t smi) | ||
2540 | { | ||
2541 | struct ipmi_proc_entry *entry; | ||
2542 | |||
2543 | mutex_lock(&smi->proc_entry_lock); | ||
2544 | while (smi->proc_entries) { | ||
2545 | entry = smi->proc_entries; | ||
2546 | smi->proc_entries = entry->next; | ||
2547 | |||
2548 | remove_proc_entry(entry->name, smi->proc_dir); | ||
2549 | kfree(entry->name); | ||
2550 | kfree(entry); | ||
2551 | } | ||
2552 | mutex_unlock(&smi->proc_entry_lock); | ||
2553 | remove_proc_entry(smi->proc_dir_name, proc_ipmi_root); | ||
2554 | } | ||
2555 | #endif /* CONFIG_IPMI_PROC_INTERFACE */ | ||
2556 | |||
2557 | static ssize_t device_id_show(struct device *dev, | 2505 | static ssize_t device_id_show(struct device *dev, |
2558 | struct device_attribute *attr, | 2506 | struct device_attribute *attr, |
2559 | char *buf) | 2507 | char *buf) |
@@ -2885,7 +2833,7 @@ cleanup_bmc_device(struct kref *ref) | |||
2885 | /* | 2833 | /* |
2886 | * Must be called with intf->bmc_reg_mutex held. | 2834 | * Must be called with intf->bmc_reg_mutex held. |
2887 | */ | 2835 | */ |
2888 | static void __ipmi_bmc_unregister(ipmi_smi_t intf) | 2836 | static void __ipmi_bmc_unregister(struct ipmi_smi *intf) |
2889 | { | 2837 | { |
2890 | struct bmc_device *bmc = intf->bmc; | 2838 | struct bmc_device *bmc = intf->bmc; |
2891 | 2839 | ||
@@ -2905,7 +2853,7 @@ static void __ipmi_bmc_unregister(ipmi_smi_t intf) | |||
2905 | intf->bmc_registered = false; | 2853 | intf->bmc_registered = false; |
2906 | } | 2854 | } |
2907 | 2855 | ||
2908 | static void ipmi_bmc_unregister(ipmi_smi_t intf) | 2856 | static void ipmi_bmc_unregister(struct ipmi_smi *intf) |
2909 | { | 2857 | { |
2910 | mutex_lock(&intf->bmc_reg_mutex); | 2858 | mutex_lock(&intf->bmc_reg_mutex); |
2911 | __ipmi_bmc_unregister(intf); | 2859 | __ipmi_bmc_unregister(intf); |
@@ -2915,7 +2863,7 @@ static void ipmi_bmc_unregister(ipmi_smi_t intf) | |||
2915 | /* | 2863 | /* |
2916 | * Must be called with intf->bmc_reg_mutex held. | 2864 | * Must be called with intf->bmc_reg_mutex held. |
2917 | */ | 2865 | */ |
2918 | static int __ipmi_bmc_register(ipmi_smi_t intf, | 2866 | static int __ipmi_bmc_register(struct ipmi_smi *intf, |
2919 | struct ipmi_device_id *id, | 2867 | struct ipmi_device_id *id, |
2920 | bool guid_set, guid_t *guid, int intf_num) | 2868 | bool guid_set, guid_t *guid, int intf_num) |
2921 | { | 2869 | { |
@@ -3077,7 +3025,7 @@ out_list_del: | |||
3077 | } | 3025 | } |
3078 | 3026 | ||
3079 | static int | 3027 | static int |
3080 | send_guid_cmd(ipmi_smi_t intf, int chan) | 3028 | send_guid_cmd(struct ipmi_smi *intf, int chan) |
3081 | { | 3029 | { |
3082 | struct kernel_ipmi_msg msg; | 3030 | struct kernel_ipmi_msg msg; |
3083 | struct ipmi_system_interface_addr si; | 3031 | struct ipmi_system_interface_addr si; |
@@ -3104,7 +3052,7 @@ send_guid_cmd(ipmi_smi_t intf, int chan) | |||
3104 | -1, 0); | 3052 | -1, 0); |
3105 | } | 3053 | } |
3106 | 3054 | ||
3107 | static void guid_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg) | 3055 | static void guid_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) |
3108 | { | 3056 | { |
3109 | struct bmc_device *bmc = intf->bmc; | 3057 | struct bmc_device *bmc = intf->bmc; |
3110 | 3058 | ||
@@ -3139,7 +3087,7 @@ static void guid_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg) | |||
3139 | wake_up(&intf->waitq); | 3087 | wake_up(&intf->waitq); |
3140 | } | 3088 | } |
3141 | 3089 | ||
3142 | static void __get_guid(ipmi_smi_t intf) | 3090 | static void __get_guid(struct ipmi_smi *intf) |
3143 | { | 3091 | { |
3144 | int rv; | 3092 | int rv; |
3145 | struct bmc_device *bmc = intf->bmc; | 3093 | struct bmc_device *bmc = intf->bmc; |
@@ -3160,7 +3108,7 @@ static void __get_guid(ipmi_smi_t intf) | |||
3160 | } | 3108 | } |
3161 | 3109 | ||
3162 | static int | 3110 | static int |
3163 | send_channel_info_cmd(ipmi_smi_t intf, int chan) | 3111 | send_channel_info_cmd(struct ipmi_smi *intf, int chan) |
3164 | { | 3112 | { |
3165 | struct kernel_ipmi_msg msg; | 3113 | struct kernel_ipmi_msg msg; |
3166 | unsigned char data[1]; | 3114 | unsigned char data[1]; |
@@ -3190,7 +3138,7 @@ send_channel_info_cmd(ipmi_smi_t intf, int chan) | |||
3190 | } | 3138 | } |
3191 | 3139 | ||
3192 | static void | 3140 | static void |
3193 | channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg) | 3141 | channel_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) |
3194 | { | 3142 | { |
3195 | int rv = 0; | 3143 | int rv = 0; |
3196 | int ch; | 3144 | int ch; |
@@ -3262,7 +3210,7 @@ channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg) | |||
3262 | /* | 3210 | /* |
3263 | * Must be holding intf->bmc_reg_mutex to call this. | 3211 | * Must be holding intf->bmc_reg_mutex to call this. |
3264 | */ | 3212 | */ |
3265 | static int __scan_channels(ipmi_smi_t intf, struct ipmi_device_id *id) | 3213 | static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id) |
3266 | { | 3214 | { |
3267 | int rv; | 3215 | int rv; |
3268 | 3216 | ||
@@ -3306,7 +3254,7 @@ static int __scan_channels(ipmi_smi_t intf, struct ipmi_device_id *id) | |||
3306 | return 0; | 3254 | return 0; |
3307 | } | 3255 | } |
3308 | 3256 | ||
3309 | static void ipmi_poll(ipmi_smi_t intf) | 3257 | static void ipmi_poll(struct ipmi_smi *intf) |
3310 | { | 3258 | { |
3311 | if (intf->handlers->poll) | 3259 | if (intf->handlers->poll) |
3312 | intf->handlers->poll(intf->send_info); | 3260 | intf->handlers->poll(intf->send_info); |
@@ -3314,7 +3262,7 @@ static void ipmi_poll(ipmi_smi_t intf) | |||
3314 | handle_new_recv_msgs(intf); | 3262 | handle_new_recv_msgs(intf); |
3315 | } | 3263 | } |
3316 | 3264 | ||
3317 | void ipmi_poll_interface(ipmi_user_t user) | 3265 | void ipmi_poll_interface(struct ipmi_user *user) |
3318 | { | 3266 | { |
3319 | ipmi_poll(user->intf); | 3267 | ipmi_poll(user->intf); |
3320 | } | 3268 | } |
@@ -3322,7 +3270,8 @@ EXPORT_SYMBOL(ipmi_poll_interface); | |||
3322 | 3270 | ||
3323 | static void redo_bmc_reg(struct work_struct *work) | 3271 | static void redo_bmc_reg(struct work_struct *work) |
3324 | { | 3272 | { |
3325 | ipmi_smi_t intf = container_of(work, struct ipmi_smi, bmc_reg_work); | 3273 | struct ipmi_smi *intf = container_of(work, struct ipmi_smi, |
3274 | bmc_reg_work); | ||
3326 | 3275 | ||
3327 | if (!intf->in_shutdown) | 3276 | if (!intf->in_shutdown) |
3328 | bmc_get_device_id(intf, NULL, NULL, NULL, NULL); | 3277 | bmc_get_device_id(intf, NULL, NULL, NULL, NULL); |
@@ -3337,8 +3286,7 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers, | |||
3337 | { | 3286 | { |
3338 | int i, j; | 3287 | int i, j; |
3339 | int rv; | 3288 | int rv; |
3340 | ipmi_smi_t intf; | 3289 | struct ipmi_smi *intf, *tintf; |
3341 | ipmi_smi_t tintf; | ||
3342 | struct list_head *link; | 3290 | struct list_head *link; |
3343 | struct ipmi_device_id id; | 3291 | struct ipmi_device_id id; |
3344 | 3292 | ||
@@ -3362,6 +3310,13 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers, | |||
3362 | if (!intf) | 3310 | if (!intf) |
3363 | return -ENOMEM; | 3311 | return -ENOMEM; |
3364 | 3312 | ||
3313 | rv = init_srcu_struct(&intf->users_srcu); | ||
3314 | if (rv) { | ||
3315 | kfree(intf); | ||
3316 | return rv; | ||
3317 | } | ||
3318 | |||
3319 | |||
3365 | intf->bmc = &intf->tmp_bmc; | 3320 | intf->bmc = &intf->tmp_bmc; |
3366 | INIT_LIST_HEAD(&intf->bmc->intfs); | 3321 | INIT_LIST_HEAD(&intf->bmc->intfs); |
3367 | mutex_init(&intf->bmc->dyn_mutex); | 3322 | mutex_init(&intf->bmc->dyn_mutex); |
@@ -3386,9 +3341,6 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers, | |||
3386 | intf->seq_table[j].seqid = 0; | 3341 | intf->seq_table[j].seqid = 0; |
3387 | } | 3342 | } |
3388 | intf->curr_seq = 0; | 3343 | intf->curr_seq = 0; |
3389 | #ifdef CONFIG_IPMI_PROC_INTERFACE | ||
3390 | mutex_init(&intf->proc_entry_lock); | ||
3391 | #endif | ||
3392 | spin_lock_init(&intf->waiting_rcv_msgs_lock); | 3344 | spin_lock_init(&intf->waiting_rcv_msgs_lock); |
3393 | INIT_LIST_HEAD(&intf->waiting_rcv_msgs); | 3345 | INIT_LIST_HEAD(&intf->waiting_rcv_msgs); |
3394 | tasklet_init(&intf->recv_tasklet, | 3346 | tasklet_init(&intf->recv_tasklet, |
@@ -3410,11 +3362,6 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers, | |||
3410 | for (i = 0; i < IPMI_NUM_STATS; i++) | 3362 | for (i = 0; i < IPMI_NUM_STATS; i++) |
3411 | atomic_set(&intf->stats[i], 0); | 3363 | atomic_set(&intf->stats[i], 0); |
3412 | 3364 | ||
3413 | #ifdef CONFIG_IPMI_PROC_INTERFACE | ||
3414 | intf->proc_dir = NULL; | ||
3415 | #endif | ||
3416 | |||
3417 | mutex_lock(&smi_watchers_mutex); | ||
3418 | mutex_lock(&ipmi_interfaces_mutex); | 3365 | mutex_lock(&ipmi_interfaces_mutex); |
3419 | /* Look for a hole in the numbers. */ | 3366 | /* Look for a hole in the numbers. */ |
3420 | i = 0; | 3367 | i = 0; |
@@ -3445,25 +3392,14 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers, | |||
3445 | mutex_lock(&intf->bmc_reg_mutex); | 3392 | mutex_lock(&intf->bmc_reg_mutex); |
3446 | rv = __scan_channels(intf, &id); | 3393 | rv = __scan_channels(intf, &id); |
3447 | mutex_unlock(&intf->bmc_reg_mutex); | 3394 | mutex_unlock(&intf->bmc_reg_mutex); |
3448 | if (rv) | ||
3449 | goto out; | ||
3450 | |||
3451 | #ifdef CONFIG_IPMI_PROC_INTERFACE | ||
3452 | rv = add_proc_entries(intf, i); | ||
3453 | #endif | ||
3454 | 3395 | ||
3455 | out: | 3396 | out: |
3456 | if (rv) { | 3397 | if (rv) { |
3457 | ipmi_bmc_unregister(intf); | 3398 | ipmi_bmc_unregister(intf); |
3458 | #ifdef CONFIG_IPMI_PROC_INTERFACE | ||
3459 | if (intf->proc_dir) | ||
3460 | remove_proc_entries(intf); | ||
3461 | #endif | ||
3462 | intf->handlers = NULL; | ||
3463 | list_del_rcu(&intf->link); | 3399 | list_del_rcu(&intf->link); |
3464 | mutex_unlock(&ipmi_interfaces_mutex); | 3400 | mutex_unlock(&ipmi_interfaces_mutex); |
3465 | mutex_unlock(&smi_watchers_mutex); | 3401 | synchronize_srcu(&ipmi_interfaces_srcu); |
3466 | synchronize_rcu(); | 3402 | cleanup_srcu_struct(&intf->users_srcu); |
3467 | kref_put(&intf->refcount, intf_free); | 3403 | kref_put(&intf->refcount, intf_free); |
3468 | } else { | 3404 | } else { |
3469 | /* | 3405 | /* |
@@ -3474,16 +3410,16 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers, | |||
3474 | smp_wmb(); | 3410 | smp_wmb(); |
3475 | intf->intf_num = i; | 3411 | intf->intf_num = i; |
3476 | mutex_unlock(&ipmi_interfaces_mutex); | 3412 | mutex_unlock(&ipmi_interfaces_mutex); |
3413 | |||
3477 | /* After this point the interface is legal to use. */ | 3414 | /* After this point the interface is legal to use. */ |
3478 | call_smi_watchers(i, intf->si_dev); | 3415 | call_smi_watchers(i, intf->si_dev); |
3479 | mutex_unlock(&smi_watchers_mutex); | ||
3480 | } | 3416 | } |
3481 | 3417 | ||
3482 | return rv; | 3418 | return rv; |
3483 | } | 3419 | } |
3484 | EXPORT_SYMBOL(ipmi_register_smi); | 3420 | EXPORT_SYMBOL(ipmi_register_smi); |
3485 | 3421 | ||
3486 | static void deliver_smi_err_response(ipmi_smi_t intf, | 3422 | static void deliver_smi_err_response(struct ipmi_smi *intf, |
3487 | struct ipmi_smi_msg *msg, | 3423 | struct ipmi_smi_msg *msg, |
3488 | unsigned char err) | 3424 | unsigned char err) |
3489 | { | 3425 | { |
@@ -3495,7 +3431,7 @@ static void deliver_smi_err_response(ipmi_smi_t intf, | |||
3495 | handle_one_recv_msg(intf, msg); | 3431 | handle_one_recv_msg(intf, msg); |
3496 | } | 3432 | } |
3497 | 3433 | ||
3498 | static void cleanup_smi_msgs(ipmi_smi_t intf) | 3434 | static void cleanup_smi_msgs(struct ipmi_smi *intf) |
3499 | { | 3435 | { |
3500 | int i; | 3436 | int i; |
3501 | struct seq_table *ent; | 3437 | struct seq_table *ent; |
@@ -3528,60 +3464,58 @@ static void cleanup_smi_msgs(ipmi_smi_t intf) | |||
3528 | } | 3464 | } |
3529 | 3465 | ||
3530 | for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { | 3466 | for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { |
3531 | ent = &(intf->seq_table[i]); | 3467 | ent = &intf->seq_table[i]; |
3532 | if (!ent->inuse) | 3468 | if (!ent->inuse) |
3533 | continue; | 3469 | continue; |
3534 | deliver_err_response(ent->recv_msg, IPMI_ERR_UNSPECIFIED); | 3470 | deliver_err_response(intf, ent->recv_msg, IPMI_ERR_UNSPECIFIED); |
3535 | } | 3471 | } |
3536 | } | 3472 | } |
3537 | 3473 | ||
3538 | int ipmi_unregister_smi(ipmi_smi_t intf) | 3474 | void ipmi_unregister_smi(struct ipmi_smi *intf) |
3539 | { | 3475 | { |
3540 | struct ipmi_smi_watcher *w; | 3476 | struct ipmi_smi_watcher *w; |
3541 | int intf_num = intf->intf_num; | 3477 | int intf_num = intf->intf_num, index; |
3542 | ipmi_user_t user; | ||
3543 | 3478 | ||
3544 | mutex_lock(&smi_watchers_mutex); | ||
3545 | mutex_lock(&ipmi_interfaces_mutex); | 3479 | mutex_lock(&ipmi_interfaces_mutex); |
3546 | intf->intf_num = -1; | 3480 | intf->intf_num = -1; |
3547 | intf->in_shutdown = true; | 3481 | intf->in_shutdown = true; |
3548 | list_del_rcu(&intf->link); | 3482 | list_del_rcu(&intf->link); |
3549 | mutex_unlock(&ipmi_interfaces_mutex); | 3483 | mutex_unlock(&ipmi_interfaces_mutex); |
3550 | synchronize_rcu(); | 3484 | synchronize_srcu(&ipmi_interfaces_srcu); |
3551 | 3485 | ||
3552 | cleanup_smi_msgs(intf); | 3486 | /* At this point no users can be added to the interface. */ |
3553 | |||
3554 | /* Clean up the effects of users on the lower-level software. */ | ||
3555 | mutex_lock(&ipmi_interfaces_mutex); | ||
3556 | rcu_read_lock(); | ||
3557 | list_for_each_entry_rcu(user, &intf->users, link) { | ||
3558 | module_put(intf->handlers->owner); | ||
3559 | if (intf->handlers->dec_usecount) | ||
3560 | intf->handlers->dec_usecount(intf->send_info); | ||
3561 | } | ||
3562 | rcu_read_unlock(); | ||
3563 | intf->handlers = NULL; | ||
3564 | mutex_unlock(&ipmi_interfaces_mutex); | ||
3565 | |||
3566 | #ifdef CONFIG_IPMI_PROC_INTERFACE | ||
3567 | remove_proc_entries(intf); | ||
3568 | #endif | ||
3569 | ipmi_bmc_unregister(intf); | ||
3570 | 3487 | ||
3571 | /* | 3488 | /* |
3572 | * Call all the watcher interfaces to tell them that | 3489 | * Call all the watcher interfaces to tell them that |
3573 | * an interface is gone. | 3490 | * an interface is going away. |
3574 | */ | 3491 | */ |
3492 | mutex_lock(&smi_watchers_mutex); | ||
3575 | list_for_each_entry(w, &smi_watchers, link) | 3493 | list_for_each_entry(w, &smi_watchers, link) |
3576 | w->smi_gone(intf_num); | 3494 | w->smi_gone(intf_num); |
3577 | mutex_unlock(&smi_watchers_mutex); | 3495 | mutex_unlock(&smi_watchers_mutex); |
3578 | 3496 | ||
3497 | index = srcu_read_lock(&intf->users_srcu); | ||
3498 | while (!list_empty(&intf->users)) { | ||
3499 | struct ipmi_user *user = | ||
3500 | container_of(list_next_rcu(&intf->users), | ||
3501 | struct ipmi_user, link); | ||
3502 | |||
3503 | _ipmi_destroy_user(user); | ||
3504 | } | ||
3505 | srcu_read_unlock(&intf->users_srcu, index); | ||
3506 | |||
3507 | intf->handlers->shutdown(intf->send_info); | ||
3508 | |||
3509 | cleanup_smi_msgs(intf); | ||
3510 | |||
3511 | ipmi_bmc_unregister(intf); | ||
3512 | |||
3513 | cleanup_srcu_struct(&intf->users_srcu); | ||
3579 | kref_put(&intf->refcount, intf_free); | 3514 | kref_put(&intf->refcount, intf_free); |
3580 | return 0; | ||
3581 | } | 3515 | } |
3582 | EXPORT_SYMBOL(ipmi_unregister_smi); | 3516 | EXPORT_SYMBOL(ipmi_unregister_smi); |
3583 | 3517 | ||
3584 | static int handle_ipmb_get_msg_rsp(ipmi_smi_t intf, | 3518 | static int handle_ipmb_get_msg_rsp(struct ipmi_smi *intf, |
3585 | struct ipmi_smi_msg *msg) | 3519 | struct ipmi_smi_msg *msg) |
3586 | { | 3520 | { |
3587 | struct ipmi_ipmb_addr ipmb_addr; | 3521 | struct ipmi_ipmb_addr ipmb_addr; |
@@ -3616,7 +3550,7 @@ static int handle_ipmb_get_msg_rsp(ipmi_smi_t intf, | |||
3616 | msg->rsp[3] & 0x0f, | 3550 | msg->rsp[3] & 0x0f, |
3617 | msg->rsp[8], | 3551 | msg->rsp[8], |
3618 | (msg->rsp[4] >> 2) & (~1), | 3552 | (msg->rsp[4] >> 2) & (~1), |
3619 | (struct ipmi_addr *) &(ipmb_addr), | 3553 | (struct ipmi_addr *) &ipmb_addr, |
3620 | &recv_msg)) { | 3554 | &recv_msg)) { |
3621 | /* | 3555 | /* |
3622 | * We were unable to find the sequence number, | 3556 | * We were unable to find the sequence number, |
@@ -3626,9 +3560,7 @@ static int handle_ipmb_get_msg_rsp(ipmi_smi_t intf, | |||
3626 | return 0; | 3560 | return 0; |
3627 | } | 3561 | } |
3628 | 3562 | ||
3629 | memcpy(recv_msg->msg_data, | 3563 | memcpy(recv_msg->msg_data, &msg->rsp[9], msg->rsp_size - 9); |
3630 | &(msg->rsp[9]), | ||
3631 | msg->rsp_size - 9); | ||
3632 | /* | 3564 | /* |
3633 | * The other fields matched, so no need to set them, except | 3565 | * The other fields matched, so no need to set them, except |
3634 | * for netfn, which needs to be the response that was | 3566 | * for netfn, which needs to be the response that was |
@@ -3638,13 +3570,15 @@ static int handle_ipmb_get_msg_rsp(ipmi_smi_t intf, | |||
3638 | recv_msg->msg.data = recv_msg->msg_data; | 3570 | recv_msg->msg.data = recv_msg->msg_data; |
3639 | recv_msg->msg.data_len = msg->rsp_size - 10; | 3571 | recv_msg->msg.data_len = msg->rsp_size - 10; |
3640 | recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; | 3572 | recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; |
3641 | ipmi_inc_stat(intf, handled_ipmb_responses); | 3573 | if (deliver_response(intf, recv_msg)) |
3642 | deliver_response(recv_msg); | 3574 | ipmi_inc_stat(intf, unhandled_ipmb_responses); |
3575 | else | ||
3576 | ipmi_inc_stat(intf, handled_ipmb_responses); | ||
3643 | 3577 | ||
3644 | return 0; | 3578 | return 0; |
3645 | } | 3579 | } |
3646 | 3580 | ||
3647 | static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf, | 3581 | static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf, |
3648 | struct ipmi_smi_msg *msg) | 3582 | struct ipmi_smi_msg *msg) |
3649 | { | 3583 | { |
3650 | struct cmd_rcvr *rcvr; | 3584 | struct cmd_rcvr *rcvr; |
@@ -3652,7 +3586,7 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf, | |||
3652 | unsigned char netfn; | 3586 | unsigned char netfn; |
3653 | unsigned char cmd; | 3587 | unsigned char cmd; |
3654 | unsigned char chan; | 3588 | unsigned char chan; |
3655 | ipmi_user_t user = NULL; | 3589 | struct ipmi_user *user = NULL; |
3656 | struct ipmi_ipmb_addr *ipmb_addr; | 3590 | struct ipmi_ipmb_addr *ipmb_addr; |
3657 | struct ipmi_recv_msg *recv_msg; | 3591 | struct ipmi_recv_msg *recv_msg; |
3658 | 3592 | ||
@@ -3689,24 +3623,17 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf, | |||
3689 | msg->data[2] = msg->rsp[3]; | 3623 | msg->data[2] = msg->rsp[3]; |
3690 | msg->data[3] = msg->rsp[6]; | 3624 | msg->data[3] = msg->rsp[6]; |
3691 | msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3); | 3625 | msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3); |
3692 | msg->data[5] = ipmb_checksum(&(msg->data[3]), 2); | 3626 | msg->data[5] = ipmb_checksum(&msg->data[3], 2); |
3693 | msg->data[6] = intf->addrinfo[msg->rsp[3] & 0xf].address; | 3627 | msg->data[6] = intf->addrinfo[msg->rsp[3] & 0xf].address; |
3694 | /* rqseq/lun */ | 3628 | /* rqseq/lun */ |
3695 | msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3); | 3629 | msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3); |
3696 | msg->data[8] = msg->rsp[8]; /* cmd */ | 3630 | msg->data[8] = msg->rsp[8]; /* cmd */ |
3697 | msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE; | 3631 | msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE; |
3698 | msg->data[10] = ipmb_checksum(&(msg->data[6]), 4); | 3632 | msg->data[10] = ipmb_checksum(&msg->data[6], 4); |
3699 | msg->data_size = 11; | 3633 | msg->data_size = 11; |
3700 | 3634 | ||
3701 | #ifdef DEBUG_MSGING | 3635 | ipmi_debug_msg("Invalid command:", msg->data, msg->data_size); |
3702 | { | 3636 | |
3703 | int m; | ||
3704 | printk("Invalid command:"); | ||
3705 | for (m = 0; m < msg->data_size; m++) | ||
3706 | printk(" %2.2x", msg->data[m]); | ||
3707 | printk("\n"); | ||
3708 | } | ||
3709 | #endif | ||
3710 | rcu_read_lock(); | 3637 | rcu_read_lock(); |
3711 | if (!intf->in_shutdown) { | 3638 | if (!intf->in_shutdown) { |
3712 | smi_send(intf, intf->handlers, msg, 0); | 3639 | smi_send(intf, intf->handlers, msg, 0); |
@@ -3719,9 +3646,6 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf, | |||
3719 | } | 3646 | } |
3720 | rcu_read_unlock(); | 3647 | rcu_read_unlock(); |
3721 | } else { | 3648 | } else { |
3722 | /* Deliver the message to the user. */ | ||
3723 | ipmi_inc_stat(intf, handled_commands); | ||
3724 | |||
3725 | recv_msg = ipmi_alloc_recv_msg(); | 3649 | recv_msg = ipmi_alloc_recv_msg(); |
3726 | if (!recv_msg) { | 3650 | if (!recv_msg) { |
3727 | /* | 3651 | /* |
@@ -3755,17 +3679,19 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf, | |||
3755 | * at the end also needs to be removed. | 3679 | * at the end also needs to be removed. |
3756 | */ | 3680 | */ |
3757 | recv_msg->msg.data_len = msg->rsp_size - 10; | 3681 | recv_msg->msg.data_len = msg->rsp_size - 10; |
3758 | memcpy(recv_msg->msg_data, | 3682 | memcpy(recv_msg->msg_data, &msg->rsp[9], |
3759 | &(msg->rsp[9]), | ||
3760 | msg->rsp_size - 10); | 3683 | msg->rsp_size - 10); |
3761 | deliver_response(recv_msg); | 3684 | if (deliver_response(intf, recv_msg)) |
3685 | ipmi_inc_stat(intf, unhandled_commands); | ||
3686 | else | ||
3687 | ipmi_inc_stat(intf, handled_commands); | ||
3762 | } | 3688 | } |
3763 | } | 3689 | } |
3764 | 3690 | ||
3765 | return rv; | 3691 | return rv; |
3766 | } | 3692 | } |
3767 | 3693 | ||
3768 | static int handle_lan_get_msg_rsp(ipmi_smi_t intf, | 3694 | static int handle_lan_get_msg_rsp(struct ipmi_smi *intf, |
3769 | struct ipmi_smi_msg *msg) | 3695 | struct ipmi_smi_msg *msg) |
3770 | { | 3696 | { |
3771 | struct ipmi_lan_addr lan_addr; | 3697 | struct ipmi_lan_addr lan_addr; |
@@ -3804,7 +3730,7 @@ static int handle_lan_get_msg_rsp(ipmi_smi_t intf, | |||
3804 | msg->rsp[3] & 0x0f, | 3730 | msg->rsp[3] & 0x0f, |
3805 | msg->rsp[10], | 3731 | msg->rsp[10], |
3806 | (msg->rsp[6] >> 2) & (~1), | 3732 | (msg->rsp[6] >> 2) & (~1), |
3807 | (struct ipmi_addr *) &(lan_addr), | 3733 | (struct ipmi_addr *) &lan_addr, |
3808 | &recv_msg)) { | 3734 | &recv_msg)) { |
3809 | /* | 3735 | /* |
3810 | * We were unable to find the sequence number, | 3736 | * We were unable to find the sequence number, |
@@ -3814,9 +3740,7 @@ static int handle_lan_get_msg_rsp(ipmi_smi_t intf, | |||
3814 | return 0; | 3740 | return 0; |
3815 | } | 3741 | } |
3816 | 3742 | ||
3817 | memcpy(recv_msg->msg_data, | 3743 | memcpy(recv_msg->msg_data, &msg->rsp[11], msg->rsp_size - 11); |
3818 | &(msg->rsp[11]), | ||
3819 | msg->rsp_size - 11); | ||
3820 | /* | 3744 | /* |
3821 | * The other fields matched, so no need to set them, except | 3745 | * The other fields matched, so no need to set them, except |
3822 | * for netfn, which needs to be the response that was | 3746 | * for netfn, which needs to be the response that was |
@@ -3826,13 +3750,15 @@ static int handle_lan_get_msg_rsp(ipmi_smi_t intf, | |||
3826 | recv_msg->msg.data = recv_msg->msg_data; | 3750 | recv_msg->msg.data = recv_msg->msg_data; |
3827 | recv_msg->msg.data_len = msg->rsp_size - 12; | 3751 | recv_msg->msg.data_len = msg->rsp_size - 12; |
3828 | recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; | 3752 | recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; |
3829 | ipmi_inc_stat(intf, handled_lan_responses); | 3753 | if (deliver_response(intf, recv_msg)) |
3830 | deliver_response(recv_msg); | 3754 | ipmi_inc_stat(intf, unhandled_lan_responses); |
3755 | else | ||
3756 | ipmi_inc_stat(intf, handled_lan_responses); | ||
3831 | 3757 | ||
3832 | return 0; | 3758 | return 0; |
3833 | } | 3759 | } |
3834 | 3760 | ||
3835 | static int handle_lan_get_msg_cmd(ipmi_smi_t intf, | 3761 | static int handle_lan_get_msg_cmd(struct ipmi_smi *intf, |
3836 | struct ipmi_smi_msg *msg) | 3762 | struct ipmi_smi_msg *msg) |
3837 | { | 3763 | { |
3838 | struct cmd_rcvr *rcvr; | 3764 | struct cmd_rcvr *rcvr; |
@@ -3840,7 +3766,7 @@ static int handle_lan_get_msg_cmd(ipmi_smi_t intf, | |||
3840 | unsigned char netfn; | 3766 | unsigned char netfn; |
3841 | unsigned char cmd; | 3767 | unsigned char cmd; |
3842 | unsigned char chan; | 3768 | unsigned char chan; |
3843 | ipmi_user_t user = NULL; | 3769 | struct ipmi_user *user = NULL; |
3844 | struct ipmi_lan_addr *lan_addr; | 3770 | struct ipmi_lan_addr *lan_addr; |
3845 | struct ipmi_recv_msg *recv_msg; | 3771 | struct ipmi_recv_msg *recv_msg; |
3846 | 3772 | ||
@@ -3878,9 +3804,6 @@ static int handle_lan_get_msg_cmd(ipmi_smi_t intf, | |||
3878 | */ | 3804 | */ |
3879 | rv = 0; | 3805 | rv = 0; |
3880 | } else { | 3806 | } else { |
3881 | /* Deliver the message to the user. */ | ||
3882 | ipmi_inc_stat(intf, handled_commands); | ||
3883 | |||
3884 | recv_msg = ipmi_alloc_recv_msg(); | 3807 | recv_msg = ipmi_alloc_recv_msg(); |
3885 | if (!recv_msg) { | 3808 | if (!recv_msg) { |
3886 | /* | 3809 | /* |
@@ -3916,10 +3839,12 @@ static int handle_lan_get_msg_cmd(ipmi_smi_t intf, | |||
3916 | * at the end also needs to be removed. | 3839 | * at the end also needs to be removed. |
3917 | */ | 3840 | */ |
3918 | recv_msg->msg.data_len = msg->rsp_size - 12; | 3841 | recv_msg->msg.data_len = msg->rsp_size - 12; |
3919 | memcpy(recv_msg->msg_data, | 3842 | memcpy(recv_msg->msg_data, &msg->rsp[11], |
3920 | &(msg->rsp[11]), | ||
3921 | msg->rsp_size - 12); | 3843 | msg->rsp_size - 12); |
3922 | deliver_response(recv_msg); | 3844 | if (deliver_response(intf, recv_msg)) |
3845 | ipmi_inc_stat(intf, unhandled_commands); | ||
3846 | else | ||
3847 | ipmi_inc_stat(intf, handled_commands); | ||
3923 | } | 3848 | } |
3924 | } | 3849 | } |
3925 | 3850 | ||
@@ -3932,7 +3857,7 @@ static int handle_lan_get_msg_cmd(ipmi_smi_t intf, | |||
3932 | * the OEM. See IPMI 2.0 specification, Chapter 6 and | 3857 | * the OEM. See IPMI 2.0 specification, Chapter 6 and |
3933 | * Chapter 22, sections 22.6 and 22.24 for more details. | 3858 | * Chapter 22, sections 22.6 and 22.24 for more details. |
3934 | */ | 3859 | */ |
3935 | static int handle_oem_get_msg_cmd(ipmi_smi_t intf, | 3860 | static int handle_oem_get_msg_cmd(struct ipmi_smi *intf, |
3936 | struct ipmi_smi_msg *msg) | 3861 | struct ipmi_smi_msg *msg) |
3937 | { | 3862 | { |
3938 | struct cmd_rcvr *rcvr; | 3863 | struct cmd_rcvr *rcvr; |
@@ -3940,7 +3865,7 @@ static int handle_oem_get_msg_cmd(ipmi_smi_t intf, | |||
3940 | unsigned char netfn; | 3865 | unsigned char netfn; |
3941 | unsigned char cmd; | 3866 | unsigned char cmd; |
3942 | unsigned char chan; | 3867 | unsigned char chan; |
3943 | ipmi_user_t user = NULL; | 3868 | struct ipmi_user *user = NULL; |
3944 | struct ipmi_system_interface_addr *smi_addr; | 3869 | struct ipmi_system_interface_addr *smi_addr; |
3945 | struct ipmi_recv_msg *recv_msg; | 3870 | struct ipmi_recv_msg *recv_msg; |
3946 | 3871 | ||
@@ -3987,9 +3912,6 @@ static int handle_oem_get_msg_cmd(ipmi_smi_t intf, | |||
3987 | 3912 | ||
3988 | rv = 0; | 3913 | rv = 0; |
3989 | } else { | 3914 | } else { |
3990 | /* Deliver the message to the user. */ | ||
3991 | ipmi_inc_stat(intf, handled_commands); | ||
3992 | |||
3993 | recv_msg = ipmi_alloc_recv_msg(); | 3915 | recv_msg = ipmi_alloc_recv_msg(); |
3994 | if (!recv_msg) { | 3916 | if (!recv_msg) { |
3995 | /* | 3917 | /* |
@@ -4007,7 +3929,7 @@ static int handle_oem_get_msg_cmd(ipmi_smi_t intf, | |||
4007 | * requirements | 3929 | * requirements |
4008 | */ | 3930 | */ |
4009 | smi_addr = ((struct ipmi_system_interface_addr *) | 3931 | smi_addr = ((struct ipmi_system_interface_addr *) |
4010 | &(recv_msg->addr)); | 3932 | &recv_msg->addr); |
4011 | smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; | 3933 | smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; |
4012 | smi_addr->channel = IPMI_BMC_CHANNEL; | 3934 | smi_addr->channel = IPMI_BMC_CHANNEL; |
4013 | smi_addr->lun = msg->rsp[0] & 3; | 3935 | smi_addr->lun = msg->rsp[0] & 3; |
@@ -4024,10 +3946,12 @@ static int handle_oem_get_msg_cmd(ipmi_smi_t intf, | |||
4024 | * the Channel Byte in the "GET MESSAGE" command | 3946 | * the Channel Byte in the "GET MESSAGE" command |
4025 | */ | 3947 | */ |
4026 | recv_msg->msg.data_len = msg->rsp_size - 4; | 3948 | recv_msg->msg.data_len = msg->rsp_size - 4; |
4027 | memcpy(recv_msg->msg_data, | 3949 | memcpy(recv_msg->msg_data, &msg->rsp[4], |
4028 | &(msg->rsp[4]), | ||
4029 | msg->rsp_size - 4); | 3950 | msg->rsp_size - 4); |
4030 | deliver_response(recv_msg); | 3951 | if (deliver_response(intf, recv_msg)) |
3952 | ipmi_inc_stat(intf, unhandled_commands); | ||
3953 | else | ||
3954 | ipmi_inc_stat(intf, handled_commands); | ||
4031 | } | 3955 | } |
4032 | } | 3956 | } |
4033 | 3957 | ||
@@ -4040,26 +3964,25 @@ static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg, | |||
4040 | struct ipmi_system_interface_addr *smi_addr; | 3964 | struct ipmi_system_interface_addr *smi_addr; |
4041 | 3965 | ||
4042 | recv_msg->msgid = 0; | 3966 | recv_msg->msgid = 0; |
4043 | smi_addr = (struct ipmi_system_interface_addr *) &(recv_msg->addr); | 3967 | smi_addr = (struct ipmi_system_interface_addr *) &recv_msg->addr; |
4044 | smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; | 3968 | smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; |
4045 | smi_addr->channel = IPMI_BMC_CHANNEL; | 3969 | smi_addr->channel = IPMI_BMC_CHANNEL; |
4046 | smi_addr->lun = msg->rsp[0] & 3; | 3970 | smi_addr->lun = msg->rsp[0] & 3; |
4047 | recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE; | 3971 | recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE; |
4048 | recv_msg->msg.netfn = msg->rsp[0] >> 2; | 3972 | recv_msg->msg.netfn = msg->rsp[0] >> 2; |
4049 | recv_msg->msg.cmd = msg->rsp[1]; | 3973 | recv_msg->msg.cmd = msg->rsp[1]; |
4050 | memcpy(recv_msg->msg_data, &(msg->rsp[3]), msg->rsp_size - 3); | 3974 | memcpy(recv_msg->msg_data, &msg->rsp[3], msg->rsp_size - 3); |
4051 | recv_msg->msg.data = recv_msg->msg_data; | 3975 | recv_msg->msg.data = recv_msg->msg_data; |
4052 | recv_msg->msg.data_len = msg->rsp_size - 3; | 3976 | recv_msg->msg.data_len = msg->rsp_size - 3; |
4053 | } | 3977 | } |
4054 | 3978 | ||
4055 | static int handle_read_event_rsp(ipmi_smi_t intf, | 3979 | static int handle_read_event_rsp(struct ipmi_smi *intf, |
4056 | struct ipmi_smi_msg *msg) | 3980 | struct ipmi_smi_msg *msg) |
4057 | { | 3981 | { |
4058 | struct ipmi_recv_msg *recv_msg, *recv_msg2; | 3982 | struct ipmi_recv_msg *recv_msg, *recv_msg2; |
4059 | struct list_head msgs; | 3983 | struct list_head msgs; |
4060 | ipmi_user_t user; | 3984 | struct ipmi_user *user; |
4061 | int rv = 0; | 3985 | int rv = 0, deliver_count = 0, index; |
4062 | int deliver_count = 0; | ||
4063 | unsigned long flags; | 3986 | unsigned long flags; |
4064 | 3987 | ||
4065 | if (msg->rsp_size < 19) { | 3988 | if (msg->rsp_size < 19) { |
@@ -4083,7 +4006,7 @@ static int handle_read_event_rsp(ipmi_smi_t intf, | |||
4083 | * Allocate and fill in one message for every user that is | 4006 | * Allocate and fill in one message for every user that is |
4084 | * getting events. | 4007 | * getting events. |
4085 | */ | 4008 | */ |
4086 | rcu_read_lock(); | 4009 | index = srcu_read_lock(&intf->users_srcu); |
4087 | list_for_each_entry_rcu(user, &intf->users, link) { | 4010 | list_for_each_entry_rcu(user, &intf->users, link) { |
4088 | if (!user->gets_events) | 4011 | if (!user->gets_events) |
4089 | continue; | 4012 | continue; |
@@ -4110,15 +4033,15 @@ static int handle_read_event_rsp(ipmi_smi_t intf, | |||
4110 | copy_event_into_recv_msg(recv_msg, msg); | 4033 | copy_event_into_recv_msg(recv_msg, msg); |
4111 | recv_msg->user = user; | 4034 | recv_msg->user = user; |
4112 | kref_get(&user->refcount); | 4035 | kref_get(&user->refcount); |
4113 | list_add_tail(&(recv_msg->link), &msgs); | 4036 | list_add_tail(&recv_msg->link, &msgs); |
4114 | } | 4037 | } |
4115 | rcu_read_unlock(); | 4038 | srcu_read_unlock(&intf->users_srcu, index); |
4116 | 4039 | ||
4117 | if (deliver_count) { | 4040 | if (deliver_count) { |
4118 | /* Now deliver all the messages. */ | 4041 | /* Now deliver all the messages. */ |
4119 | list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) { | 4042 | list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) { |
4120 | list_del(&recv_msg->link); | 4043 | list_del(&recv_msg->link); |
4121 | deliver_response(recv_msg); | 4044 | deliver_local_response(intf, recv_msg); |
4122 | } | 4045 | } |
4123 | } else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) { | 4046 | } else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) { |
4124 | /* | 4047 | /* |
@@ -4137,7 +4060,7 @@ static int handle_read_event_rsp(ipmi_smi_t intf, | |||
4137 | } | 4060 | } |
4138 | 4061 | ||
4139 | copy_event_into_recv_msg(recv_msg, msg); | 4062 | copy_event_into_recv_msg(recv_msg, msg); |
4140 | list_add_tail(&(recv_msg->link), &(intf->waiting_events)); | 4063 | list_add_tail(&recv_msg->link, &intf->waiting_events); |
4141 | intf->waiting_events_count++; | 4064 | intf->waiting_events_count++; |
4142 | } else if (!intf->event_msg_printed) { | 4065 | } else if (!intf->event_msg_printed) { |
4143 | /* | 4066 | /* |
@@ -4150,16 +4073,16 @@ static int handle_read_event_rsp(ipmi_smi_t intf, | |||
4150 | } | 4073 | } |
4151 | 4074 | ||
4152 | out: | 4075 | out: |
4153 | spin_unlock_irqrestore(&(intf->events_lock), flags); | 4076 | spin_unlock_irqrestore(&intf->events_lock, flags); |
4154 | 4077 | ||
4155 | return rv; | 4078 | return rv; |
4156 | } | 4079 | } |
4157 | 4080 | ||
4158 | static int handle_bmc_rsp(ipmi_smi_t intf, | 4081 | static int handle_bmc_rsp(struct ipmi_smi *intf, |
4159 | struct ipmi_smi_msg *msg) | 4082 | struct ipmi_smi_msg *msg) |
4160 | { | 4083 | { |
4161 | struct ipmi_recv_msg *recv_msg; | 4084 | struct ipmi_recv_msg *recv_msg; |
4162 | struct ipmi_user *user; | 4085 | struct ipmi_system_interface_addr *smi_addr; |
4163 | 4086 | ||
4164 | recv_msg = (struct ipmi_recv_msg *) msg->user_data; | 4087 | recv_msg = (struct ipmi_recv_msg *) msg->user_data; |
4165 | if (recv_msg == NULL) { | 4088 | if (recv_msg == NULL) { |
@@ -4168,32 +4091,19 @@ static int handle_bmc_rsp(ipmi_smi_t intf, | |||
4168 | return 0; | 4091 | return 0; |
4169 | } | 4092 | } |
4170 | 4093 | ||
4171 | user = recv_msg->user; | 4094 | recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; |
4172 | /* Make sure the user still exists. */ | 4095 | recv_msg->msgid = msg->msgid; |
4173 | if (user && !user->valid) { | 4096 | smi_addr = ((struct ipmi_system_interface_addr *) |
4174 | /* The user for the message went away, so give up. */ | 4097 | &recv_msg->addr); |
4175 | ipmi_inc_stat(intf, unhandled_local_responses); | 4098 | smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; |
4176 | ipmi_free_recv_msg(recv_msg); | 4099 | smi_addr->channel = IPMI_BMC_CHANNEL; |
4177 | } else { | 4100 | smi_addr->lun = msg->rsp[0] & 3; |
4178 | struct ipmi_system_interface_addr *smi_addr; | 4101 | recv_msg->msg.netfn = msg->rsp[0] >> 2; |
4179 | 4102 | recv_msg->msg.cmd = msg->rsp[1]; | |
4180 | ipmi_inc_stat(intf, handled_local_responses); | 4103 | memcpy(recv_msg->msg_data, &msg->rsp[2], msg->rsp_size - 2); |
4181 | recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; | 4104 | recv_msg->msg.data = recv_msg->msg_data; |
4182 | recv_msg->msgid = msg->msgid; | 4105 | recv_msg->msg.data_len = msg->rsp_size - 2; |
4183 | smi_addr = ((struct ipmi_system_interface_addr *) | 4106 | deliver_local_response(intf, recv_msg); |
4184 | &(recv_msg->addr)); | ||
4185 | smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; | ||
4186 | smi_addr->channel = IPMI_BMC_CHANNEL; | ||
4187 | smi_addr->lun = msg->rsp[0] & 3; | ||
4188 | recv_msg->msg.netfn = msg->rsp[0] >> 2; | ||
4189 | recv_msg->msg.cmd = msg->rsp[1]; | ||
4190 | memcpy(recv_msg->msg_data, | ||
4191 | &(msg->rsp[2]), | ||
4192 | msg->rsp_size - 2); | ||
4193 | recv_msg->msg.data = recv_msg->msg_data; | ||
4194 | recv_msg->msg.data_len = msg->rsp_size - 2; | ||
4195 | deliver_response(recv_msg); | ||
4196 | } | ||
4197 | 4107 | ||
4198 | return 0; | 4108 | return 0; |
4199 | } | 4109 | } |
@@ -4203,19 +4113,13 @@ static int handle_bmc_rsp(ipmi_smi_t intf, | |||
4203 | * 0 if the message should be freed, or -1 if the message should not | 4113 | * 0 if the message should be freed, or -1 if the message should not |
4204 | * be freed or requeued. | 4114 | * be freed or requeued. |
4205 | */ | 4115 | */ |
4206 | static int handle_one_recv_msg(ipmi_smi_t intf, | 4116 | static int handle_one_recv_msg(struct ipmi_smi *intf, |
4207 | struct ipmi_smi_msg *msg) | 4117 | struct ipmi_smi_msg *msg) |
4208 | { | 4118 | { |
4209 | int requeue; | 4119 | int requeue; |
4210 | int chan; | 4120 | int chan; |
4211 | 4121 | ||
4212 | #ifdef DEBUG_MSGING | 4122 | ipmi_debug_msg("Recv:", msg->rsp, msg->rsp_size); |
4213 | int m; | ||
4214 | printk("Recv:"); | ||
4215 | for (m = 0; m < msg->rsp_size; m++) | ||
4216 | printk(" %2.2x", msg->rsp[m]); | ||
4217 | printk("\n"); | ||
4218 | #endif | ||
4219 | if (msg->rsp_size < 2) { | 4123 | if (msg->rsp_size < 2) { |
4220 | /* Message is too small to be correct. */ | 4124 | /* Message is too small to be correct. */ |
4221 | dev_warn(intf->si_dev, | 4125 | dev_warn(intf->si_dev, |
@@ -4252,7 +4156,7 @@ static int handle_one_recv_msg(ipmi_smi_t intf, | |||
4252 | * It's a response to a response we sent. For this we | 4156 | * It's a response to a response we sent. For this we |
4253 | * deliver a send message response to the user. | 4157 | * deliver a send message response to the user. |
4254 | */ | 4158 | */ |
4255 | struct ipmi_recv_msg *recv_msg = msg->user_data; | 4159 | struct ipmi_recv_msg *recv_msg = msg->user_data; |
4256 | 4160 | ||
4257 | requeue = 0; | 4161 | requeue = 0; |
4258 | if (msg->rsp_size < 2) | 4162 | if (msg->rsp_size < 2) |
@@ -4267,15 +4171,11 @@ static int handle_one_recv_msg(ipmi_smi_t intf, | |||
4267 | if (!recv_msg) | 4171 | if (!recv_msg) |
4268 | goto out; | 4172 | goto out; |
4269 | 4173 | ||
4270 | /* Make sure the user still exists. */ | ||
4271 | if (!recv_msg->user || !recv_msg->user->valid) | ||
4272 | goto out; | ||
4273 | |||
4274 | recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE; | 4174 | recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE; |
4275 | recv_msg->msg.data = recv_msg->msg_data; | 4175 | recv_msg->msg.data = recv_msg->msg_data; |
4276 | recv_msg->msg.data_len = 1; | 4176 | recv_msg->msg.data_len = 1; |
4277 | recv_msg->msg_data[0] = msg->rsp[2]; | 4177 | recv_msg->msg_data[0] = msg->rsp[2]; |
4278 | deliver_response(recv_msg); | 4178 | deliver_local_response(intf, recv_msg); |
4279 | } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) | 4179 | } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) |
4280 | && (msg->rsp[1] == IPMI_GET_MSG_CMD)) { | 4180 | && (msg->rsp[1] == IPMI_GET_MSG_CMD)) { |
4281 | struct ipmi_channel *chans; | 4181 | struct ipmi_channel *chans; |
@@ -4367,7 +4267,7 @@ static int handle_one_recv_msg(ipmi_smi_t intf, | |||
4367 | /* | 4267 | /* |
4368 | * If there are messages in the queue or pretimeouts, handle them. | 4268 | * If there are messages in the queue or pretimeouts, handle them. |
4369 | */ | 4269 | */ |
4370 | static void handle_new_recv_msgs(ipmi_smi_t intf) | 4270 | static void handle_new_recv_msgs(struct ipmi_smi *intf) |
4371 | { | 4271 | { |
4372 | struct ipmi_smi_msg *smi_msg; | 4272 | struct ipmi_smi_msg *smi_msg; |
4373 | unsigned long flags = 0; | 4273 | unsigned long flags = 0; |
@@ -4412,22 +4312,23 @@ static void handle_new_recv_msgs(ipmi_smi_t intf) | |||
4412 | * deliver pretimeouts to all the users. | 4312 | * deliver pretimeouts to all the users. |
4413 | */ | 4313 | */ |
4414 | if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) { | 4314 | if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) { |
4415 | ipmi_user_t user; | 4315 | struct ipmi_user *user; |
4316 | int index; | ||
4416 | 4317 | ||
4417 | rcu_read_lock(); | 4318 | index = srcu_read_lock(&intf->users_srcu); |
4418 | list_for_each_entry_rcu(user, &intf->users, link) { | 4319 | list_for_each_entry_rcu(user, &intf->users, link) { |
4419 | if (user->handler->ipmi_watchdog_pretimeout) | 4320 | if (user->handler->ipmi_watchdog_pretimeout) |
4420 | user->handler->ipmi_watchdog_pretimeout( | 4321 | user->handler->ipmi_watchdog_pretimeout( |
4421 | user->handler_data); | 4322 | user->handler_data); |
4422 | } | 4323 | } |
4423 | rcu_read_unlock(); | 4324 | srcu_read_unlock(&intf->users_srcu, index); |
4424 | } | 4325 | } |
4425 | } | 4326 | } |
4426 | 4327 | ||
4427 | static void smi_recv_tasklet(unsigned long val) | 4328 | static void smi_recv_tasklet(unsigned long val) |
4428 | { | 4329 | { |
4429 | unsigned long flags = 0; /* keep us warning-free. */ | 4330 | unsigned long flags = 0; /* keep us warning-free. */ |
4430 | ipmi_smi_t intf = (ipmi_smi_t) val; | 4331 | struct ipmi_smi *intf = (struct ipmi_smi *) val; |
4431 | int run_to_completion = intf->run_to_completion; | 4332 | int run_to_completion = intf->run_to_completion; |
4432 | struct ipmi_smi_msg *newmsg = NULL; | 4333 | struct ipmi_smi_msg *newmsg = NULL; |
4433 | 4334 | ||
@@ -4469,7 +4370,7 @@ static void smi_recv_tasklet(unsigned long val) | |||
4469 | } | 4370 | } |
4470 | 4371 | ||
4471 | /* Handle a new message from the lower layer. */ | 4372 | /* Handle a new message from the lower layer. */ |
4472 | void ipmi_smi_msg_received(ipmi_smi_t intf, | 4373 | void ipmi_smi_msg_received(struct ipmi_smi *intf, |
4473 | struct ipmi_smi_msg *msg) | 4374 | struct ipmi_smi_msg *msg) |
4474 | { | 4375 | { |
4475 | unsigned long flags = 0; /* keep us warning-free. */ | 4376 | unsigned long flags = 0; /* keep us warning-free. */ |
@@ -4550,7 +4451,7 @@ free_msg: | |||
4550 | } | 4451 | } |
4551 | EXPORT_SYMBOL(ipmi_smi_msg_received); | 4452 | EXPORT_SYMBOL(ipmi_smi_msg_received); |
4552 | 4453 | ||
4553 | void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf) | 4454 | void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf) |
4554 | { | 4455 | { |
4555 | if (intf->in_shutdown) | 4456 | if (intf->in_shutdown) |
4556 | return; | 4457 | return; |
@@ -4561,7 +4462,7 @@ void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf) | |||
4561 | EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout); | 4462 | EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout); |
4562 | 4463 | ||
4563 | static struct ipmi_smi_msg * | 4464 | static struct ipmi_smi_msg * |
4564 | smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg, | 4465 | smi_from_recv_msg(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg, |
4565 | unsigned char seq, long seqid) | 4466 | unsigned char seq, long seqid) |
4566 | { | 4467 | { |
4567 | struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg(); | 4468 | struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg(); |
@@ -4576,26 +4477,18 @@ smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg, | |||
4576 | smi_msg->data_size = recv_msg->msg.data_len; | 4477 | smi_msg->data_size = recv_msg->msg.data_len; |
4577 | smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid); | 4478 | smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid); |
4578 | 4479 | ||
4579 | #ifdef DEBUG_MSGING | 4480 | ipmi_debug_msg("Resend: ", smi_msg->data, smi_msg->data_size); |
4580 | { | 4481 | |
4581 | int m; | ||
4582 | printk("Resend: "); | ||
4583 | for (m = 0; m < smi_msg->data_size; m++) | ||
4584 | printk(" %2.2x", smi_msg->data[m]); | ||
4585 | printk("\n"); | ||
4586 | } | ||
4587 | #endif | ||
4588 | return smi_msg; | 4482 | return smi_msg; |
4589 | } | 4483 | } |
4590 | 4484 | ||
4591 | static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent, | 4485 | static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent, |
4592 | struct list_head *timeouts, | 4486 | struct list_head *timeouts, |
4593 | unsigned long timeout_period, | 4487 | unsigned long timeout_period, |
4594 | int slot, unsigned long *flags, | 4488 | int slot, unsigned long *flags, |
4595 | unsigned int *waiting_msgs) | 4489 | unsigned int *waiting_msgs) |
4596 | { | 4490 | { |
4597 | struct ipmi_recv_msg *msg; | 4491 | struct ipmi_recv_msg *msg; |
4598 | const struct ipmi_smi_handlers *handlers; | ||
4599 | 4492 | ||
4600 | if (intf->in_shutdown) | 4493 | if (intf->in_shutdown) |
4601 | return; | 4494 | return; |
@@ -4653,8 +4546,7 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent, | |||
4653 | * only for messages to the local MC, which don't get | 4546 | * only for messages to the local MC, which don't get |
4654 | * resent. | 4547 | * resent. |
4655 | */ | 4548 | */ |
4656 | handlers = intf->handlers; | 4549 | if (intf->handlers) { |
4657 | if (handlers) { | ||
4658 | if (is_lan_addr(&ent->recv_msg->addr)) | 4550 | if (is_lan_addr(&ent->recv_msg->addr)) |
4659 | ipmi_inc_stat(intf, | 4551 | ipmi_inc_stat(intf, |
4660 | retransmitted_lan_commands); | 4552 | retransmitted_lan_commands); |
@@ -4662,7 +4554,7 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent, | |||
4662 | ipmi_inc_stat(intf, | 4554 | ipmi_inc_stat(intf, |
4663 | retransmitted_ipmb_commands); | 4555 | retransmitted_ipmb_commands); |
4664 | 4556 | ||
4665 | smi_send(intf, handlers, smi_msg, 0); | 4557 | smi_send(intf, intf->handlers, smi_msg, 0); |
4666 | } else | 4558 | } else |
4667 | ipmi_free_smi_msg(smi_msg); | 4559 | ipmi_free_smi_msg(smi_msg); |
4668 | 4560 | ||
@@ -4670,7 +4562,7 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent, | |||
4670 | } | 4562 | } |
4671 | } | 4563 | } |
4672 | 4564 | ||
4673 | static unsigned int ipmi_timeout_handler(ipmi_smi_t intf, | 4565 | static unsigned int ipmi_timeout_handler(struct ipmi_smi *intf, |
4674 | unsigned long timeout_period) | 4566 | unsigned long timeout_period) |
4675 | { | 4567 | { |
4676 | struct list_head timeouts; | 4568 | struct list_head timeouts; |
@@ -4694,14 +4586,20 @@ static unsigned int ipmi_timeout_handler(ipmi_smi_t intf, | |||
4694 | */ | 4586 | */ |
4695 | INIT_LIST_HEAD(&timeouts); | 4587 | INIT_LIST_HEAD(&timeouts); |
4696 | spin_lock_irqsave(&intf->seq_lock, flags); | 4588 | spin_lock_irqsave(&intf->seq_lock, flags); |
4589 | if (intf->ipmb_maintenance_mode_timeout) { | ||
4590 | if (intf->ipmb_maintenance_mode_timeout <= timeout_period) | ||
4591 | intf->ipmb_maintenance_mode_timeout = 0; | ||
4592 | else | ||
4593 | intf->ipmb_maintenance_mode_timeout -= timeout_period; | ||
4594 | } | ||
4697 | for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) | 4595 | for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) |
4698 | check_msg_timeout(intf, &(intf->seq_table[i]), | 4596 | check_msg_timeout(intf, &intf->seq_table[i], |
4699 | &timeouts, timeout_period, i, | 4597 | &timeouts, timeout_period, i, |
4700 | &flags, &waiting_msgs); | 4598 | &flags, &waiting_msgs); |
4701 | spin_unlock_irqrestore(&intf->seq_lock, flags); | 4599 | spin_unlock_irqrestore(&intf->seq_lock, flags); |
4702 | 4600 | ||
4703 | list_for_each_entry_safe(msg, msg2, &timeouts, link) | 4601 | list_for_each_entry_safe(msg, msg2, &timeouts, link) |
4704 | deliver_err_response(msg, IPMI_TIMEOUT_COMPLETION_CODE); | 4602 | deliver_err_response(intf, msg, IPMI_TIMEOUT_COMPLETION_CODE); |
4705 | 4603 | ||
4706 | /* | 4604 | /* |
4707 | * Maintenance mode handling. Check the timeout | 4605 | * Maintenance mode handling. Check the timeout |
@@ -4731,7 +4629,7 @@ static unsigned int ipmi_timeout_handler(ipmi_smi_t intf, | |||
4731 | return waiting_msgs; | 4629 | return waiting_msgs; |
4732 | } | 4630 | } |
4733 | 4631 | ||
4734 | static void ipmi_request_event(ipmi_smi_t intf) | 4632 | static void ipmi_request_event(struct ipmi_smi *intf) |
4735 | { | 4633 | { |
4736 | /* No event requests when in maintenance mode. */ | 4634 | /* No event requests when in maintenance mode. */ |
4737 | if (intf->maintenance_mode_enable) | 4635 | if (intf->maintenance_mode_enable) |
@@ -4747,13 +4645,13 @@ static atomic_t stop_operation; | |||
4747 | 4645 | ||
4748 | static void ipmi_timeout(struct timer_list *unused) | 4646 | static void ipmi_timeout(struct timer_list *unused) |
4749 | { | 4647 | { |
4750 | ipmi_smi_t intf; | 4648 | struct ipmi_smi *intf; |
4751 | int nt = 0; | 4649 | int nt = 0, index; |
4752 | 4650 | ||
4753 | if (atomic_read(&stop_operation)) | 4651 | if (atomic_read(&stop_operation)) |
4754 | return; | 4652 | return; |
4755 | 4653 | ||
4756 | rcu_read_lock(); | 4654 | index = srcu_read_lock(&ipmi_interfaces_srcu); |
4757 | list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { | 4655 | list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { |
4758 | int lnt = 0; | 4656 | int lnt = 0; |
4759 | 4657 | ||
@@ -4776,13 +4674,13 @@ static void ipmi_timeout(struct timer_list *unused) | |||
4776 | 4674 | ||
4777 | nt += lnt; | 4675 | nt += lnt; |
4778 | } | 4676 | } |
4779 | rcu_read_unlock(); | 4677 | srcu_read_unlock(&ipmi_interfaces_srcu, index); |
4780 | 4678 | ||
4781 | if (nt) | 4679 | if (nt) |
4782 | mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES); | 4680 | mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES); |
4783 | } | 4681 | } |
4784 | 4682 | ||
4785 | static void need_waiter(ipmi_smi_t intf) | 4683 | static void need_waiter(struct ipmi_smi *intf) |
4786 | { | 4684 | { |
4787 | /* Racy, but worst case we start the timer twice. */ | 4685 | /* Racy, but worst case we start the timer twice. */ |
4788 | if (!timer_pending(&ipmi_timer)) | 4686 | if (!timer_pending(&ipmi_timer)) |
@@ -4853,8 +4751,8 @@ static void dummy_recv_done_handler(struct ipmi_recv_msg *msg) | |||
4853 | /* | 4751 | /* |
4854 | * Inside a panic, send a message and wait for a response. | 4752 | * Inside a panic, send a message and wait for a response. |
4855 | */ | 4753 | */ |
4856 | static void ipmi_panic_request_and_wait(ipmi_smi_t intf, | 4754 | static void ipmi_panic_request_and_wait(struct ipmi_smi *intf, |
4857 | struct ipmi_addr *addr, | 4755 | struct ipmi_addr *addr, |
4858 | struct kernel_ipmi_msg *msg) | 4756 | struct kernel_ipmi_msg *msg) |
4859 | { | 4757 | { |
4860 | struct ipmi_smi_msg smi_msg; | 4758 | struct ipmi_smi_msg smi_msg; |
@@ -4885,7 +4783,8 @@ static void ipmi_panic_request_and_wait(ipmi_smi_t intf, | |||
4885 | ipmi_poll(intf); | 4783 | ipmi_poll(intf); |
4886 | } | 4784 | } |
4887 | 4785 | ||
4888 | static void event_receiver_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg) | 4786 | static void event_receiver_fetcher(struct ipmi_smi *intf, |
4787 | struct ipmi_recv_msg *msg) | ||
4889 | { | 4788 | { |
4890 | if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) | 4789 | if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) |
4891 | && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE) | 4790 | && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE) |
@@ -4897,7 +4796,7 @@ static void event_receiver_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg) | |||
4897 | } | 4796 | } |
4898 | } | 4797 | } |
4899 | 4798 | ||
4900 | static void device_id_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg) | 4799 | static void device_id_fetcher(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) |
4901 | { | 4800 | { |
4902 | if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) | 4801 | if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) |
4903 | && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE) | 4802 | && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE) |
@@ -4912,13 +4811,15 @@ static void device_id_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg) | |||
4912 | } | 4811 | } |
4913 | } | 4812 | } |
4914 | 4813 | ||
4915 | static void send_panic_events(char *str) | 4814 | static void send_panic_events(struct ipmi_smi *intf, char *str) |
4916 | { | 4815 | { |
4917 | struct kernel_ipmi_msg msg; | 4816 | struct kernel_ipmi_msg msg; |
4918 | ipmi_smi_t intf; | 4817 | unsigned char data[16]; |
4919 | unsigned char data[16]; | ||
4920 | struct ipmi_system_interface_addr *si; | 4818 | struct ipmi_system_interface_addr *si; |
4921 | struct ipmi_addr addr; | 4819 | struct ipmi_addr addr; |
4820 | char *p = str; | ||
4821 | struct ipmi_ipmb_addr *ipmb; | ||
4822 | int j; | ||
4922 | 4823 | ||
4923 | if (ipmi_send_panic_event == IPMI_SEND_PANIC_EVENT_NONE) | 4824 | if (ipmi_send_panic_event == IPMI_SEND_PANIC_EVENT_NONE) |
4924 | return; | 4825 | return; |
@@ -4949,15 +4850,8 @@ static void send_panic_events(char *str) | |||
4949 | data[7] = str[2]; | 4850 | data[7] = str[2]; |
4950 | } | 4851 | } |
4951 | 4852 | ||
4952 | /* For every registered interface, send the event. */ | 4853 | /* Send the event announcing the panic. */ |
4953 | list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { | 4854 | ipmi_panic_request_and_wait(intf, &addr, &msg); |
4954 | if (!intf->handlers || !intf->handlers->poll) | ||
4955 | /* Interface is not ready or can't run at panic time. */ | ||
4956 | continue; | ||
4957 | |||
4958 | /* Send the event announcing the panic. */ | ||
4959 | ipmi_panic_request_and_wait(intf, &addr, &msg); | ||
4960 | } | ||
4961 | 4855 | ||
4962 | /* | 4856 | /* |
4963 | * On every interface, dump a bunch of OEM event holding the | 4857 | * On every interface, dump a bunch of OEM event holding the |
@@ -4966,111 +4860,100 @@ static void send_panic_events(char *str) | |||
4966 | if (ipmi_send_panic_event != IPMI_SEND_PANIC_EVENT_STRING || !str) | 4860 | if (ipmi_send_panic_event != IPMI_SEND_PANIC_EVENT_STRING || !str) |
4967 | return; | 4861 | return; |
4968 | 4862 | ||
4969 | /* For every registered interface, send the event. */ | 4863 | /* |
4970 | list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { | 4864 | * intf_num is used as an marker to tell if the |
4971 | char *p = str; | 4865 | * interface is valid. Thus we need a read barrier to |
4972 | struct ipmi_ipmb_addr *ipmb; | 4866 | * make sure data fetched before checking intf_num |
4973 | int j; | 4867 | * won't be used. |
4974 | 4868 | */ | |
4975 | if (intf->intf_num == -1) | 4869 | smp_rmb(); |
4976 | /* Interface was not ready yet. */ | ||
4977 | continue; | ||
4978 | 4870 | ||
4979 | /* | 4871 | /* |
4980 | * intf_num is used as an marker to tell if the | 4872 | * First job here is to figure out where to send the |
4981 | * interface is valid. Thus we need a read barrier to | 4873 | * OEM events. There's no way in IPMI to send OEM |
4982 | * make sure data fetched before checking intf_num | 4874 | * events using an event send command, so we have to |
4983 | * won't be used. | 4875 | * find the SEL to put them in and stick them in |
4984 | */ | 4876 | * there. |
4985 | smp_rmb(); | 4877 | */ |
4986 | 4878 | ||
4987 | /* | 4879 | /* Get capabilities from the get device id. */ |
4988 | * First job here is to figure out where to send the | 4880 | intf->local_sel_device = 0; |
4989 | * OEM events. There's no way in IPMI to send OEM | 4881 | intf->local_event_generator = 0; |
4990 | * events using an event send command, so we have to | 4882 | intf->event_receiver = 0; |
4991 | * find the SEL to put them in and stick them in | ||
4992 | * there. | ||
4993 | */ | ||
4994 | 4883 | ||
4995 | /* Get capabilities from the get device id. */ | 4884 | /* Request the device info from the local MC. */ |
4996 | intf->local_sel_device = 0; | 4885 | msg.netfn = IPMI_NETFN_APP_REQUEST; |
4997 | intf->local_event_generator = 0; | 4886 | msg.cmd = IPMI_GET_DEVICE_ID_CMD; |
4998 | intf->event_receiver = 0; | 4887 | msg.data = NULL; |
4888 | msg.data_len = 0; | ||
4889 | intf->null_user_handler = device_id_fetcher; | ||
4890 | ipmi_panic_request_and_wait(intf, &addr, &msg); | ||
4999 | 4891 | ||
5000 | /* Request the device info from the local MC. */ | 4892 | if (intf->local_event_generator) { |
5001 | msg.netfn = IPMI_NETFN_APP_REQUEST; | 4893 | /* Request the event receiver from the local MC. */ |
5002 | msg.cmd = IPMI_GET_DEVICE_ID_CMD; | 4894 | msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST; |
4895 | msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD; | ||
5003 | msg.data = NULL; | 4896 | msg.data = NULL; |
5004 | msg.data_len = 0; | 4897 | msg.data_len = 0; |
5005 | intf->null_user_handler = device_id_fetcher; | 4898 | intf->null_user_handler = event_receiver_fetcher; |
5006 | ipmi_panic_request_and_wait(intf, &addr, &msg); | 4899 | ipmi_panic_request_and_wait(intf, &addr, &msg); |
4900 | } | ||
4901 | intf->null_user_handler = NULL; | ||
5007 | 4902 | ||
5008 | if (intf->local_event_generator) { | 4903 | /* |
5009 | /* Request the event receiver from the local MC. */ | 4904 | * Validate the event receiver. The low bit must not |
5010 | msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST; | 4905 | * be 1 (it must be a valid IPMB address), it cannot |
5011 | msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD; | 4906 | * be zero, and it must not be my address. |
5012 | msg.data = NULL; | 4907 | */ |
5013 | msg.data_len = 0; | 4908 | if (((intf->event_receiver & 1) == 0) |
5014 | intf->null_user_handler = event_receiver_fetcher; | 4909 | && (intf->event_receiver != 0) |
5015 | ipmi_panic_request_and_wait(intf, &addr, &msg); | 4910 | && (intf->event_receiver != intf->addrinfo[0].address)) { |
5016 | } | 4911 | /* |
5017 | intf->null_user_handler = NULL; | 4912 | * The event receiver is valid, send an IPMB |
4913 | * message. | ||
4914 | */ | ||
4915 | ipmb = (struct ipmi_ipmb_addr *) &addr; | ||
4916 | ipmb->addr_type = IPMI_IPMB_ADDR_TYPE; | ||
4917 | ipmb->channel = 0; /* FIXME - is this right? */ | ||
4918 | ipmb->lun = intf->event_receiver_lun; | ||
4919 | ipmb->slave_addr = intf->event_receiver; | ||
4920 | } else if (intf->local_sel_device) { | ||
4921 | /* | ||
4922 | * The event receiver was not valid (or was | ||
4923 | * me), but I am an SEL device, just dump it | ||
4924 | * in my SEL. | ||
4925 | */ | ||
4926 | si = (struct ipmi_system_interface_addr *) &addr; | ||
4927 | si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; | ||
4928 | si->channel = IPMI_BMC_CHANNEL; | ||
4929 | si->lun = 0; | ||
4930 | } else | ||
4931 | return; /* No where to send the event. */ | ||
5018 | 4932 | ||
4933 | msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */ | ||
4934 | msg.cmd = IPMI_ADD_SEL_ENTRY_CMD; | ||
4935 | msg.data = data; | ||
4936 | msg.data_len = 16; | ||
4937 | |||
4938 | j = 0; | ||
4939 | while (*p) { | ||
4940 | int size = strlen(p); | ||
4941 | |||
4942 | if (size > 11) | ||
4943 | size = 11; | ||
4944 | data[0] = 0; | ||
4945 | data[1] = 0; | ||
4946 | data[2] = 0xf0; /* OEM event without timestamp. */ | ||
4947 | data[3] = intf->addrinfo[0].address; | ||
4948 | data[4] = j++; /* sequence # */ | ||
5019 | /* | 4949 | /* |
5020 | * Validate the event receiver. The low bit must not | 4950 | * Always give 11 bytes, so strncpy will fill |
5021 | * be 1 (it must be a valid IPMB address), it cannot | 4951 | * it with zeroes for me. |
5022 | * be zero, and it must not be my address. | ||
5023 | */ | 4952 | */ |
5024 | if (((intf->event_receiver & 1) == 0) | 4953 | strncpy(data+5, p, 11); |
5025 | && (intf->event_receiver != 0) | 4954 | p += size; |
5026 | && (intf->event_receiver != intf->addrinfo[0].address)) { | ||
5027 | /* | ||
5028 | * The event receiver is valid, send an IPMB | ||
5029 | * message. | ||
5030 | */ | ||
5031 | ipmb = (struct ipmi_ipmb_addr *) &addr; | ||
5032 | ipmb->addr_type = IPMI_IPMB_ADDR_TYPE; | ||
5033 | ipmb->channel = 0; /* FIXME - is this right? */ | ||
5034 | ipmb->lun = intf->event_receiver_lun; | ||
5035 | ipmb->slave_addr = intf->event_receiver; | ||
5036 | } else if (intf->local_sel_device) { | ||
5037 | /* | ||
5038 | * The event receiver was not valid (or was | ||
5039 | * me), but I am an SEL device, just dump it | ||
5040 | * in my SEL. | ||
5041 | */ | ||
5042 | si = (struct ipmi_system_interface_addr *) &addr; | ||
5043 | si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; | ||
5044 | si->channel = IPMI_BMC_CHANNEL; | ||
5045 | si->lun = 0; | ||
5046 | } else | ||
5047 | continue; /* No where to send the event. */ | ||
5048 | |||
5049 | msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */ | ||
5050 | msg.cmd = IPMI_ADD_SEL_ENTRY_CMD; | ||
5051 | msg.data = data; | ||
5052 | msg.data_len = 16; | ||
5053 | |||
5054 | j = 0; | ||
5055 | while (*p) { | ||
5056 | int size = strlen(p); | ||
5057 | |||
5058 | if (size > 11) | ||
5059 | size = 11; | ||
5060 | data[0] = 0; | ||
5061 | data[1] = 0; | ||
5062 | data[2] = 0xf0; /* OEM event without timestamp. */ | ||
5063 | data[3] = intf->addrinfo[0].address; | ||
5064 | data[4] = j++; /* sequence # */ | ||
5065 | /* | ||
5066 | * Always give 11 bytes, so strncpy will fill | ||
5067 | * it with zeroes for me. | ||
5068 | */ | ||
5069 | strncpy(data+5, p, 11); | ||
5070 | p += size; | ||
5071 | 4955 | ||
5072 | ipmi_panic_request_and_wait(intf, &addr, &msg); | 4956 | ipmi_panic_request_and_wait(intf, &addr, &msg); |
5073 | } | ||
5074 | } | 4957 | } |
5075 | } | 4958 | } |
5076 | 4959 | ||
@@ -5080,7 +4963,8 @@ static int panic_event(struct notifier_block *this, | |||
5080 | unsigned long event, | 4963 | unsigned long event, |
5081 | void *ptr) | 4964 | void *ptr) |
5082 | { | 4965 | { |
5083 | ipmi_smi_t intf; | 4966 | struct ipmi_smi *intf; |
4967 | struct ipmi_user *user; | ||
5084 | 4968 | ||
5085 | if (has_panicked) | 4969 | if (has_panicked) |
5086 | return NOTIFY_DONE; | 4970 | return NOTIFY_DONE; |
@@ -5088,10 +4972,13 @@ static int panic_event(struct notifier_block *this, | |||
5088 | 4972 | ||
5089 | /* For every registered interface, set it to run to completion. */ | 4973 | /* For every registered interface, set it to run to completion. */ |
5090 | list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { | 4974 | list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { |
5091 | if (!intf->handlers) | 4975 | if (!intf->handlers || intf->intf_num == -1) |
5092 | /* Interface is not ready. */ | 4976 | /* Interface is not ready. */ |
5093 | continue; | 4977 | continue; |
5094 | 4978 | ||
4979 | if (!intf->handlers->poll) | ||
4980 | continue; | ||
4981 | |||
5095 | /* | 4982 | /* |
5096 | * If we were interrupted while locking xmit_msgs_lock or | 4983 | * If we were interrupted while locking xmit_msgs_lock or |
5097 | * waiting_rcv_msgs_lock, the corresponding list may be | 4984 | * waiting_rcv_msgs_lock, the corresponding list may be |
@@ -5113,9 +5000,15 @@ static int panic_event(struct notifier_block *this, | |||
5113 | if (intf->handlers->set_run_to_completion) | 5000 | if (intf->handlers->set_run_to_completion) |
5114 | intf->handlers->set_run_to_completion(intf->send_info, | 5001 | intf->handlers->set_run_to_completion(intf->send_info, |
5115 | 1); | 5002 | 1); |
5116 | } | ||
5117 | 5003 | ||
5118 | send_panic_events(ptr); | 5004 | list_for_each_entry_rcu(user, &intf->users, link) { |
5005 | if (user->handler->ipmi_panic_handler) | ||
5006 | user->handler->ipmi_panic_handler( | ||
5007 | user->handler_data); | ||
5008 | } | ||
5009 | |||
5010 | send_panic_events(intf, ptr); | ||
5011 | } | ||
5119 | 5012 | ||
5120 | return NOTIFY_DONE; | 5013 | return NOTIFY_DONE; |
5121 | } | 5014 | } |
@@ -5141,16 +5034,6 @@ static int ipmi_init_msghandler(void) | |||
5141 | 5034 | ||
5142 | pr_info("ipmi message handler version " IPMI_DRIVER_VERSION "\n"); | 5035 | pr_info("ipmi message handler version " IPMI_DRIVER_VERSION "\n"); |
5143 | 5036 | ||
5144 | #ifdef CONFIG_IPMI_PROC_INTERFACE | ||
5145 | proc_ipmi_root = proc_mkdir("ipmi", NULL); | ||
5146 | if (!proc_ipmi_root) { | ||
5147 | pr_err(PFX "Unable to create IPMI proc dir"); | ||
5148 | driver_unregister(&ipmidriver.driver); | ||
5149 | return -ENOMEM; | ||
5150 | } | ||
5151 | |||
5152 | #endif /* CONFIG_IPMI_PROC_INTERFACE */ | ||
5153 | |||
5154 | timer_setup(&ipmi_timer, ipmi_timeout, 0); | 5037 | timer_setup(&ipmi_timer, ipmi_timeout, 0); |
5155 | mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES); | 5038 | mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES); |
5156 | 5039 | ||
@@ -5189,10 +5072,6 @@ static void __exit cleanup_ipmi(void) | |||
5189 | atomic_inc(&stop_operation); | 5072 | atomic_inc(&stop_operation); |
5190 | del_timer_sync(&ipmi_timer); | 5073 | del_timer_sync(&ipmi_timer); |
5191 | 5074 | ||
5192 | #ifdef CONFIG_IPMI_PROC_INTERFACE | ||
5193 | proc_remove(proc_ipmi_root); | ||
5194 | #endif /* CONFIG_IPMI_PROC_INTERFACE */ | ||
5195 | |||
5196 | driver_unregister(&ipmidriver.driver); | 5075 | driver_unregister(&ipmidriver.driver); |
5197 | 5076 | ||
5198 | initialized = 0; | 5077 | initialized = 0; |
diff --git a/drivers/char/ipmi/ipmi_poweroff.c b/drivers/char/ipmi/ipmi_poweroff.c index 7996337852f2..f6e19410dc57 100644 --- a/drivers/char/ipmi/ipmi_poweroff.c +++ b/drivers/char/ipmi/ipmi_poweroff.c | |||
@@ -39,9 +39,9 @@ static int ifnum_to_use = -1; | |||
39 | 39 | ||
40 | /* Our local state. */ | 40 | /* Our local state. */ |
41 | static int ready; | 41 | static int ready; |
42 | static ipmi_user_t ipmi_user; | 42 | static struct ipmi_user *ipmi_user; |
43 | static int ipmi_ifnum; | 43 | static int ipmi_ifnum; |
44 | static void (*specific_poweroff_func)(ipmi_user_t user); | 44 | static void (*specific_poweroff_func)(struct ipmi_user *user); |
45 | 45 | ||
46 | /* Holds the old poweroff function so we can restore it on removal. */ | 46 | /* Holds the old poweroff function so we can restore it on removal. */ |
47 | static void (*old_poweroff_func)(void); | 47 | static void (*old_poweroff_func)(void); |
@@ -118,7 +118,7 @@ static const struct ipmi_user_hndl ipmi_poweroff_handler = { | |||
118 | }; | 118 | }; |
119 | 119 | ||
120 | 120 | ||
121 | static int ipmi_request_wait_for_response(ipmi_user_t user, | 121 | static int ipmi_request_wait_for_response(struct ipmi_user *user, |
122 | struct ipmi_addr *addr, | 122 | struct ipmi_addr *addr, |
123 | struct kernel_ipmi_msg *send_msg) | 123 | struct kernel_ipmi_msg *send_msg) |
124 | { | 124 | { |
@@ -138,7 +138,7 @@ static int ipmi_request_wait_for_response(ipmi_user_t user, | |||
138 | } | 138 | } |
139 | 139 | ||
140 | /* Wait for message to complete, spinning. */ | 140 | /* Wait for message to complete, spinning. */ |
141 | static int ipmi_request_in_rc_mode(ipmi_user_t user, | 141 | static int ipmi_request_in_rc_mode(struct ipmi_user *user, |
142 | struct ipmi_addr *addr, | 142 | struct ipmi_addr *addr, |
143 | struct kernel_ipmi_msg *send_msg) | 143 | struct kernel_ipmi_msg *send_msg) |
144 | { | 144 | { |
@@ -178,9 +178,9 @@ static int ipmi_request_in_rc_mode(ipmi_user_t user, | |||
178 | #define IPMI_MOTOROLA_MANUFACTURER_ID 0x0000A1 | 178 | #define IPMI_MOTOROLA_MANUFACTURER_ID 0x0000A1 |
179 | #define IPMI_MOTOROLA_PPS_IPMC_PRODUCT_ID 0x0051 | 179 | #define IPMI_MOTOROLA_PPS_IPMC_PRODUCT_ID 0x0051 |
180 | 180 | ||
181 | static void (*atca_oem_poweroff_hook)(ipmi_user_t user); | 181 | static void (*atca_oem_poweroff_hook)(struct ipmi_user *user); |
182 | 182 | ||
183 | static void pps_poweroff_atca(ipmi_user_t user) | 183 | static void pps_poweroff_atca(struct ipmi_user *user) |
184 | { | 184 | { |
185 | struct ipmi_system_interface_addr smi_addr; | 185 | struct ipmi_system_interface_addr smi_addr; |
186 | struct kernel_ipmi_msg send_msg; | 186 | struct kernel_ipmi_msg send_msg; |
@@ -208,7 +208,7 @@ static void pps_poweroff_atca(ipmi_user_t user) | |||
208 | return; | 208 | return; |
209 | } | 209 | } |
210 | 210 | ||
211 | static int ipmi_atca_detect(ipmi_user_t user) | 211 | static int ipmi_atca_detect(struct ipmi_user *user) |
212 | { | 212 | { |
213 | struct ipmi_system_interface_addr smi_addr; | 213 | struct ipmi_system_interface_addr smi_addr; |
214 | struct kernel_ipmi_msg send_msg; | 214 | struct kernel_ipmi_msg send_msg; |
@@ -245,7 +245,7 @@ static int ipmi_atca_detect(ipmi_user_t user) | |||
245 | return !rv; | 245 | return !rv; |
246 | } | 246 | } |
247 | 247 | ||
248 | static void ipmi_poweroff_atca(ipmi_user_t user) | 248 | static void ipmi_poweroff_atca(struct ipmi_user *user) |
249 | { | 249 | { |
250 | struct ipmi_system_interface_addr smi_addr; | 250 | struct ipmi_system_interface_addr smi_addr; |
251 | struct kernel_ipmi_msg send_msg; | 251 | struct kernel_ipmi_msg send_msg; |
@@ -309,13 +309,13 @@ static void ipmi_poweroff_atca(ipmi_user_t user) | |||
309 | #define IPMI_CPI1_PRODUCT_ID 0x000157 | 309 | #define IPMI_CPI1_PRODUCT_ID 0x000157 |
310 | #define IPMI_CPI1_MANUFACTURER_ID 0x0108 | 310 | #define IPMI_CPI1_MANUFACTURER_ID 0x0108 |
311 | 311 | ||
312 | static int ipmi_cpi1_detect(ipmi_user_t user) | 312 | static int ipmi_cpi1_detect(struct ipmi_user *user) |
313 | { | 313 | { |
314 | return ((mfg_id == IPMI_CPI1_MANUFACTURER_ID) | 314 | return ((mfg_id == IPMI_CPI1_MANUFACTURER_ID) |
315 | && (prod_id == IPMI_CPI1_PRODUCT_ID)); | 315 | && (prod_id == IPMI_CPI1_PRODUCT_ID)); |
316 | } | 316 | } |
317 | 317 | ||
318 | static void ipmi_poweroff_cpi1(ipmi_user_t user) | 318 | static void ipmi_poweroff_cpi1(struct ipmi_user *user) |
319 | { | 319 | { |
320 | struct ipmi_system_interface_addr smi_addr; | 320 | struct ipmi_system_interface_addr smi_addr; |
321 | struct ipmi_ipmb_addr ipmb_addr; | 321 | struct ipmi_ipmb_addr ipmb_addr; |
@@ -424,7 +424,7 @@ static void ipmi_poweroff_cpi1(ipmi_user_t user) | |||
424 | */ | 424 | */ |
425 | 425 | ||
426 | #define DELL_IANA_MFR_ID {0xA2, 0x02, 0x00} | 426 | #define DELL_IANA_MFR_ID {0xA2, 0x02, 0x00} |
427 | static int ipmi_dell_chassis_detect(ipmi_user_t user) | 427 | static int ipmi_dell_chassis_detect(struct ipmi_user *user) |
428 | { | 428 | { |
429 | const char ipmi_version_major = ipmi_version & 0xF; | 429 | const char ipmi_version_major = ipmi_version & 0xF; |
430 | const char ipmi_version_minor = (ipmi_version >> 4) & 0xF; | 430 | const char ipmi_version_minor = (ipmi_version >> 4) & 0xF; |
@@ -445,7 +445,7 @@ static int ipmi_dell_chassis_detect(ipmi_user_t user) | |||
445 | 445 | ||
446 | #define HP_IANA_MFR_ID 0x0b | 446 | #define HP_IANA_MFR_ID 0x0b |
447 | #define HP_BMC_PROD_ID 0x8201 | 447 | #define HP_BMC_PROD_ID 0x8201 |
448 | static int ipmi_hp_chassis_detect(ipmi_user_t user) | 448 | static int ipmi_hp_chassis_detect(struct ipmi_user *user) |
449 | { | 449 | { |
450 | if (mfg_id == HP_IANA_MFR_ID | 450 | if (mfg_id == HP_IANA_MFR_ID |
451 | && prod_id == HP_BMC_PROD_ID | 451 | && prod_id == HP_BMC_PROD_ID |
@@ -461,13 +461,13 @@ static int ipmi_hp_chassis_detect(ipmi_user_t user) | |||
461 | #define IPMI_NETFN_CHASSIS_REQUEST 0 | 461 | #define IPMI_NETFN_CHASSIS_REQUEST 0 |
462 | #define IPMI_CHASSIS_CONTROL_CMD 0x02 | 462 | #define IPMI_CHASSIS_CONTROL_CMD 0x02 |
463 | 463 | ||
464 | static int ipmi_chassis_detect(ipmi_user_t user) | 464 | static int ipmi_chassis_detect(struct ipmi_user *user) |
465 | { | 465 | { |
466 | /* Chassis support, use it. */ | 466 | /* Chassis support, use it. */ |
467 | return (capabilities & 0x80); | 467 | return (capabilities & 0x80); |
468 | } | 468 | } |
469 | 469 | ||
470 | static void ipmi_poweroff_chassis(ipmi_user_t user) | 470 | static void ipmi_poweroff_chassis(struct ipmi_user *user) |
471 | { | 471 | { |
472 | struct ipmi_system_interface_addr smi_addr; | 472 | struct ipmi_system_interface_addr smi_addr; |
473 | struct kernel_ipmi_msg send_msg; | 473 | struct kernel_ipmi_msg send_msg; |
@@ -517,8 +517,8 @@ static void ipmi_poweroff_chassis(ipmi_user_t user) | |||
517 | /* Table of possible power off functions. */ | 517 | /* Table of possible power off functions. */ |
518 | struct poweroff_function { | 518 | struct poweroff_function { |
519 | char *platform_type; | 519 | char *platform_type; |
520 | int (*detect)(ipmi_user_t user); | 520 | int (*detect)(struct ipmi_user *user); |
521 | void (*poweroff_func)(ipmi_user_t user); | 521 | void (*poweroff_func)(struct ipmi_user *user); |
522 | }; | 522 | }; |
523 | 523 | ||
524 | static struct poweroff_function poweroff_functions[] = { | 524 | static struct poweroff_function poweroff_functions[] = { |
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index ff870aa91cfe..ad353be871bf 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c | |||
@@ -122,8 +122,8 @@ enum si_stat_indexes { | |||
122 | }; | 122 | }; |
123 | 123 | ||
124 | struct smi_info { | 124 | struct smi_info { |
125 | int intf_num; | 125 | int si_num; |
126 | ipmi_smi_t intf; | 126 | struct ipmi_smi *intf; |
127 | struct si_sm_data *si_sm; | 127 | struct si_sm_data *si_sm; |
128 | const struct si_sm_handlers *handlers; | 128 | const struct si_sm_handlers *handlers; |
129 | spinlock_t si_lock; | 129 | spinlock_t si_lock; |
@@ -261,7 +261,6 @@ static int num_max_busy_us; | |||
261 | static bool unload_when_empty = true; | 261 | static bool unload_when_empty = true; |
262 | 262 | ||
263 | static int try_smi_init(struct smi_info *smi); | 263 | static int try_smi_init(struct smi_info *smi); |
264 | static void shutdown_one_si(struct smi_info *smi_info); | ||
265 | static void cleanup_one_si(struct smi_info *smi_info); | 264 | static void cleanup_one_si(struct smi_info *smi_info); |
266 | static void cleanup_ipmi_si(void); | 265 | static void cleanup_ipmi_si(void); |
267 | 266 | ||
@@ -287,10 +286,7 @@ static void deliver_recv_msg(struct smi_info *smi_info, | |||
287 | struct ipmi_smi_msg *msg) | 286 | struct ipmi_smi_msg *msg) |
288 | { | 287 | { |
289 | /* Deliver the message to the upper layer. */ | 288 | /* Deliver the message to the upper layer. */ |
290 | if (smi_info->intf) | 289 | ipmi_smi_msg_received(smi_info->intf, msg); |
291 | ipmi_smi_msg_received(smi_info->intf, msg); | ||
292 | else | ||
293 | ipmi_free_smi_msg(msg); | ||
294 | } | 290 | } |
295 | 291 | ||
296 | static void return_hosed_msg(struct smi_info *smi_info, int cCode) | 292 | static void return_hosed_msg(struct smi_info *smi_info, int cCode) |
@@ -471,8 +467,7 @@ retry: | |||
471 | 467 | ||
472 | start_clear_flags(smi_info); | 468 | start_clear_flags(smi_info); |
473 | smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT; | 469 | smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT; |
474 | if (smi_info->intf) | 470 | ipmi_smi_watchdog_pretimeout(smi_info->intf); |
475 | ipmi_smi_watchdog_pretimeout(smi_info->intf); | ||
476 | } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) { | 471 | } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) { |
477 | /* Messages available. */ | 472 | /* Messages available. */ |
478 | smi_info->curr_msg = alloc_msg_handle_irq(smi_info); | 473 | smi_info->curr_msg = alloc_msg_handle_irq(smi_info); |
@@ -798,8 +793,7 @@ restart: | |||
798 | * We prefer handling attn over new messages. But don't do | 793 | * We prefer handling attn over new messages. But don't do |
799 | * this if there is not yet an upper layer to handle anything. | 794 | * this if there is not yet an upper layer to handle anything. |
800 | */ | 795 | */ |
801 | if (likely(smi_info->intf) && | 796 | if (si_sm_result == SI_SM_ATTN || smi_info->got_attn) { |
802 | (si_sm_result == SI_SM_ATTN || smi_info->got_attn)) { | ||
803 | unsigned char msg[2]; | 797 | unsigned char msg[2]; |
804 | 798 | ||
805 | if (smi_info->si_state != SI_NORMAL) { | 799 | if (smi_info->si_state != SI_NORMAL) { |
@@ -962,8 +956,8 @@ static inline int ipmi_thread_busy_wait(enum si_sm_result smi_result, | |||
962 | { | 956 | { |
963 | unsigned int max_busy_us = 0; | 957 | unsigned int max_busy_us = 0; |
964 | 958 | ||
965 | if (smi_info->intf_num < num_max_busy_us) | 959 | if (smi_info->si_num < num_max_busy_us) |
966 | max_busy_us = kipmid_max_busy_us[smi_info->intf_num]; | 960 | max_busy_us = kipmid_max_busy_us[smi_info->si_num]; |
967 | if (max_busy_us == 0 || smi_result != SI_SM_CALL_WITH_DELAY) | 961 | if (max_busy_us == 0 || smi_result != SI_SM_CALL_WITH_DELAY) |
968 | ipmi_si_set_not_busy(busy_until); | 962 | ipmi_si_set_not_busy(busy_until); |
969 | else if (!ipmi_si_is_busy(busy_until)) { | 963 | else if (!ipmi_si_is_busy(busy_until)) { |
@@ -1143,8 +1137,8 @@ irqreturn_t ipmi_si_irq_handler(int irq, void *data) | |||
1143 | return IRQ_HANDLED; | 1137 | return IRQ_HANDLED; |
1144 | } | 1138 | } |
1145 | 1139 | ||
1146 | static int smi_start_processing(void *send_info, | 1140 | static int smi_start_processing(void *send_info, |
1147 | ipmi_smi_t intf) | 1141 | struct ipmi_smi *intf) |
1148 | { | 1142 | { |
1149 | struct smi_info *new_smi = send_info; | 1143 | struct smi_info *new_smi = send_info; |
1150 | int enable = 0; | 1144 | int enable = 0; |
@@ -1165,8 +1159,8 @@ static int smi_start_processing(void *send_info, | |||
1165 | /* | 1159 | /* |
1166 | * Check if the user forcefully enabled the daemon. | 1160 | * Check if the user forcefully enabled the daemon. |
1167 | */ | 1161 | */ |
1168 | if (new_smi->intf_num < num_force_kipmid) | 1162 | if (new_smi->si_num < num_force_kipmid) |
1169 | enable = force_kipmid[new_smi->intf_num]; | 1163 | enable = force_kipmid[new_smi->si_num]; |
1170 | /* | 1164 | /* |
1171 | * The BT interface is efficient enough to not need a thread, | 1165 | * The BT interface is efficient enough to not need a thread, |
1172 | * and there is no need for a thread if we have interrupts. | 1166 | * and there is no need for a thread if we have interrupts. |
@@ -1176,7 +1170,7 @@ static int smi_start_processing(void *send_info, | |||
1176 | 1170 | ||
1177 | if (enable) { | 1171 | if (enable) { |
1178 | new_smi->thread = kthread_run(ipmi_thread, new_smi, | 1172 | new_smi->thread = kthread_run(ipmi_thread, new_smi, |
1179 | "kipmi%d", new_smi->intf_num); | 1173 | "kipmi%d", new_smi->si_num); |
1180 | if (IS_ERR(new_smi->thread)) { | 1174 | if (IS_ERR(new_smi->thread)) { |
1181 | dev_notice(new_smi->io.dev, "Could not start" | 1175 | dev_notice(new_smi->io.dev, "Could not start" |
1182 | " kernel thread due to error %ld, only using" | 1176 | " kernel thread due to error %ld, only using" |
@@ -1209,9 +1203,11 @@ static void set_maintenance_mode(void *send_info, bool enable) | |||
1209 | atomic_set(&smi_info->req_events, 0); | 1203 | atomic_set(&smi_info->req_events, 0); |
1210 | } | 1204 | } |
1211 | 1205 | ||
1206 | static void shutdown_smi(void *send_info); | ||
1212 | static const struct ipmi_smi_handlers handlers = { | 1207 | static const struct ipmi_smi_handlers handlers = { |
1213 | .owner = THIS_MODULE, | 1208 | .owner = THIS_MODULE, |
1214 | .start_processing = smi_start_processing, | 1209 | .start_processing = smi_start_processing, |
1210 | .shutdown = shutdown_smi, | ||
1215 | .get_smi_info = get_smi_info, | 1211 | .get_smi_info = get_smi_info, |
1216 | .sender = sender, | 1212 | .sender = sender, |
1217 | .request_events = request_events, | 1213 | .request_events = request_events, |
@@ -1592,102 +1588,6 @@ out: | |||
1592 | return rv; | 1588 | return rv; |
1593 | } | 1589 | } |
1594 | 1590 | ||
1595 | #ifdef CONFIG_IPMI_PROC_INTERFACE | ||
1596 | static int smi_type_proc_show(struct seq_file *m, void *v) | ||
1597 | { | ||
1598 | struct smi_info *smi = m->private; | ||
1599 | |||
1600 | seq_printf(m, "%s\n", si_to_str[smi->io.si_type]); | ||
1601 | |||
1602 | return 0; | ||
1603 | } | ||
1604 | |||
1605 | static int smi_type_proc_open(struct inode *inode, struct file *file) | ||
1606 | { | ||
1607 | return single_open(file, smi_type_proc_show, PDE_DATA(inode)); | ||
1608 | } | ||
1609 | |||
1610 | static const struct file_operations smi_type_proc_ops = { | ||
1611 | .open = smi_type_proc_open, | ||
1612 | .read = seq_read, | ||
1613 | .llseek = seq_lseek, | ||
1614 | .release = single_release, | ||
1615 | }; | ||
1616 | |||
1617 | static int smi_si_stats_proc_show(struct seq_file *m, void *v) | ||
1618 | { | ||
1619 | struct smi_info *smi = m->private; | ||
1620 | |||
1621 | seq_printf(m, "interrupts_enabled: %d\n", | ||
1622 | smi->io.irq && !smi->interrupt_disabled); | ||
1623 | seq_printf(m, "short_timeouts: %u\n", | ||
1624 | smi_get_stat(smi, short_timeouts)); | ||
1625 | seq_printf(m, "long_timeouts: %u\n", | ||
1626 | smi_get_stat(smi, long_timeouts)); | ||
1627 | seq_printf(m, "idles: %u\n", | ||
1628 | smi_get_stat(smi, idles)); | ||
1629 | seq_printf(m, "interrupts: %u\n", | ||
1630 | smi_get_stat(smi, interrupts)); | ||
1631 | seq_printf(m, "attentions: %u\n", | ||
1632 | smi_get_stat(smi, attentions)); | ||
1633 | seq_printf(m, "flag_fetches: %u\n", | ||
1634 | smi_get_stat(smi, flag_fetches)); | ||
1635 | seq_printf(m, "hosed_count: %u\n", | ||
1636 | smi_get_stat(smi, hosed_count)); | ||
1637 | seq_printf(m, "complete_transactions: %u\n", | ||
1638 | smi_get_stat(smi, complete_transactions)); | ||
1639 | seq_printf(m, "events: %u\n", | ||
1640 | smi_get_stat(smi, events)); | ||
1641 | seq_printf(m, "watchdog_pretimeouts: %u\n", | ||
1642 | smi_get_stat(smi, watchdog_pretimeouts)); | ||
1643 | seq_printf(m, "incoming_messages: %u\n", | ||
1644 | smi_get_stat(smi, incoming_messages)); | ||
1645 | return 0; | ||
1646 | } | ||
1647 | |||
1648 | static int smi_si_stats_proc_open(struct inode *inode, struct file *file) | ||
1649 | { | ||
1650 | return single_open(file, smi_si_stats_proc_show, PDE_DATA(inode)); | ||
1651 | } | ||
1652 | |||
1653 | static const struct file_operations smi_si_stats_proc_ops = { | ||
1654 | .open = smi_si_stats_proc_open, | ||
1655 | .read = seq_read, | ||
1656 | .llseek = seq_lseek, | ||
1657 | .release = single_release, | ||
1658 | }; | ||
1659 | |||
1660 | static int smi_params_proc_show(struct seq_file *m, void *v) | ||
1661 | { | ||
1662 | struct smi_info *smi = m->private; | ||
1663 | |||
1664 | seq_printf(m, | ||
1665 | "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n", | ||
1666 | si_to_str[smi->io.si_type], | ||
1667 | addr_space_to_str[smi->io.addr_type], | ||
1668 | smi->io.addr_data, | ||
1669 | smi->io.regspacing, | ||
1670 | smi->io.regsize, | ||
1671 | smi->io.regshift, | ||
1672 | smi->io.irq, | ||
1673 | smi->io.slave_addr); | ||
1674 | |||
1675 | return 0; | ||
1676 | } | ||
1677 | |||
1678 | static int smi_params_proc_open(struct inode *inode, struct file *file) | ||
1679 | { | ||
1680 | return single_open(file, smi_params_proc_show, PDE_DATA(inode)); | ||
1681 | } | ||
1682 | |||
1683 | static const struct file_operations smi_params_proc_ops = { | ||
1684 | .open = smi_params_proc_open, | ||
1685 | .read = seq_read, | ||
1686 | .llseek = seq_lseek, | ||
1687 | .release = single_release, | ||
1688 | }; | ||
1689 | #endif | ||
1690 | |||
1691 | #define IPMI_SI_ATTR(name) \ | 1591 | #define IPMI_SI_ATTR(name) \ |
1692 | static ssize_t ipmi_##name##_show(struct device *dev, \ | 1592 | static ssize_t ipmi_##name##_show(struct device *dev, \ |
1693 | struct device_attribute *attr, \ | 1593 | struct device_attribute *attr, \ |
@@ -2006,14 +1906,8 @@ int ipmi_si_add_smi(struct si_sm_io *io) | |||
2006 | 1906 | ||
2007 | list_add_tail(&new_smi->link, &smi_infos); | 1907 | list_add_tail(&new_smi->link, &smi_infos); |
2008 | 1908 | ||
2009 | if (initialized) { | 1909 | if (initialized) |
2010 | rv = try_smi_init(new_smi); | 1910 | rv = try_smi_init(new_smi); |
2011 | if (rv) { | ||
2012 | cleanup_one_si(new_smi); | ||
2013 | mutex_unlock(&smi_infos_lock); | ||
2014 | return rv; | ||
2015 | } | ||
2016 | } | ||
2017 | out_err: | 1911 | out_err: |
2018 | mutex_unlock(&smi_infos_lock); | 1912 | mutex_unlock(&smi_infos_lock); |
2019 | return rv; | 1913 | return rv; |
@@ -2056,19 +1950,19 @@ static int try_smi_init(struct smi_info *new_smi) | |||
2056 | goto out_err; | 1950 | goto out_err; |
2057 | } | 1951 | } |
2058 | 1952 | ||
2059 | new_smi->intf_num = smi_num; | 1953 | new_smi->si_num = smi_num; |
2060 | 1954 | ||
2061 | /* Do this early so it's available for logs. */ | 1955 | /* Do this early so it's available for logs. */ |
2062 | if (!new_smi->io.dev) { | 1956 | if (!new_smi->io.dev) { |
2063 | init_name = kasprintf(GFP_KERNEL, "ipmi_si.%d", | 1957 | init_name = kasprintf(GFP_KERNEL, "ipmi_si.%d", |
2064 | new_smi->intf_num); | 1958 | new_smi->si_num); |
2065 | 1959 | ||
2066 | /* | 1960 | /* |
2067 | * If we don't already have a device from something | 1961 | * If we don't already have a device from something |
2068 | * else (like PCI), then register a new one. | 1962 | * else (like PCI), then register a new one. |
2069 | */ | 1963 | */ |
2070 | new_smi->pdev = platform_device_alloc("ipmi_si", | 1964 | new_smi->pdev = platform_device_alloc("ipmi_si", |
2071 | new_smi->intf_num); | 1965 | new_smi->si_num); |
2072 | if (!new_smi->pdev) { | 1966 | if (!new_smi->pdev) { |
2073 | pr_err(PFX "Unable to allocate platform device\n"); | 1967 | pr_err(PFX "Unable to allocate platform device\n"); |
2074 | rv = -ENOMEM; | 1968 | rv = -ENOMEM; |
@@ -2182,35 +2076,6 @@ static int try_smi_init(struct smi_info *new_smi) | |||
2182 | goto out_err; | 2076 | goto out_err; |
2183 | } | 2077 | } |
2184 | 2078 | ||
2185 | #ifdef CONFIG_IPMI_PROC_INTERFACE | ||
2186 | rv = ipmi_smi_add_proc_entry(new_smi->intf, "type", | ||
2187 | &smi_type_proc_ops, | ||
2188 | new_smi); | ||
2189 | if (rv) { | ||
2190 | dev_err(new_smi->io.dev, | ||
2191 | "Unable to create proc entry: %d\n", rv); | ||
2192 | goto out_err; | ||
2193 | } | ||
2194 | |||
2195 | rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats", | ||
2196 | &smi_si_stats_proc_ops, | ||
2197 | new_smi); | ||
2198 | if (rv) { | ||
2199 | dev_err(new_smi->io.dev, | ||
2200 | "Unable to create proc entry: %d\n", rv); | ||
2201 | goto out_err; | ||
2202 | } | ||
2203 | |||
2204 | rv = ipmi_smi_add_proc_entry(new_smi->intf, "params", | ||
2205 | &smi_params_proc_ops, | ||
2206 | new_smi); | ||
2207 | if (rv) { | ||
2208 | dev_err(new_smi->io.dev, | ||
2209 | "Unable to create proc entry: %d\n", rv); | ||
2210 | goto out_err; | ||
2211 | } | ||
2212 | #endif | ||
2213 | |||
2214 | /* Don't increment till we know we have succeeded. */ | 2079 | /* Don't increment till we know we have succeeded. */ |
2215 | smi_num++; | 2080 | smi_num++; |
2216 | 2081 | ||
@@ -2223,7 +2088,8 @@ static int try_smi_init(struct smi_info *new_smi) | |||
2223 | return 0; | 2088 | return 0; |
2224 | 2089 | ||
2225 | out_err: | 2090 | out_err: |
2226 | shutdown_one_si(new_smi); | 2091 | ipmi_unregister_smi(new_smi->intf); |
2092 | new_smi->intf = NULL; | ||
2227 | 2093 | ||
2228 | kfree(init_name); | 2094 | kfree(init_name); |
2229 | 2095 | ||
@@ -2301,20 +2167,9 @@ skip_fallback_noirq: | |||
2301 | } | 2167 | } |
2302 | module_init(init_ipmi_si); | 2168 | module_init(init_ipmi_si); |
2303 | 2169 | ||
2304 | static void shutdown_one_si(struct smi_info *smi_info) | 2170 | static void shutdown_smi(void *send_info) |
2305 | { | 2171 | { |
2306 | int rv = 0; | 2172 | struct smi_info *smi_info = send_info; |
2307 | |||
2308 | if (smi_info->intf) { | ||
2309 | ipmi_smi_t intf = smi_info->intf; | ||
2310 | |||
2311 | smi_info->intf = NULL; | ||
2312 | rv = ipmi_unregister_smi(intf); | ||
2313 | if (rv) { | ||
2314 | pr_err(PFX "Unable to unregister device: errno=%d\n", | ||
2315 | rv); | ||
2316 | } | ||
2317 | } | ||
2318 | 2173 | ||
2319 | if (smi_info->dev_group_added) { | 2174 | if (smi_info->dev_group_added) { |
2320 | device_remove_group(smi_info->io.dev, &ipmi_si_dev_attr_group); | 2175 | device_remove_group(smi_info->io.dev, &ipmi_si_dev_attr_group); |
@@ -2372,6 +2227,10 @@ static void shutdown_one_si(struct smi_info *smi_info) | |||
2372 | smi_info->si_sm = NULL; | 2227 | smi_info->si_sm = NULL; |
2373 | } | 2228 | } |
2374 | 2229 | ||
2230 | /* | ||
2231 | * Must be called with smi_infos_lock held, to serialize the | ||
2232 | * smi_info->intf check. | ||
2233 | */ | ||
2375 | static void cleanup_one_si(struct smi_info *smi_info) | 2234 | static void cleanup_one_si(struct smi_info *smi_info) |
2376 | { | 2235 | { |
2377 | if (!smi_info) | 2236 | if (!smi_info) |
@@ -2379,7 +2238,10 @@ static void cleanup_one_si(struct smi_info *smi_info) | |||
2379 | 2238 | ||
2380 | list_del(&smi_info->link); | 2239 | list_del(&smi_info->link); |
2381 | 2240 | ||
2382 | shutdown_one_si(smi_info); | 2241 | if (smi_info->intf) { |
2242 | ipmi_unregister_smi(smi_info->intf); | ||
2243 | smi_info->intf = NULL; | ||
2244 | } | ||
2383 | 2245 | ||
2384 | if (smi_info->pdev) { | 2246 | if (smi_info->pdev) { |
2385 | if (smi_info->pdev_registered) | 2247 | if (smi_info->pdev_registered) |
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c index 35a82f4bfd78..22f634eb09fd 100644 --- a/drivers/char/ipmi/ipmi_ssif.c +++ b/drivers/char/ipmi/ipmi_ssif.c | |||
@@ -193,8 +193,7 @@ typedef void (*ssif_i2c_done)(struct ssif_info *ssif_info, int result, | |||
193 | unsigned char *data, unsigned int len); | 193 | unsigned char *data, unsigned int len); |
194 | 194 | ||
195 | struct ssif_info { | 195 | struct ssif_info { |
196 | ipmi_smi_t intf; | 196 | struct ipmi_smi *intf; |
197 | int intf_num; | ||
198 | spinlock_t lock; | 197 | spinlock_t lock; |
199 | struct ipmi_smi_msg *waiting_msg; | 198 | struct ipmi_smi_msg *waiting_msg; |
200 | struct ipmi_smi_msg *curr_msg; | 199 | struct ipmi_smi_msg *curr_msg; |
@@ -290,8 +289,6 @@ struct ssif_info { | |||
290 | 289 | ||
291 | static bool initialized; | 290 | static bool initialized; |
292 | 291 | ||
293 | static atomic_t next_intf = ATOMIC_INIT(0); | ||
294 | |||
295 | static void return_hosed_msg(struct ssif_info *ssif_info, | 292 | static void return_hosed_msg(struct ssif_info *ssif_info, |
296 | struct ipmi_smi_msg *msg); | 293 | struct ipmi_smi_msg *msg); |
297 | static void start_next_msg(struct ssif_info *ssif_info, unsigned long *flags); | 294 | static void start_next_msg(struct ssif_info *ssif_info, unsigned long *flags); |
@@ -315,17 +312,13 @@ static void ipmi_ssif_unlock_cond(struct ssif_info *ssif_info, | |||
315 | static void deliver_recv_msg(struct ssif_info *ssif_info, | 312 | static void deliver_recv_msg(struct ssif_info *ssif_info, |
316 | struct ipmi_smi_msg *msg) | 313 | struct ipmi_smi_msg *msg) |
317 | { | 314 | { |
318 | ipmi_smi_t intf = ssif_info->intf; | 315 | if (msg->rsp_size < 0) { |
319 | |||
320 | if (!intf) { | ||
321 | ipmi_free_smi_msg(msg); | ||
322 | } else if (msg->rsp_size < 0) { | ||
323 | return_hosed_msg(ssif_info, msg); | 316 | return_hosed_msg(ssif_info, msg); |
324 | pr_err(PFX | 317 | pr_err(PFX |
325 | "Malformed message in deliver_recv_msg: rsp_size = %d\n", | 318 | "Malformed message in deliver_recv_msg: rsp_size = %d\n", |
326 | msg->rsp_size); | 319 | msg->rsp_size); |
327 | } else { | 320 | } else { |
328 | ipmi_smi_msg_received(intf, msg); | 321 | ipmi_smi_msg_received(ssif_info->intf, msg); |
329 | } | 322 | } |
330 | } | 323 | } |
331 | 324 | ||
@@ -452,12 +445,10 @@ static void start_recv_msg_fetch(struct ssif_info *ssif_info, | |||
452 | static void handle_flags(struct ssif_info *ssif_info, unsigned long *flags) | 445 | static void handle_flags(struct ssif_info *ssif_info, unsigned long *flags) |
453 | { | 446 | { |
454 | if (ssif_info->msg_flags & WDT_PRE_TIMEOUT_INT) { | 447 | if (ssif_info->msg_flags & WDT_PRE_TIMEOUT_INT) { |
455 | ipmi_smi_t intf = ssif_info->intf; | ||
456 | /* Watchdog pre-timeout */ | 448 | /* Watchdog pre-timeout */ |
457 | ssif_inc_stat(ssif_info, watchdog_pretimeouts); | 449 | ssif_inc_stat(ssif_info, watchdog_pretimeouts); |
458 | start_clear_flags(ssif_info, flags); | 450 | start_clear_flags(ssif_info, flags); |
459 | if (intf) | 451 | ipmi_smi_watchdog_pretimeout(ssif_info->intf); |
460 | ipmi_smi_watchdog_pretimeout(intf); | ||
461 | } else if (ssif_info->msg_flags & RECEIVE_MSG_AVAIL) | 452 | } else if (ssif_info->msg_flags & RECEIVE_MSG_AVAIL) |
462 | /* Messages available. */ | 453 | /* Messages available. */ |
463 | start_recv_msg_fetch(ssif_info, flags); | 454 | start_recv_msg_fetch(ssif_info, flags); |
@@ -1094,27 +1085,8 @@ static void request_events(void *send_info) | |||
1094 | } | 1085 | } |
1095 | } | 1086 | } |
1096 | 1087 | ||
1097 | static int inc_usecount(void *send_info) | 1088 | static int ssif_start_processing(void *send_info, |
1098 | { | 1089 | struct ipmi_smi *intf) |
1099 | struct ssif_info *ssif_info = send_info; | ||
1100 | |||
1101 | if (!i2c_get_adapter(i2c_adapter_id(ssif_info->client->adapter))) | ||
1102 | return -ENODEV; | ||
1103 | |||
1104 | i2c_use_client(ssif_info->client); | ||
1105 | return 0; | ||
1106 | } | ||
1107 | |||
1108 | static void dec_usecount(void *send_info) | ||
1109 | { | ||
1110 | struct ssif_info *ssif_info = send_info; | ||
1111 | |||
1112 | i2c_release_client(ssif_info->client); | ||
1113 | i2c_put_adapter(ssif_info->client->adapter); | ||
1114 | } | ||
1115 | |||
1116 | static int ssif_start_processing(void *send_info, | ||
1117 | ipmi_smi_t intf) | ||
1118 | { | 1090 | { |
1119 | struct ssif_info *ssif_info = send_info; | 1091 | struct ssif_info *ssif_info = send_info; |
1120 | 1092 | ||
@@ -1225,25 +1197,9 @@ static const struct attribute_group ipmi_ssif_dev_attr_group = { | |||
1225 | .attrs = ipmi_ssif_dev_attrs, | 1197 | .attrs = ipmi_ssif_dev_attrs, |
1226 | }; | 1198 | }; |
1227 | 1199 | ||
1228 | static int ssif_remove(struct i2c_client *client) | 1200 | static void shutdown_ssif(void *send_info) |
1229 | { | 1201 | { |
1230 | struct ssif_info *ssif_info = i2c_get_clientdata(client); | 1202 | struct ssif_info *ssif_info = send_info; |
1231 | struct ssif_addr_info *addr_info; | ||
1232 | int rv; | ||
1233 | |||
1234 | if (!ssif_info) | ||
1235 | return 0; | ||
1236 | |||
1237 | /* | ||
1238 | * After this point, we won't deliver anything asychronously | ||
1239 | * to the message handler. We can unregister ourself. | ||
1240 | */ | ||
1241 | rv = ipmi_unregister_smi(ssif_info->intf); | ||
1242 | if (rv) { | ||
1243 | pr_err(PFX "Unable to unregister device: errno=%d\n", rv); | ||
1244 | return rv; | ||
1245 | } | ||
1246 | ssif_info->intf = NULL; | ||
1247 | 1203 | ||
1248 | device_remove_group(&ssif_info->client->dev, &ipmi_ssif_dev_attr_group); | 1204 | device_remove_group(&ssif_info->client->dev, &ipmi_ssif_dev_attr_group); |
1249 | dev_set_drvdata(&ssif_info->client->dev, NULL); | 1205 | dev_set_drvdata(&ssif_info->client->dev, NULL); |
@@ -1259,6 +1215,30 @@ static int ssif_remove(struct i2c_client *client) | |||
1259 | kthread_stop(ssif_info->thread); | 1215 | kthread_stop(ssif_info->thread); |
1260 | } | 1216 | } |
1261 | 1217 | ||
1218 | /* | ||
1219 | * No message can be outstanding now, we have removed the | ||
1220 | * upper layer and it permitted us to do so. | ||
1221 | */ | ||
1222 | kfree(ssif_info); | ||
1223 | } | ||
1224 | |||
1225 | static int ssif_remove(struct i2c_client *client) | ||
1226 | { | ||
1227 | struct ssif_info *ssif_info = i2c_get_clientdata(client); | ||
1228 | struct ipmi_smi *intf; | ||
1229 | struct ssif_addr_info *addr_info; | ||
1230 | |||
1231 | if (!ssif_info) | ||
1232 | return 0; | ||
1233 | |||
1234 | /* | ||
1235 | * After this point, we won't deliver anything asychronously | ||
1236 | * to the message handler. We can unregister ourself. | ||
1237 | */ | ||
1238 | intf = ssif_info->intf; | ||
1239 | ssif_info->intf = NULL; | ||
1240 | ipmi_unregister_smi(intf); | ||
1241 | |||
1262 | list_for_each_entry(addr_info, &ssif_infos, link) { | 1242 | list_for_each_entry(addr_info, &ssif_infos, link) { |
1263 | if (addr_info->client == client) { | 1243 | if (addr_info->client == client) { |
1264 | addr_info->client = NULL; | 1244 | addr_info->client = NULL; |
@@ -1266,11 +1246,6 @@ static int ssif_remove(struct i2c_client *client) | |||
1266 | } | 1246 | } |
1267 | } | 1247 | } |
1268 | 1248 | ||
1269 | /* | ||
1270 | * No message can be outstanding now, we have removed the | ||
1271 | * upper layer and it permitted us to do so. | ||
1272 | */ | ||
1273 | kfree(ssif_info); | ||
1274 | return 0; | 1249 | return 0; |
1275 | } | 1250 | } |
1276 | 1251 | ||
@@ -1341,72 +1316,6 @@ static int ssif_detect(struct i2c_client *client, struct i2c_board_info *info) | |||
1341 | return rv; | 1316 | return rv; |
1342 | } | 1317 | } |
1343 | 1318 | ||
1344 | #ifdef CONFIG_IPMI_PROC_INTERFACE | ||
1345 | static int smi_type_proc_show(struct seq_file *m, void *v) | ||
1346 | { | ||
1347 | seq_puts(m, "ssif\n"); | ||
1348 | |||
1349 | return 0; | ||
1350 | } | ||
1351 | |||
1352 | static int smi_type_proc_open(struct inode *inode, struct file *file) | ||
1353 | { | ||
1354 | return single_open(file, smi_type_proc_show, inode->i_private); | ||
1355 | } | ||
1356 | |||
1357 | static const struct file_operations smi_type_proc_ops = { | ||
1358 | .open = smi_type_proc_open, | ||
1359 | .read = seq_read, | ||
1360 | .llseek = seq_lseek, | ||
1361 | .release = single_release, | ||
1362 | }; | ||
1363 | |||
1364 | static int smi_stats_proc_show(struct seq_file *m, void *v) | ||
1365 | { | ||
1366 | struct ssif_info *ssif_info = m->private; | ||
1367 | |||
1368 | seq_printf(m, "sent_messages: %u\n", | ||
1369 | ssif_get_stat(ssif_info, sent_messages)); | ||
1370 | seq_printf(m, "sent_messages_parts: %u\n", | ||
1371 | ssif_get_stat(ssif_info, sent_messages_parts)); | ||
1372 | seq_printf(m, "send_retries: %u\n", | ||
1373 | ssif_get_stat(ssif_info, send_retries)); | ||
1374 | seq_printf(m, "send_errors: %u\n", | ||
1375 | ssif_get_stat(ssif_info, send_errors)); | ||
1376 | seq_printf(m, "received_messages: %u\n", | ||
1377 | ssif_get_stat(ssif_info, received_messages)); | ||
1378 | seq_printf(m, "received_message_parts: %u\n", | ||
1379 | ssif_get_stat(ssif_info, received_message_parts)); | ||
1380 | seq_printf(m, "receive_retries: %u\n", | ||
1381 | ssif_get_stat(ssif_info, receive_retries)); | ||
1382 | seq_printf(m, "receive_errors: %u\n", | ||
1383 | ssif_get_stat(ssif_info, receive_errors)); | ||
1384 | seq_printf(m, "flag_fetches: %u\n", | ||
1385 | ssif_get_stat(ssif_info, flag_fetches)); | ||
1386 | seq_printf(m, "hosed: %u\n", | ||
1387 | ssif_get_stat(ssif_info, hosed)); | ||
1388 | seq_printf(m, "events: %u\n", | ||
1389 | ssif_get_stat(ssif_info, events)); | ||
1390 | seq_printf(m, "watchdog_pretimeouts: %u\n", | ||
1391 | ssif_get_stat(ssif_info, watchdog_pretimeouts)); | ||
1392 | seq_printf(m, "alerts: %u\n", | ||
1393 | ssif_get_stat(ssif_info, alerts)); | ||
1394 | return 0; | ||
1395 | } | ||
1396 | |||
1397 | static int smi_stats_proc_open(struct inode *inode, struct file *file) | ||
1398 | { | ||
1399 | return single_open(file, smi_stats_proc_show, PDE_DATA(inode)); | ||
1400 | } | ||
1401 | |||
1402 | static const struct file_operations smi_stats_proc_ops = { | ||
1403 | .open = smi_stats_proc_open, | ||
1404 | .read = seq_read, | ||
1405 | .llseek = seq_lseek, | ||
1406 | .release = single_release, | ||
1407 | }; | ||
1408 | #endif | ||
1409 | |||
1410 | static int strcmp_nospace(char *s1, char *s2) | 1319 | static int strcmp_nospace(char *s1, char *s2) |
1411 | { | 1320 | { |
1412 | while (*s1 && *s2) { | 1321 | while (*s1 && *s2) { |
@@ -1678,8 +1587,6 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id) | |||
1678 | } | 1587 | } |
1679 | 1588 | ||
1680 | found: | 1589 | found: |
1681 | ssif_info->intf_num = atomic_inc_return(&next_intf); | ||
1682 | |||
1683 | if (ssif_dbg_probe) { | 1590 | if (ssif_dbg_probe) { |
1684 | pr_info("ssif_probe: i2c_probe found device at i2c address %x\n", | 1591 | pr_info("ssif_probe: i2c_probe found device at i2c address %x\n", |
1685 | client->addr); | 1592 | client->addr); |
@@ -1697,11 +1604,10 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id) | |||
1697 | 1604 | ||
1698 | ssif_info->handlers.owner = THIS_MODULE; | 1605 | ssif_info->handlers.owner = THIS_MODULE; |
1699 | ssif_info->handlers.start_processing = ssif_start_processing; | 1606 | ssif_info->handlers.start_processing = ssif_start_processing; |
1607 | ssif_info->handlers.shutdown = shutdown_ssif; | ||
1700 | ssif_info->handlers.get_smi_info = get_smi_info; | 1608 | ssif_info->handlers.get_smi_info = get_smi_info; |
1701 | ssif_info->handlers.sender = sender; | 1609 | ssif_info->handlers.sender = sender; |
1702 | ssif_info->handlers.request_events = request_events; | 1610 | ssif_info->handlers.request_events = request_events; |
1703 | ssif_info->handlers.inc_usecount = inc_usecount; | ||
1704 | ssif_info->handlers.dec_usecount = dec_usecount; | ||
1705 | 1611 | ||
1706 | { | 1612 | { |
1707 | unsigned int thread_num; | 1613 | unsigned int thread_num; |
@@ -1740,24 +1646,6 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id) | |||
1740 | goto out_remove_attr; | 1646 | goto out_remove_attr; |
1741 | } | 1647 | } |
1742 | 1648 | ||
1743 | #ifdef CONFIG_IPMI_PROC_INTERFACE | ||
1744 | rv = ipmi_smi_add_proc_entry(ssif_info->intf, "type", | ||
1745 | &smi_type_proc_ops, | ||
1746 | ssif_info); | ||
1747 | if (rv) { | ||
1748 | pr_err(PFX "Unable to create proc entry: %d\n", rv); | ||
1749 | goto out_err_unreg; | ||
1750 | } | ||
1751 | |||
1752 | rv = ipmi_smi_add_proc_entry(ssif_info->intf, "ssif_stats", | ||
1753 | &smi_stats_proc_ops, | ||
1754 | ssif_info); | ||
1755 | if (rv) { | ||
1756 | pr_err(PFX "Unable to create proc entry: %d\n", rv); | ||
1757 | goto out_err_unreg; | ||
1758 | } | ||
1759 | #endif | ||
1760 | |||
1761 | out: | 1649 | out: |
1762 | if (rv) { | 1650 | if (rv) { |
1763 | /* | 1651 | /* |
@@ -1775,11 +1663,6 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id) | |||
1775 | kfree(resp); | 1663 | kfree(resp); |
1776 | return rv; | 1664 | return rv; |
1777 | 1665 | ||
1778 | #ifdef CONFIG_IPMI_PROC_INTERFACE | ||
1779 | out_err_unreg: | ||
1780 | ipmi_unregister_smi(ssif_info->intf); | ||
1781 | #endif | ||
1782 | |||
1783 | out_remove_attr: | 1666 | out_remove_attr: |
1784 | device_remove_group(&ssif_info->client->dev, &ipmi_ssif_dev_attr_group); | 1667 | device_remove_group(&ssif_info->client->dev, &ipmi_ssif_dev_attr_group); |
1785 | dev_set_drvdata(&ssif_info->client->dev, NULL); | 1668 | dev_set_drvdata(&ssif_info->client->dev, NULL); |
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c index 22bc287eac2d..ca1c5c5109f0 100644 --- a/drivers/char/ipmi/ipmi_watchdog.c +++ b/drivers/char/ipmi/ipmi_watchdog.c | |||
@@ -125,7 +125,7 @@ | |||
125 | static DEFINE_MUTEX(ipmi_watchdog_mutex); | 125 | static DEFINE_MUTEX(ipmi_watchdog_mutex); |
126 | static bool nowayout = WATCHDOG_NOWAYOUT; | 126 | static bool nowayout = WATCHDOG_NOWAYOUT; |
127 | 127 | ||
128 | static ipmi_user_t watchdog_user; | 128 | static struct ipmi_user *watchdog_user; |
129 | static int watchdog_ifnum; | 129 | static int watchdog_ifnum; |
130 | 130 | ||
131 | /* Default the timeout to 10 seconds. */ | 131 | /* Default the timeout to 10 seconds. */ |
@@ -153,7 +153,7 @@ static DEFINE_SPINLOCK(ipmi_read_lock); | |||
153 | static char data_to_read; | 153 | static char data_to_read; |
154 | static DECLARE_WAIT_QUEUE_HEAD(read_q); | 154 | static DECLARE_WAIT_QUEUE_HEAD(read_q); |
155 | static struct fasync_struct *fasync_q; | 155 | static struct fasync_struct *fasync_q; |
156 | static char pretimeout_since_last_heartbeat; | 156 | static atomic_t pretimeout_since_last_heartbeat; |
157 | static char expect_close; | 157 | static char expect_close; |
158 | 158 | ||
159 | static int ifnum_to_use = -1; | 159 | static int ifnum_to_use = -1; |
@@ -303,9 +303,6 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started " | |||
303 | /* Default state of the timer. */ | 303 | /* Default state of the timer. */ |
304 | static unsigned char ipmi_watchdog_state = WDOG_TIMEOUT_NONE; | 304 | static unsigned char ipmi_watchdog_state = WDOG_TIMEOUT_NONE; |
305 | 305 | ||
306 | /* If shutting down via IPMI, we ignore the heartbeat. */ | ||
307 | static int ipmi_ignore_heartbeat; | ||
308 | |||
309 | /* Is someone using the watchdog? Only one user is allowed. */ | 306 | /* Is someone using the watchdog? Only one user is allowed. */ |
310 | static unsigned long ipmi_wdog_open; | 307 | static unsigned long ipmi_wdog_open; |
311 | 308 | ||
@@ -329,35 +326,33 @@ static int testing_nmi; | |||
329 | static int nmi_handler_registered; | 326 | static int nmi_handler_registered; |
330 | #endif | 327 | #endif |
331 | 328 | ||
332 | static int ipmi_heartbeat(void); | 329 | static int __ipmi_heartbeat(void); |
333 | 330 | ||
334 | /* | 331 | /* |
335 | * We use a mutex to make sure that only one thing can send a set | 332 | * We use a mutex to make sure that only one thing can send a set a |
336 | * timeout at one time, because we only have one copy of the data. | 333 | * message at one time. The mutex is claimed when a message is sent |
337 | * The mutex is claimed when the set_timeout is sent and freed | 334 | * and freed when both the send and receive messages are free. |
338 | * when both messages are free. | ||
339 | */ | 335 | */ |
340 | static atomic_t set_timeout_tofree = ATOMIC_INIT(0); | 336 | static atomic_t msg_tofree = ATOMIC_INIT(0); |
341 | static DEFINE_MUTEX(set_timeout_lock); | 337 | static DECLARE_COMPLETION(msg_wait); |
342 | static DECLARE_COMPLETION(set_timeout_wait); | 338 | static void msg_free_smi(struct ipmi_smi_msg *msg) |
343 | static void set_timeout_free_smi(struct ipmi_smi_msg *msg) | ||
344 | { | 339 | { |
345 | if (atomic_dec_and_test(&set_timeout_tofree)) | 340 | if (atomic_dec_and_test(&msg_tofree)) |
346 | complete(&set_timeout_wait); | 341 | complete(&msg_wait); |
347 | } | 342 | } |
348 | static void set_timeout_free_recv(struct ipmi_recv_msg *msg) | 343 | static void msg_free_recv(struct ipmi_recv_msg *msg) |
349 | { | 344 | { |
350 | if (atomic_dec_and_test(&set_timeout_tofree)) | 345 | if (atomic_dec_and_test(&msg_tofree)) |
351 | complete(&set_timeout_wait); | 346 | complete(&msg_wait); |
352 | } | 347 | } |
353 | static struct ipmi_smi_msg set_timeout_smi_msg = { | 348 | static struct ipmi_smi_msg smi_msg = { |
354 | .done = set_timeout_free_smi | 349 | .done = msg_free_smi |
355 | }; | 350 | }; |
356 | static struct ipmi_recv_msg set_timeout_recv_msg = { | 351 | static struct ipmi_recv_msg recv_msg = { |
357 | .done = set_timeout_free_recv | 352 | .done = msg_free_recv |
358 | }; | 353 | }; |
359 | 354 | ||
360 | static int i_ipmi_set_timeout(struct ipmi_smi_msg *smi_msg, | 355 | static int __ipmi_set_timeout(struct ipmi_smi_msg *smi_msg, |
361 | struct ipmi_recv_msg *recv_msg, | 356 | struct ipmi_recv_msg *recv_msg, |
362 | int *send_heartbeat_now) | 357 | int *send_heartbeat_now) |
363 | { | 358 | { |
@@ -368,9 +363,6 @@ static int i_ipmi_set_timeout(struct ipmi_smi_msg *smi_msg, | |||
368 | int hbnow = 0; | 363 | int hbnow = 0; |
369 | 364 | ||
370 | 365 | ||
371 | /* These can be cleared as we are setting the timeout. */ | ||
372 | pretimeout_since_last_heartbeat = 0; | ||
373 | |||
374 | data[0] = 0; | 366 | data[0] = 0; |
375 | WDOG_SET_TIMER_USE(data[0], WDOG_TIMER_USE_SMS_OS); | 367 | WDOG_SET_TIMER_USE(data[0], WDOG_TIMER_USE_SMS_OS); |
376 | 368 | ||
@@ -414,46 +406,48 @@ static int i_ipmi_set_timeout(struct ipmi_smi_msg *smi_msg, | |||
414 | smi_msg, | 406 | smi_msg, |
415 | recv_msg, | 407 | recv_msg, |
416 | 1); | 408 | 1); |
417 | if (rv) { | 409 | if (rv) |
418 | printk(KERN_WARNING PFX "set timeout error: %d\n", | 410 | pr_warn(PFX "set timeout error: %d\n", rv); |
419 | rv); | 411 | else if (send_heartbeat_now) |
420 | } | 412 | *send_heartbeat_now = hbnow; |
421 | |||
422 | if (send_heartbeat_now) | ||
423 | *send_heartbeat_now = hbnow; | ||
424 | 413 | ||
425 | return rv; | 414 | return rv; |
426 | } | 415 | } |
427 | 416 | ||
428 | static int ipmi_set_timeout(int do_heartbeat) | 417 | static int _ipmi_set_timeout(int do_heartbeat) |
429 | { | 418 | { |
430 | int send_heartbeat_now; | 419 | int send_heartbeat_now; |
431 | int rv; | 420 | int rv; |
432 | 421 | ||
422 | if (!watchdog_user) | ||
423 | return -ENODEV; | ||
433 | 424 | ||
434 | /* We can only send one of these at a time. */ | 425 | atomic_set(&msg_tofree, 2); |
435 | mutex_lock(&set_timeout_lock); | ||
436 | |||
437 | atomic_set(&set_timeout_tofree, 2); | ||
438 | 426 | ||
439 | rv = i_ipmi_set_timeout(&set_timeout_smi_msg, | 427 | rv = __ipmi_set_timeout(&smi_msg, |
440 | &set_timeout_recv_msg, | 428 | &recv_msg, |
441 | &send_heartbeat_now); | 429 | &send_heartbeat_now); |
442 | if (rv) { | 430 | if (rv) |
443 | mutex_unlock(&set_timeout_lock); | 431 | return rv; |
444 | goto out; | ||
445 | } | ||
446 | |||
447 | wait_for_completion(&set_timeout_wait); | ||
448 | 432 | ||
449 | mutex_unlock(&set_timeout_lock); | 433 | wait_for_completion(&msg_wait); |
450 | 434 | ||
451 | if ((do_heartbeat == IPMI_SET_TIMEOUT_FORCE_HB) | 435 | if ((do_heartbeat == IPMI_SET_TIMEOUT_FORCE_HB) |
452 | || ((send_heartbeat_now) | 436 | || ((send_heartbeat_now) |
453 | && (do_heartbeat == IPMI_SET_TIMEOUT_HB_IF_NECESSARY))) | 437 | && (do_heartbeat == IPMI_SET_TIMEOUT_HB_IF_NECESSARY))) |
454 | rv = ipmi_heartbeat(); | 438 | rv = __ipmi_heartbeat(); |
439 | |||
440 | return rv; | ||
441 | } | ||
442 | |||
443 | static int ipmi_set_timeout(int do_heartbeat) | ||
444 | { | ||
445 | int rv; | ||
446 | |||
447 | mutex_lock(&ipmi_watchdog_mutex); | ||
448 | rv = _ipmi_set_timeout(do_heartbeat); | ||
449 | mutex_unlock(&ipmi_watchdog_mutex); | ||
455 | 450 | ||
456 | out: | ||
457 | return rv; | 451 | return rv; |
458 | } | 452 | } |
459 | 453 | ||
@@ -531,13 +525,12 @@ static void panic_halt_ipmi_set_timeout(void) | |||
531 | while (atomic_read(&panic_done_count) != 0) | 525 | while (atomic_read(&panic_done_count) != 0) |
532 | ipmi_poll_interface(watchdog_user); | 526 | ipmi_poll_interface(watchdog_user); |
533 | atomic_add(1, &panic_done_count); | 527 | atomic_add(1, &panic_done_count); |
534 | rv = i_ipmi_set_timeout(&panic_halt_smi_msg, | 528 | rv = __ipmi_set_timeout(&panic_halt_smi_msg, |
535 | &panic_halt_recv_msg, | 529 | &panic_halt_recv_msg, |
536 | &send_heartbeat_now); | 530 | &send_heartbeat_now); |
537 | if (rv) { | 531 | if (rv) { |
538 | atomic_sub(1, &panic_done_count); | 532 | atomic_sub(1, &panic_done_count); |
539 | printk(KERN_WARNING PFX | 533 | pr_warn(PFX "Unable to extend the watchdog timeout."); |
540 | "Unable to extend the watchdog timeout."); | ||
541 | } else { | 534 | } else { |
542 | if (send_heartbeat_now) | 535 | if (send_heartbeat_now) |
543 | panic_halt_ipmi_heartbeat(); | 536 | panic_halt_ipmi_heartbeat(); |
@@ -546,69 +539,22 @@ static void panic_halt_ipmi_set_timeout(void) | |||
546 | ipmi_poll_interface(watchdog_user); | 539 | ipmi_poll_interface(watchdog_user); |
547 | } | 540 | } |
548 | 541 | ||
549 | /* | 542 | static int __ipmi_heartbeat(void) |
550 | * We use a mutex to make sure that only one thing can send a | ||
551 | * heartbeat at one time, because we only have one copy of the data. | ||
552 | * The semaphore is claimed when the set_timeout is sent and freed | ||
553 | * when both messages are free. | ||
554 | */ | ||
555 | static atomic_t heartbeat_tofree = ATOMIC_INIT(0); | ||
556 | static DEFINE_MUTEX(heartbeat_lock); | ||
557 | static DECLARE_COMPLETION(heartbeat_wait); | ||
558 | static void heartbeat_free_smi(struct ipmi_smi_msg *msg) | ||
559 | { | ||
560 | if (atomic_dec_and_test(&heartbeat_tofree)) | ||
561 | complete(&heartbeat_wait); | ||
562 | } | ||
563 | static void heartbeat_free_recv(struct ipmi_recv_msg *msg) | ||
564 | { | ||
565 | if (atomic_dec_and_test(&heartbeat_tofree)) | ||
566 | complete(&heartbeat_wait); | ||
567 | } | ||
568 | static struct ipmi_smi_msg heartbeat_smi_msg = { | ||
569 | .done = heartbeat_free_smi | ||
570 | }; | ||
571 | static struct ipmi_recv_msg heartbeat_recv_msg = { | ||
572 | .done = heartbeat_free_recv | ||
573 | }; | ||
574 | |||
575 | static int ipmi_heartbeat(void) | ||
576 | { | 543 | { |
577 | struct kernel_ipmi_msg msg; | 544 | struct kernel_ipmi_msg msg; |
578 | int rv; | 545 | int rv; |
579 | struct ipmi_system_interface_addr addr; | 546 | struct ipmi_system_interface_addr addr; |
580 | int timeout_retries = 0; | 547 | int timeout_retries = 0; |
581 | |||
582 | if (ipmi_ignore_heartbeat) | ||
583 | return 0; | ||
584 | |||
585 | if (ipmi_start_timer_on_heartbeat) { | ||
586 | ipmi_start_timer_on_heartbeat = 0; | ||
587 | ipmi_watchdog_state = action_val; | ||
588 | return ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB); | ||
589 | } else if (pretimeout_since_last_heartbeat) { | ||
590 | /* | ||
591 | * A pretimeout occurred, make sure we set the timeout. | ||
592 | * We don't want to set the action, though, we want to | ||
593 | * leave that alone (thus it can't be combined with the | ||
594 | * above operation. | ||
595 | */ | ||
596 | return ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY); | ||
597 | } | ||
598 | |||
599 | mutex_lock(&heartbeat_lock); | ||
600 | 548 | ||
601 | restart: | 549 | restart: |
602 | atomic_set(&heartbeat_tofree, 2); | ||
603 | |||
604 | /* | 550 | /* |
605 | * Don't reset the timer if we have the timer turned off, that | 551 | * Don't reset the timer if we have the timer turned off, that |
606 | * re-enables the watchdog. | 552 | * re-enables the watchdog. |
607 | */ | 553 | */ |
608 | if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE) { | 554 | if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE) |
609 | mutex_unlock(&heartbeat_lock); | ||
610 | return 0; | 555 | return 0; |
611 | } | 556 | |
557 | atomic_set(&msg_tofree, 2); | ||
612 | 558 | ||
613 | addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; | 559 | addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; |
614 | addr.channel = IPMI_BMC_CHANNEL; | 560 | addr.channel = IPMI_BMC_CHANNEL; |
@@ -623,26 +569,23 @@ restart: | |||
623 | 0, | 569 | 0, |
624 | &msg, | 570 | &msg, |
625 | NULL, | 571 | NULL, |
626 | &heartbeat_smi_msg, | 572 | &smi_msg, |
627 | &heartbeat_recv_msg, | 573 | &recv_msg, |
628 | 1); | 574 | 1); |
629 | if (rv) { | 575 | if (rv) { |
630 | mutex_unlock(&heartbeat_lock); | 576 | pr_warn(PFX "heartbeat send failure: %d\n", rv); |
631 | printk(KERN_WARNING PFX "heartbeat failure: %d\n", | ||
632 | rv); | ||
633 | return rv; | 577 | return rv; |
634 | } | 578 | } |
635 | 579 | ||
636 | /* Wait for the heartbeat to be sent. */ | 580 | /* Wait for the heartbeat to be sent. */ |
637 | wait_for_completion(&heartbeat_wait); | 581 | wait_for_completion(&msg_wait); |
638 | 582 | ||
639 | if (heartbeat_recv_msg.msg.data[0] == IPMI_WDOG_TIMER_NOT_INIT_RESP) { | 583 | if (recv_msg.msg.data[0] == IPMI_WDOG_TIMER_NOT_INIT_RESP) { |
640 | timeout_retries++; | 584 | timeout_retries++; |
641 | if (timeout_retries > 3) { | 585 | if (timeout_retries > 3) { |
642 | printk(KERN_ERR PFX ": Unable to restore the IPMI" | 586 | pr_err(PFX ": Unable to restore the IPMI watchdog's settings, giving up.\n"); |
643 | " watchdog's settings, giving up.\n"); | ||
644 | rv = -EIO; | 587 | rv = -EIO; |
645 | goto out_unlock; | 588 | goto out; |
646 | } | 589 | } |
647 | 590 | ||
648 | /* | 591 | /* |
@@ -651,18 +594,17 @@ restart: | |||
651 | * to restore the timer's info. Note that we still hold | 594 | * to restore the timer's info. Note that we still hold |
652 | * the heartbeat lock, to keep a heartbeat from happening | 595 | * the heartbeat lock, to keep a heartbeat from happening |
653 | * in this process, so must say no heartbeat to avoid a | 596 | * in this process, so must say no heartbeat to avoid a |
654 | * deadlock on this mutex. | 597 | * deadlock on this mutex |
655 | */ | 598 | */ |
656 | rv = ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB); | 599 | rv = _ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB); |
657 | if (rv) { | 600 | if (rv) { |
658 | printk(KERN_ERR PFX ": Unable to send the command to" | 601 | pr_err(PFX ": Unable to send the command to set the watchdog's settings, giving up.\n"); |
659 | " set the watchdog's settings, giving up.\n"); | 602 | goto out; |
660 | goto out_unlock; | ||
661 | } | 603 | } |
662 | 604 | ||
663 | /* We might need a new heartbeat, so do it now */ | 605 | /* Might need a heartbeat send, go ahead and do it. */ |
664 | goto restart; | 606 | goto restart; |
665 | } else if (heartbeat_recv_msg.msg.data[0] != 0) { | 607 | } else if (recv_msg.msg.data[0] != 0) { |
666 | /* | 608 | /* |
667 | * Got an error in the heartbeat response. It was already | 609 | * Got an error in the heartbeat response. It was already |
668 | * reported in ipmi_wdog_msg_handler, but we should return | 610 | * reported in ipmi_wdog_msg_handler, but we should return |
@@ -671,8 +613,43 @@ restart: | |||
671 | rv = -EINVAL; | 613 | rv = -EINVAL; |
672 | } | 614 | } |
673 | 615 | ||
674 | out_unlock: | 616 | out: |
675 | mutex_unlock(&heartbeat_lock); | 617 | return rv; |
618 | } | ||
619 | |||
620 | static int _ipmi_heartbeat(void) | ||
621 | { | ||
622 | int rv; | ||
623 | |||
624 | if (!watchdog_user) | ||
625 | return -ENODEV; | ||
626 | |||
627 | if (ipmi_start_timer_on_heartbeat) { | ||
628 | ipmi_start_timer_on_heartbeat = 0; | ||
629 | ipmi_watchdog_state = action_val; | ||
630 | rv = _ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB); | ||
631 | } else if (atomic_cmpxchg(&pretimeout_since_last_heartbeat, 1, 0)) { | ||
632 | /* | ||
633 | * A pretimeout occurred, make sure we set the timeout. | ||
634 | * We don't want to set the action, though, we want to | ||
635 | * leave that alone (thus it can't be combined with the | ||
636 | * above operation. | ||
637 | */ | ||
638 | rv = _ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY); | ||
639 | } else { | ||
640 | rv = __ipmi_heartbeat(); | ||
641 | } | ||
642 | |||
643 | return rv; | ||
644 | } | ||
645 | |||
646 | static int ipmi_heartbeat(void) | ||
647 | { | ||
648 | int rv; | ||
649 | |||
650 | mutex_lock(&ipmi_watchdog_mutex); | ||
651 | rv = _ipmi_heartbeat(); | ||
652 | mutex_unlock(&ipmi_watchdog_mutex); | ||
676 | 653 | ||
677 | return rv; | 654 | return rv; |
678 | } | 655 | } |
@@ -700,7 +677,7 @@ static int ipmi_ioctl(struct file *file, | |||
700 | if (i) | 677 | if (i) |
701 | return -EFAULT; | 678 | return -EFAULT; |
702 | timeout = val; | 679 | timeout = val; |
703 | return ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY); | 680 | return _ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY); |
704 | 681 | ||
705 | case WDIOC_GETTIMEOUT: | 682 | case WDIOC_GETTIMEOUT: |
706 | i = copy_to_user(argp, &timeout, sizeof(timeout)); | 683 | i = copy_to_user(argp, &timeout, sizeof(timeout)); |
@@ -713,7 +690,7 @@ static int ipmi_ioctl(struct file *file, | |||
713 | if (i) | 690 | if (i) |
714 | return -EFAULT; | 691 | return -EFAULT; |
715 | pretimeout = val; | 692 | pretimeout = val; |
716 | return ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY); | 693 | return _ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY); |
717 | 694 | ||
718 | case WDIOC_GETPRETIMEOUT: | 695 | case WDIOC_GETPRETIMEOUT: |
719 | i = copy_to_user(argp, &pretimeout, sizeof(pretimeout)); | 696 | i = copy_to_user(argp, &pretimeout, sizeof(pretimeout)); |
@@ -722,7 +699,7 @@ static int ipmi_ioctl(struct file *file, | |||
722 | return 0; | 699 | return 0; |
723 | 700 | ||
724 | case WDIOC_KEEPALIVE: | 701 | case WDIOC_KEEPALIVE: |
725 | return ipmi_heartbeat(); | 702 | return _ipmi_heartbeat(); |
726 | 703 | ||
727 | case WDIOC_SETOPTIONS: | 704 | case WDIOC_SETOPTIONS: |
728 | i = copy_from_user(&val, argp, sizeof(int)); | 705 | i = copy_from_user(&val, argp, sizeof(int)); |
@@ -730,13 +707,13 @@ static int ipmi_ioctl(struct file *file, | |||
730 | return -EFAULT; | 707 | return -EFAULT; |
731 | if (val & WDIOS_DISABLECARD) { | 708 | if (val & WDIOS_DISABLECARD) { |
732 | ipmi_watchdog_state = WDOG_TIMEOUT_NONE; | 709 | ipmi_watchdog_state = WDOG_TIMEOUT_NONE; |
733 | ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB); | 710 | _ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB); |
734 | ipmi_start_timer_on_heartbeat = 0; | 711 | ipmi_start_timer_on_heartbeat = 0; |
735 | } | 712 | } |
736 | 713 | ||
737 | if (val & WDIOS_ENABLECARD) { | 714 | if (val & WDIOS_ENABLECARD) { |
738 | ipmi_watchdog_state = action_val; | 715 | ipmi_watchdog_state = action_val; |
739 | ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB); | 716 | _ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB); |
740 | } | 717 | } |
741 | return 0; | 718 | return 0; |
742 | 719 | ||
@@ -810,7 +787,7 @@ static ssize_t ipmi_read(struct file *file, | |||
810 | * Reading returns if the pretimeout has gone off, and it only does | 787 | * Reading returns if the pretimeout has gone off, and it only does |
811 | * it once per pretimeout. | 788 | * it once per pretimeout. |
812 | */ | 789 | */ |
813 | spin_lock(&ipmi_read_lock); | 790 | spin_lock_irq(&ipmi_read_lock); |
814 | if (!data_to_read) { | 791 | if (!data_to_read) { |
815 | if (file->f_flags & O_NONBLOCK) { | 792 | if (file->f_flags & O_NONBLOCK) { |
816 | rv = -EAGAIN; | 793 | rv = -EAGAIN; |
@@ -821,9 +798,9 @@ static ssize_t ipmi_read(struct file *file, | |||
821 | add_wait_queue(&read_q, &wait); | 798 | add_wait_queue(&read_q, &wait); |
822 | while (!data_to_read) { | 799 | while (!data_to_read) { |
823 | set_current_state(TASK_INTERRUPTIBLE); | 800 | set_current_state(TASK_INTERRUPTIBLE); |
824 | spin_unlock(&ipmi_read_lock); | 801 | spin_unlock_irq(&ipmi_read_lock); |
825 | schedule(); | 802 | schedule(); |
826 | spin_lock(&ipmi_read_lock); | 803 | spin_lock_irq(&ipmi_read_lock); |
827 | } | 804 | } |
828 | remove_wait_queue(&read_q, &wait); | 805 | remove_wait_queue(&read_q, &wait); |
829 | 806 | ||
@@ -835,7 +812,7 @@ static ssize_t ipmi_read(struct file *file, | |||
835 | data_to_read = 0; | 812 | data_to_read = 0; |
836 | 813 | ||
837 | out: | 814 | out: |
838 | spin_unlock(&ipmi_read_lock); | 815 | spin_unlock_irq(&ipmi_read_lock); |
839 | 816 | ||
840 | if (rv == 0) { | 817 | if (rv == 0) { |
841 | if (copy_to_user(buf, &data_to_read, 1)) | 818 | if (copy_to_user(buf, &data_to_read, 1)) |
@@ -873,10 +850,10 @@ static __poll_t ipmi_poll(struct file *file, poll_table *wait) | |||
873 | 850 | ||
874 | poll_wait(file, &read_q, wait); | 851 | poll_wait(file, &read_q, wait); |
875 | 852 | ||
876 | spin_lock(&ipmi_read_lock); | 853 | spin_lock_irq(&ipmi_read_lock); |
877 | if (data_to_read) | 854 | if (data_to_read) |
878 | mask |= (EPOLLIN | EPOLLRDNORM); | 855 | mask |= (EPOLLIN | EPOLLRDNORM); |
879 | spin_unlock(&ipmi_read_lock); | 856 | spin_unlock_irq(&ipmi_read_lock); |
880 | 857 | ||
881 | return mask; | 858 | return mask; |
882 | } | 859 | } |
@@ -894,11 +871,13 @@ static int ipmi_close(struct inode *ino, struct file *filep) | |||
894 | { | 871 | { |
895 | if (iminor(ino) == WATCHDOG_MINOR) { | 872 | if (iminor(ino) == WATCHDOG_MINOR) { |
896 | if (expect_close == 42) { | 873 | if (expect_close == 42) { |
874 | mutex_lock(&ipmi_watchdog_mutex); | ||
897 | ipmi_watchdog_state = WDOG_TIMEOUT_NONE; | 875 | ipmi_watchdog_state = WDOG_TIMEOUT_NONE; |
898 | ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB); | 876 | _ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB); |
877 | mutex_unlock(&ipmi_watchdog_mutex); | ||
899 | } else { | 878 | } else { |
900 | printk(KERN_CRIT PFX | 879 | pr_crit(PFX |
901 | "Unexpected close, not stopping watchdog!\n"); | 880 | "Unexpected close, not stopping watchdog!\n"); |
902 | ipmi_heartbeat(); | 881 | ipmi_heartbeat(); |
903 | } | 882 | } |
904 | clear_bit(0, &ipmi_wdog_open); | 883 | clear_bit(0, &ipmi_wdog_open); |
@@ -932,11 +911,9 @@ static void ipmi_wdog_msg_handler(struct ipmi_recv_msg *msg, | |||
932 | { | 911 | { |
933 | if (msg->msg.cmd == IPMI_WDOG_RESET_TIMER && | 912 | if (msg->msg.cmd == IPMI_WDOG_RESET_TIMER && |
934 | msg->msg.data[0] == IPMI_WDOG_TIMER_NOT_INIT_RESP) | 913 | msg->msg.data[0] == IPMI_WDOG_TIMER_NOT_INIT_RESP) |
935 | printk(KERN_INFO PFX "response: The IPMI controller appears" | 914 | pr_info(PFX "response: The IPMI controller appears to have been reset, will attempt to reinitialize the watchdog timer\n"); |
936 | " to have been reset, will attempt to reinitialize" | ||
937 | " the watchdog timer\n"); | ||
938 | else if (msg->msg.data[0] != 0) | 915 | else if (msg->msg.data[0] != 0) |
939 | printk(KERN_ERR PFX "response: Error %x on cmd %x\n", | 916 | pr_err(PFX "response: Error %x on cmd %x\n", |
940 | msg->msg.data[0], | 917 | msg->msg.data[0], |
941 | msg->msg.cmd); | 918 | msg->msg.cmd); |
942 | 919 | ||
@@ -950,12 +927,13 @@ static void ipmi_wdog_pretimeout_handler(void *handler_data) | |||
950 | if (atomic_inc_and_test(&preop_panic_excl)) | 927 | if (atomic_inc_and_test(&preop_panic_excl)) |
951 | panic("Watchdog pre-timeout"); | 928 | panic("Watchdog pre-timeout"); |
952 | } else if (preop_val == WDOG_PREOP_GIVE_DATA) { | 929 | } else if (preop_val == WDOG_PREOP_GIVE_DATA) { |
953 | spin_lock(&ipmi_read_lock); | 930 | unsigned long flags; |
931 | |||
932 | spin_lock_irqsave(&ipmi_read_lock, flags); | ||
954 | data_to_read = 1; | 933 | data_to_read = 1; |
955 | wake_up_interruptible(&read_q); | 934 | wake_up_interruptible(&read_q); |
956 | kill_fasync(&fasync_q, SIGIO, POLL_IN); | 935 | kill_fasync(&fasync_q, SIGIO, POLL_IN); |
957 | 936 | spin_unlock_irqrestore(&ipmi_read_lock, flags); | |
958 | spin_unlock(&ipmi_read_lock); | ||
959 | } | 937 | } |
960 | } | 938 | } |
961 | 939 | ||
@@ -963,12 +941,34 @@ static void ipmi_wdog_pretimeout_handler(void *handler_data) | |||
963 | * On some machines, the heartbeat will give an error and not | 941 | * On some machines, the heartbeat will give an error and not |
964 | * work unless we re-enable the timer. So do so. | 942 | * work unless we re-enable the timer. So do so. |
965 | */ | 943 | */ |
966 | pretimeout_since_last_heartbeat = 1; | 944 | atomic_set(&pretimeout_since_last_heartbeat, 1); |
945 | } | ||
946 | |||
947 | static void ipmi_wdog_panic_handler(void *user_data) | ||
948 | { | ||
949 | static int panic_event_handled; | ||
950 | |||
951 | /* | ||
952 | * On a panic, if we have a panic timeout, make sure to extend | ||
953 | * the watchdog timer to a reasonable value to complete the | ||
954 | * panic, if the watchdog timer is running. Plus the | ||
955 | * pretimeout is meaningless at panic time. | ||
956 | */ | ||
957 | if (watchdog_user && !panic_event_handled && | ||
958 | ipmi_watchdog_state != WDOG_TIMEOUT_NONE) { | ||
959 | /* Make sure we do this only once. */ | ||
960 | panic_event_handled = 1; | ||
961 | |||
962 | timeout = panic_wdt_timeout; | ||
963 | pretimeout = 0; | ||
964 | panic_halt_ipmi_set_timeout(); | ||
965 | } | ||
967 | } | 966 | } |
968 | 967 | ||
969 | static const struct ipmi_user_hndl ipmi_hndlrs = { | 968 | static const struct ipmi_user_hndl ipmi_hndlrs = { |
970 | .ipmi_recv_hndl = ipmi_wdog_msg_handler, | 969 | .ipmi_recv_hndl = ipmi_wdog_msg_handler, |
971 | .ipmi_watchdog_pretimeout = ipmi_wdog_pretimeout_handler | 970 | .ipmi_watchdog_pretimeout = ipmi_wdog_pretimeout_handler, |
971 | .ipmi_panic_handler = ipmi_wdog_panic_handler | ||
972 | }; | 972 | }; |
973 | 973 | ||
974 | static void ipmi_register_watchdog(int ipmi_intf) | 974 | static void ipmi_register_watchdog(int ipmi_intf) |
@@ -985,7 +985,7 @@ static void ipmi_register_watchdog(int ipmi_intf) | |||
985 | 985 | ||
986 | rv = ipmi_create_user(ipmi_intf, &ipmi_hndlrs, NULL, &watchdog_user); | 986 | rv = ipmi_create_user(ipmi_intf, &ipmi_hndlrs, NULL, &watchdog_user); |
987 | if (rv < 0) { | 987 | if (rv < 0) { |
988 | printk(KERN_CRIT PFX "Unable to register with ipmi\n"); | 988 | pr_crit(PFX "Unable to register with ipmi\n"); |
989 | goto out; | 989 | goto out; |
990 | } | 990 | } |
991 | 991 | ||
@@ -1002,7 +1002,7 @@ static void ipmi_register_watchdog(int ipmi_intf) | |||
1002 | if (rv < 0) { | 1002 | if (rv < 0) { |
1003 | ipmi_destroy_user(watchdog_user); | 1003 | ipmi_destroy_user(watchdog_user); |
1004 | watchdog_user = NULL; | 1004 | watchdog_user = NULL; |
1005 | printk(KERN_CRIT PFX "Unable to register misc device\n"); | 1005 | pr_crit(PFX "Unable to register misc device\n"); |
1006 | } | 1006 | } |
1007 | 1007 | ||
1008 | #ifdef HAVE_DIE_NMI | 1008 | #ifdef HAVE_DIE_NMI |
@@ -1024,9 +1024,8 @@ static void ipmi_register_watchdog(int ipmi_intf) | |||
1024 | 1024 | ||
1025 | rv = ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB); | 1025 | rv = ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB); |
1026 | if (rv) { | 1026 | if (rv) { |
1027 | printk(KERN_WARNING PFX "Error starting timer to" | 1027 | pr_warn(PFX "Error starting timer to test NMI: 0x%x. The NMI pretimeout will likely not work\n", |
1028 | " test NMI: 0x%x. The NMI pretimeout will" | 1028 | rv); |
1029 | " likely not work\n", rv); | ||
1030 | rv = 0; | 1029 | rv = 0; |
1031 | goto out_restore; | 1030 | goto out_restore; |
1032 | } | 1031 | } |
@@ -1034,9 +1033,7 @@ static void ipmi_register_watchdog(int ipmi_intf) | |||
1034 | msleep(1500); | 1033 | msleep(1500); |
1035 | 1034 | ||
1036 | if (testing_nmi != 2) { | 1035 | if (testing_nmi != 2) { |
1037 | printk(KERN_WARNING PFX "IPMI NMI didn't seem to" | 1036 | pr_warn(PFX "IPMI NMI didn't seem to occur. The NMI pretimeout will likely not work\n"); |
1038 | " occur. The NMI pretimeout will" | ||
1039 | " likely not work\n"); | ||
1040 | } | 1037 | } |
1041 | out_restore: | 1038 | out_restore: |
1042 | testing_nmi = 0; | 1039 | testing_nmi = 0; |
@@ -1052,7 +1049,7 @@ static void ipmi_register_watchdog(int ipmi_intf) | |||
1052 | start_now = 0; /* Disable this function after first startup. */ | 1049 | start_now = 0; /* Disable this function after first startup. */ |
1053 | ipmi_watchdog_state = action_val; | 1050 | ipmi_watchdog_state = action_val; |
1054 | ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB); | 1051 | ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB); |
1055 | printk(KERN_INFO PFX "Starting now!\n"); | 1052 | pr_info(PFX "Starting now!\n"); |
1056 | } else { | 1053 | } else { |
1057 | /* Stop the timer now. */ | 1054 | /* Stop the timer now. */ |
1058 | ipmi_watchdog_state = WDOG_TIMEOUT_NONE; | 1055 | ipmi_watchdog_state = WDOG_TIMEOUT_NONE; |
@@ -1063,34 +1060,38 @@ static void ipmi_register_watchdog(int ipmi_intf) | |||
1063 | static void ipmi_unregister_watchdog(int ipmi_intf) | 1060 | static void ipmi_unregister_watchdog(int ipmi_intf) |
1064 | { | 1061 | { |
1065 | int rv; | 1062 | int rv; |
1063 | struct ipmi_user *loc_user = watchdog_user; | ||
1066 | 1064 | ||
1067 | if (!watchdog_user) | 1065 | if (!loc_user) |
1068 | goto out; | 1066 | return; |
1069 | 1067 | ||
1070 | if (watchdog_ifnum != ipmi_intf) | 1068 | if (watchdog_ifnum != ipmi_intf) |
1071 | goto out; | 1069 | return; |
1072 | 1070 | ||
1073 | /* Make sure no one can call us any more. */ | 1071 | /* Make sure no one can call us any more. */ |
1074 | misc_deregister(&ipmi_wdog_miscdev); | 1072 | misc_deregister(&ipmi_wdog_miscdev); |
1075 | 1073 | ||
1074 | watchdog_user = NULL; | ||
1075 | |||
1076 | /* | 1076 | /* |
1077 | * Wait to make sure the message makes it out. The lower layer has | 1077 | * Wait to make sure the message makes it out. The lower layer has |
1078 | * pointers to our buffers, we want to make sure they are done before | 1078 | * pointers to our buffers, we want to make sure they are done before |
1079 | * we release our memory. | 1079 | * we release our memory. |
1080 | */ | 1080 | */ |
1081 | while (atomic_read(&set_timeout_tofree)) | 1081 | while (atomic_read(&msg_tofree)) |
1082 | schedule_timeout_uninterruptible(1); | 1082 | msg_free_smi(NULL); |
1083 | |||
1084 | mutex_lock(&ipmi_watchdog_mutex); | ||
1083 | 1085 | ||
1084 | /* Disconnect from IPMI. */ | 1086 | /* Disconnect from IPMI. */ |
1085 | rv = ipmi_destroy_user(watchdog_user); | 1087 | rv = ipmi_destroy_user(loc_user); |
1086 | if (rv) { | 1088 | if (rv) |
1087 | printk(KERN_WARNING PFX "error unlinking from IPMI: %d\n", | 1089 | pr_warn(PFX "error unlinking from IPMI: %d\n", rv); |
1088 | rv); | ||
1089 | } | ||
1090 | watchdog_user = NULL; | ||
1091 | 1090 | ||
1092 | out: | 1091 | /* If it comes back, restart it properly. */ |
1093 | return; | 1092 | ipmi_start_timer_on_heartbeat = 1; |
1093 | |||
1094 | mutex_unlock(&ipmi_watchdog_mutex); | ||
1094 | } | 1095 | } |
1095 | 1096 | ||
1096 | #ifdef HAVE_DIE_NMI | 1097 | #ifdef HAVE_DIE_NMI |
@@ -1124,7 +1125,7 @@ ipmi_nmi(unsigned int val, struct pt_regs *regs) | |||
1124 | /* On some machines, the heartbeat will give | 1125 | /* On some machines, the heartbeat will give |
1125 | an error and not work unless we re-enable | 1126 | an error and not work unless we re-enable |
1126 | the timer. So do so. */ | 1127 | the timer. So do so. */ |
1127 | pretimeout_since_last_heartbeat = 1; | 1128 | atomic_set(&pretimeout_since_last_heartbeat, 1); |
1128 | if (atomic_inc_and_test(&preop_panic_excl)) | 1129 | if (atomic_inc_and_test(&preop_panic_excl)) |
1129 | nmi_panic(regs, PFX "pre-timeout"); | 1130 | nmi_panic(regs, PFX "pre-timeout"); |
1130 | } | 1131 | } |
@@ -1167,36 +1168,6 @@ static struct notifier_block wdog_reboot_notifier = { | |||
1167 | .priority = 0 | 1168 | .priority = 0 |
1168 | }; | 1169 | }; |
1169 | 1170 | ||
1170 | static int wdog_panic_handler(struct notifier_block *this, | ||
1171 | unsigned long event, | ||
1172 | void *unused) | ||
1173 | { | ||
1174 | static int panic_event_handled; | ||
1175 | |||
1176 | /* On a panic, if we have a panic timeout, make sure to extend | ||
1177 | the watchdog timer to a reasonable value to complete the | ||
1178 | panic, if the watchdog timer is running. Plus the | ||
1179 | pretimeout is meaningless at panic time. */ | ||
1180 | if (watchdog_user && !panic_event_handled && | ||
1181 | ipmi_watchdog_state != WDOG_TIMEOUT_NONE) { | ||
1182 | /* Make sure we do this only once. */ | ||
1183 | panic_event_handled = 1; | ||
1184 | |||
1185 | timeout = panic_wdt_timeout; | ||
1186 | pretimeout = 0; | ||
1187 | panic_halt_ipmi_set_timeout(); | ||
1188 | } | ||
1189 | |||
1190 | return NOTIFY_OK; | ||
1191 | } | ||
1192 | |||
1193 | static struct notifier_block wdog_panic_notifier = { | ||
1194 | .notifier_call = wdog_panic_handler, | ||
1195 | .next = NULL, | ||
1196 | .priority = 150 /* priority: INT_MAX >= x >= 0 */ | ||
1197 | }; | ||
1198 | |||
1199 | |||
1200 | static void ipmi_new_smi(int if_num, struct device *device) | 1171 | static void ipmi_new_smi(int if_num, struct device *device) |
1201 | { | 1172 | { |
1202 | ipmi_register_watchdog(if_num); | 1173 | ipmi_register_watchdog(if_num); |
@@ -1288,9 +1259,7 @@ static void check_parms(void) | |||
1288 | if (preaction_val == WDOG_PRETIMEOUT_NMI) { | 1259 | if (preaction_val == WDOG_PRETIMEOUT_NMI) { |
1289 | do_nmi = 1; | 1260 | do_nmi = 1; |
1290 | if (preop_val == WDOG_PREOP_GIVE_DATA) { | 1261 | if (preop_val == WDOG_PREOP_GIVE_DATA) { |
1291 | printk(KERN_WARNING PFX "Pretimeout op is to give data" | 1262 | pr_warn(PFX "Pretimeout op is to give data but NMI pretimeout is enabled, setting pretimeout op to none\n"); |
1292 | " but NMI pretimeout is enabled, setting" | ||
1293 | " pretimeout op to none\n"); | ||
1294 | preop_op("preop_none", NULL); | 1263 | preop_op("preop_none", NULL); |
1295 | do_nmi = 0; | 1264 | do_nmi = 0; |
1296 | } | 1265 | } |
@@ -1299,8 +1268,7 @@ static void check_parms(void) | |||
1299 | rv = register_nmi_handler(NMI_UNKNOWN, ipmi_nmi, 0, | 1268 | rv = register_nmi_handler(NMI_UNKNOWN, ipmi_nmi, 0, |
1300 | "ipmi"); | 1269 | "ipmi"); |
1301 | if (rv) { | 1270 | if (rv) { |
1302 | printk(KERN_WARNING PFX | 1271 | pr_warn(PFX "Can't register nmi handler\n"); |
1303 | "Can't register nmi handler\n"); | ||
1304 | return; | 1272 | return; |
1305 | } else | 1273 | } else |
1306 | nmi_handler_registered = 1; | 1274 | nmi_handler_registered = 1; |
@@ -1317,27 +1285,24 @@ static int __init ipmi_wdog_init(void) | |||
1317 | 1285 | ||
1318 | if (action_op(action, NULL)) { | 1286 | if (action_op(action, NULL)) { |
1319 | action_op("reset", NULL); | 1287 | action_op("reset", NULL); |
1320 | printk(KERN_INFO PFX "Unknown action '%s', defaulting to" | 1288 | pr_info(PFX "Unknown action '%s', defaulting to reset\n", |
1321 | " reset\n", action); | 1289 | action); |
1322 | } | 1290 | } |
1323 | 1291 | ||
1324 | if (preaction_op(preaction, NULL)) { | 1292 | if (preaction_op(preaction, NULL)) { |
1325 | preaction_op("pre_none", NULL); | 1293 | preaction_op("pre_none", NULL); |
1326 | printk(KERN_INFO PFX "Unknown preaction '%s', defaulting to" | 1294 | pr_info(PFX "Unknown preaction '%s', defaulting to none\n", |
1327 | " none\n", preaction); | 1295 | preaction); |
1328 | } | 1296 | } |
1329 | 1297 | ||
1330 | if (preop_op(preop, NULL)) { | 1298 | if (preop_op(preop, NULL)) { |
1331 | preop_op("preop_none", NULL); | 1299 | preop_op("preop_none", NULL); |
1332 | printk(KERN_INFO PFX "Unknown preop '%s', defaulting to" | 1300 | pr_info(PFX "Unknown preop '%s', defaulting to none\n", preop); |
1333 | " none\n", preop); | ||
1334 | } | 1301 | } |
1335 | 1302 | ||
1336 | check_parms(); | 1303 | check_parms(); |
1337 | 1304 | ||
1338 | register_reboot_notifier(&wdog_reboot_notifier); | 1305 | register_reboot_notifier(&wdog_reboot_notifier); |
1339 | atomic_notifier_chain_register(&panic_notifier_list, | ||
1340 | &wdog_panic_notifier); | ||
1341 | 1306 | ||
1342 | rv = ipmi_smi_watcher_register(&smi_watcher); | 1307 | rv = ipmi_smi_watcher_register(&smi_watcher); |
1343 | if (rv) { | 1308 | if (rv) { |
@@ -1345,14 +1310,12 @@ static int __init ipmi_wdog_init(void) | |||
1345 | if (nmi_handler_registered) | 1310 | if (nmi_handler_registered) |
1346 | unregister_nmi_handler(NMI_UNKNOWN, "ipmi"); | 1311 | unregister_nmi_handler(NMI_UNKNOWN, "ipmi"); |
1347 | #endif | 1312 | #endif |
1348 | atomic_notifier_chain_unregister(&panic_notifier_list, | ||
1349 | &wdog_panic_notifier); | ||
1350 | unregister_reboot_notifier(&wdog_reboot_notifier); | 1313 | unregister_reboot_notifier(&wdog_reboot_notifier); |
1351 | printk(KERN_WARNING PFX "can't register smi watcher\n"); | 1314 | pr_warn(PFX "can't register smi watcher\n"); |
1352 | return rv; | 1315 | return rv; |
1353 | } | 1316 | } |
1354 | 1317 | ||
1355 | printk(KERN_INFO PFX "driver initialized\n"); | 1318 | pr_info(PFX "driver initialized\n"); |
1356 | 1319 | ||
1357 | return 0; | 1320 | return 0; |
1358 | } | 1321 | } |
@@ -1367,8 +1330,6 @@ static void __exit ipmi_wdog_exit(void) | |||
1367 | unregister_nmi_handler(NMI_UNKNOWN, "ipmi"); | 1330 | unregister_nmi_handler(NMI_UNKNOWN, "ipmi"); |
1368 | #endif | 1331 | #endif |
1369 | 1332 | ||
1370 | atomic_notifier_chain_unregister(&panic_notifier_list, | ||
1371 | &wdog_panic_notifier); | ||
1372 | unregister_reboot_notifier(&wdog_reboot_notifier); | 1333 | unregister_reboot_notifier(&wdog_reboot_notifier); |
1373 | } | 1334 | } |
1374 | module_exit(ipmi_wdog_exit); | 1335 | module_exit(ipmi_wdog_exit); |
diff --git a/drivers/char/ipmi/kcs_bmc_npcm7xx.c b/drivers/char/ipmi/kcs_bmc_npcm7xx.c new file mode 100644 index 000000000000..722f7391fe1f --- /dev/null +++ b/drivers/char/ipmi/kcs_bmc_npcm7xx.c | |||
@@ -0,0 +1,215 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * Copyright (c) 2018, Nuvoton Corporation. | ||
4 | * Copyright (c) 2018, Intel Corporation. | ||
5 | */ | ||
6 | |||
7 | #define pr_fmt(fmt) "nuvoton-kcs-bmc: " fmt | ||
8 | |||
9 | #include <linux/atomic.h> | ||
10 | #include <linux/errno.h> | ||
11 | #include <linux/interrupt.h> | ||
12 | #include <linux/io.h> | ||
13 | #include <linux/mfd/syscon.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <linux/of.h> | ||
16 | #include <linux/platform_device.h> | ||
17 | #include <linux/regmap.h> | ||
18 | #include <linux/slab.h> | ||
19 | |||
20 | #include "kcs_bmc.h" | ||
21 | |||
#define DEVICE_NAME	"npcm-kcs-bmc"
#define KCS_CHANNEL_MAX	3

/*
 * Per-channel register offsets inside the parent LPC syscon block.
 * Three KCS channels are exposed; each has status, data-out, data-in,
 * control and interrupt-enable registers at distinct offsets.
 */

/* KCS Channel n Status Register (KCSnST) */
#define KCS1ST	0x0C
#define KCS2ST	0x1E
#define KCS3ST	0x30

/* KCS Channel n Data Out Buffer Register (KCSnDO) */
#define KCS1DO	0x0E
#define KCS2DO	0x20
#define KCS3DO	0x32

/* KCS Channel n Data In Buffer Register (KCSnDI) */
#define KCS1DI	0x10
#define KCS2DI	0x22
#define KCS3DI	0x34

/* KCS Channel n Control Register (KCSnCTL) */
#define KCS1CTL	0x18
#define KCS2CTL	0x2A
#define KCS3CTL	0x3C
#define KCS_CTL_IBFIE	BIT(0)	/* Input Buffer Full interrupt enable */

/* KCS Channel n Interrupt Enable Register (KCSnIE) */
#define KCS1IE	0x1C
#define KCS2IE	0x2E
#define KCS3IE	0x40
#define KCS_IE_IRQE	BIT(0)	/* core interrupt enable */
#define KCS_IE_HIRQE	BIT(3)	/* host interrupt enable */
47 | |||
/*
 * 7.2.4 Core KCS Registers
 * Registers in this module are 8 bits. An 8-bit register must be accessed
 * by an 8-bit read or write.
 *
 * sts: KCS Channel n Status Register (KCSnST).
 * dob: KCS Channel n Data Out Buffer Register (KCSnDO).
 * dib: KCS Channel n Data In Buffer Register (KCSnDI).
 * ctl: KCS Channel n Control Register (KCSnCTL).
 * ie : KCS Channel n Interrupt Enable Register (KCSnIE).
 */
struct npcm7xx_kcs_reg {
	u32 sts;
	u32 dob;
	u32 dib;
	u32 ctl;
	u32 ie;
};

/* Driver-private state hung off the generic kcs_bmc device. */
struct npcm7xx_kcs_bmc {
	/* regmap of the parent LPC syscon node; all register I/O goes here */
	struct regmap *map;

	/* offsets for the channel selected by the "kcs_chan" DT property */
	const struct npcm7xx_kcs_reg *reg;
};
72 | |||
/* Register-offset sets for channels 1..KCS_CHANNEL_MAX (index = chan - 1). */
static const struct npcm7xx_kcs_reg npcm7xx_kcs_reg_tbl[KCS_CHANNEL_MAX] = {
	{ .sts = KCS1ST, .dob = KCS1DO, .dib = KCS1DI, .ctl = KCS1CTL, .ie = KCS1IE },
	{ .sts = KCS2ST, .dob = KCS2DO, .dib = KCS2DI, .ctl = KCS2CTL, .ie = KCS2IE },
	{ .sts = KCS3ST, .dob = KCS3DO, .dib = KCS3DI, .ctl = KCS3CTL, .ie = KCS3IE },
};
78 | |||
79 | static u8 npcm7xx_kcs_inb(struct kcs_bmc *kcs_bmc, u32 reg) | ||
80 | { | ||
81 | struct npcm7xx_kcs_bmc *priv = kcs_bmc_priv(kcs_bmc); | ||
82 | u32 val = 0; | ||
83 | int rc; | ||
84 | |||
85 | rc = regmap_read(priv->map, reg, &val); | ||
86 | WARN(rc != 0, "regmap_read() failed: %d\n", rc); | ||
87 | |||
88 | return rc == 0 ? (u8)val : 0; | ||
89 | } | ||
90 | |||
91 | static void npcm7xx_kcs_outb(struct kcs_bmc *kcs_bmc, u32 reg, u8 data) | ||
92 | { | ||
93 | struct npcm7xx_kcs_bmc *priv = kcs_bmc_priv(kcs_bmc); | ||
94 | int rc; | ||
95 | |||
96 | rc = regmap_write(priv->map, reg, data); | ||
97 | WARN(rc != 0, "regmap_write() failed: %d\n", rc); | ||
98 | } | ||
99 | |||
100 | static void npcm7xx_kcs_enable_channel(struct kcs_bmc *kcs_bmc, bool enable) | ||
101 | { | ||
102 | struct npcm7xx_kcs_bmc *priv = kcs_bmc_priv(kcs_bmc); | ||
103 | |||
104 | regmap_update_bits(priv->map, priv->reg->ctl, KCS_CTL_IBFIE, | ||
105 | enable ? KCS_CTL_IBFIE : 0); | ||
106 | |||
107 | regmap_update_bits(priv->map, priv->reg->ie, KCS_IE_IRQE | KCS_IE_HIRQE, | ||
108 | enable ? KCS_IE_IRQE | KCS_IE_HIRQE : 0); | ||
109 | } | ||
110 | |||
111 | static irqreturn_t npcm7xx_kcs_irq(int irq, void *arg) | ||
112 | { | ||
113 | struct kcs_bmc *kcs_bmc = arg; | ||
114 | |||
115 | if (!kcs_bmc_handle_event(kcs_bmc)) | ||
116 | return IRQ_HANDLED; | ||
117 | |||
118 | return IRQ_NONE; | ||
119 | } | ||
120 | |||
121 | static int npcm7xx_kcs_config_irq(struct kcs_bmc *kcs_bmc, | ||
122 | struct platform_device *pdev) | ||
123 | { | ||
124 | struct device *dev = &pdev->dev; | ||
125 | int irq; | ||
126 | |||
127 | irq = platform_get_irq(pdev, 0); | ||
128 | if (irq < 0) | ||
129 | return irq; | ||
130 | |||
131 | return devm_request_irq(dev, irq, npcm7xx_kcs_irq, IRQF_SHARED, | ||
132 | dev_name(dev), kcs_bmc); | ||
133 | } | ||
134 | |||
135 | static int npcm7xx_kcs_probe(struct platform_device *pdev) | ||
136 | { | ||
137 | struct device *dev = &pdev->dev; | ||
138 | struct npcm7xx_kcs_bmc *priv; | ||
139 | struct kcs_bmc *kcs_bmc; | ||
140 | u32 chan; | ||
141 | int rc; | ||
142 | |||
143 | rc = of_property_read_u32(dev->of_node, "kcs_chan", &chan); | ||
144 | if (rc != 0 || chan == 0 || chan > KCS_CHANNEL_MAX) { | ||
145 | dev_err(dev, "no valid 'kcs_chan' configured\n"); | ||
146 | return -ENODEV; | ||
147 | } | ||
148 | |||
149 | kcs_bmc = kcs_bmc_alloc(dev, sizeof(*priv), chan); | ||
150 | if (!kcs_bmc) | ||
151 | return -ENOMEM; | ||
152 | |||
153 | priv = kcs_bmc_priv(kcs_bmc); | ||
154 | priv->map = syscon_node_to_regmap(dev->parent->of_node); | ||
155 | if (IS_ERR(priv->map)) { | ||
156 | dev_err(dev, "Couldn't get regmap\n"); | ||
157 | return -ENODEV; | ||
158 | } | ||
159 | priv->reg = &npcm7xx_kcs_reg_tbl[chan - 1]; | ||
160 | |||
161 | kcs_bmc->ioreg.idr = priv->reg->dib; | ||
162 | kcs_bmc->ioreg.odr = priv->reg->dob; | ||
163 | kcs_bmc->ioreg.str = priv->reg->sts; | ||
164 | kcs_bmc->io_inputb = npcm7xx_kcs_inb; | ||
165 | kcs_bmc->io_outputb = npcm7xx_kcs_outb; | ||
166 | |||
167 | dev_set_drvdata(dev, kcs_bmc); | ||
168 | |||
169 | npcm7xx_kcs_enable_channel(kcs_bmc, true); | ||
170 | rc = npcm7xx_kcs_config_irq(kcs_bmc, pdev); | ||
171 | if (rc) | ||
172 | return rc; | ||
173 | |||
174 | rc = misc_register(&kcs_bmc->miscdev); | ||
175 | if (rc) { | ||
176 | dev_err(dev, "Unable to register device\n"); | ||
177 | return rc; | ||
178 | } | ||
179 | |||
180 | pr_info("channel=%u idr=0x%x odr=0x%x str=0x%x\n", | ||
181 | chan, | ||
182 | kcs_bmc->ioreg.idr, kcs_bmc->ioreg.odr, kcs_bmc->ioreg.str); | ||
183 | |||
184 | return 0; | ||
185 | } | ||
186 | |||
187 | static int npcm7xx_kcs_remove(struct platform_device *pdev) | ||
188 | { | ||
189 | struct kcs_bmc *kcs_bmc = dev_get_drvdata(&pdev->dev); | ||
190 | |||
191 | misc_deregister(&kcs_bmc->miscdev); | ||
192 | |||
193 | return 0; | ||
194 | } | ||
195 | |||
/* Device-tree match table; binds to "nuvoton,npcm750-kcs-bmc" nodes. */
static const struct of_device_id npcm_kcs_bmc_match[] = {
	{ .compatible = "nuvoton,npcm750-kcs-bmc" },
	{ }
};
MODULE_DEVICE_TABLE(of, npcm_kcs_bmc_match);

static struct platform_driver npcm_kcs_bmc_driver = {
	.driver = {
		.name		= DEVICE_NAME,
		.of_match_table	= npcm_kcs_bmc_match,
	},
	.probe	= npcm7xx_kcs_probe,
	.remove	= npcm7xx_kcs_remove,
};
module_platform_driver(npcm_kcs_bmc_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Avi Fishman <avifishman70@gmail.com>");
MODULE_AUTHOR("Haiyue Wang <haiyue.wang@linux.intel.com>");
MODULE_DESCRIPTION("NPCM7xx device interface to the KCS BMC device");
diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h index 8b0626cec980..41f5c086f670 100644 --- a/include/linux/ipmi.h +++ b/include/linux/ipmi.h | |||
@@ -23,8 +23,10 @@ | |||
23 | struct module; | 23 | struct module; |
24 | struct device; | 24 | struct device; |
25 | 25 | ||
26 | /* Opaque type for a IPMI message user. One of these is needed to | 26 | /* |
27 | send and receive messages. */ | 27 | * Opaque type for a IPMI message user. One of these is needed to |
28 | * send and receive messages. | ||
29 | */ | ||
28 | typedef struct ipmi_user *ipmi_user_t; | 30 | typedef struct ipmi_user *ipmi_user_t; |
29 | 31 | ||
30 | /* | 32 | /* |
@@ -37,28 +39,36 @@ typedef struct ipmi_user *ipmi_user_t; | |||
37 | struct ipmi_recv_msg { | 39 | struct ipmi_recv_msg { |
38 | struct list_head link; | 40 | struct list_head link; |
39 | 41 | ||
40 | /* The type of message as defined in the "Receive Types" | 42 | /* |
41 | defines above. */ | 43 | * The type of message as defined in the "Receive Types" |
44 | * defines above. | ||
45 | */ | ||
42 | int recv_type; | 46 | int recv_type; |
43 | 47 | ||
44 | ipmi_user_t user; | 48 | struct ipmi_user *user; |
45 | struct ipmi_addr addr; | 49 | struct ipmi_addr addr; |
46 | long msgid; | 50 | long msgid; |
47 | struct kernel_ipmi_msg msg; | 51 | struct kernel_ipmi_msg msg; |
48 | 52 | ||
49 | /* The user_msg_data is the data supplied when a message was | 53 | /* |
50 | sent, if this is a response to a sent message. If this is | 54 | * The user_msg_data is the data supplied when a message was |
51 | not a response to a sent message, then user_msg_data will | 55 | * sent, if this is a response to a sent message. If this is |
52 | be NULL. If the user above is NULL, then this will be the | 56 | * not a response to a sent message, then user_msg_data will |
53 | intf. */ | 57 | * be NULL. If the user above is NULL, then this will be the |
58 | * intf. | ||
59 | */ | ||
54 | void *user_msg_data; | 60 | void *user_msg_data; |
55 | 61 | ||
56 | /* Call this when done with the message. It will presumably free | 62 | /* |
57 | the message and do any other necessary cleanup. */ | 63 | * Call this when done with the message. It will presumably free |
64 | * the message and do any other necessary cleanup. | ||
65 | */ | ||
58 | void (*done)(struct ipmi_recv_msg *msg); | 66 | void (*done)(struct ipmi_recv_msg *msg); |
59 | 67 | ||
60 | /* Place-holder for the data, don't make any assumptions about | 68 | /* |
61 | the size or existence of this, since it may change. */ | 69 | * Place-holder for the data, don't make any assumptions about |
70 | * the size or existence of this, since it may change. | ||
71 | */ | ||
62 | unsigned char msg_data[IPMI_MAX_MSG_LENGTH]; | 72 | unsigned char msg_data[IPMI_MAX_MSG_LENGTH]; |
63 | }; | 73 | }; |
64 | 74 | ||
@@ -66,54 +76,77 @@ struct ipmi_recv_msg { | |||
66 | void ipmi_free_recv_msg(struct ipmi_recv_msg *msg); | 76 | void ipmi_free_recv_msg(struct ipmi_recv_msg *msg); |
67 | 77 | ||
68 | struct ipmi_user_hndl { | 78 | struct ipmi_user_hndl { |
69 | /* Routine type to call when a message needs to be routed to | 79 | /* |
70 | the upper layer. This will be called with some locks held, | 80 | * Routine type to call when a message needs to be routed to |
71 | the only IPMI routines that can be called are ipmi_request | 81 | * the upper layer. This will be called with some locks held, |
72 | and the alloc/free operations. The handler_data is the | 82 | * the only IPMI routines that can be called are ipmi_request |
73 | variable supplied when the receive handler was registered. */ | 83 | * and the alloc/free operations. The handler_data is the |
84 | * variable supplied when the receive handler was registered. | ||
85 | */ | ||
74 | void (*ipmi_recv_hndl)(struct ipmi_recv_msg *msg, | 86 | void (*ipmi_recv_hndl)(struct ipmi_recv_msg *msg, |
75 | void *user_msg_data); | 87 | void *user_msg_data); |
76 | 88 | ||
77 | /* Called when the interface detects a watchdog pre-timeout. If | 89 | /* |
78 | this is NULL, it will be ignored for the user. */ | 90 | * Called when the interface detects a watchdog pre-timeout. If |
91 | * this is NULL, it will be ignored for the user. | ||
92 | */ | ||
79 | void (*ipmi_watchdog_pretimeout)(void *handler_data); | 93 | void (*ipmi_watchdog_pretimeout)(void *handler_data); |
94 | |||
95 | /* | ||
96 | * If not NULL, called at panic time after the interface has | ||
97 | * been set up to handle run to completion. | ||
98 | */ | ||
99 | void (*ipmi_panic_handler)(void *handler_data); | ||
100 | |||
101 | /* | ||
102 | * Called when the interface has been removed. After this returns | ||
103 | * the user handle will be invalid. The interface may or may | ||
104 | * not be usable when this is called, but it will return errors | ||
105 | * if it is not usable. | ||
106 | */ | ||
107 | void (*shutdown)(void *handler_data); | ||
80 | }; | 108 | }; |
81 | 109 | ||
82 | /* Create a new user of the IPMI layer on the given interface number. */ | 110 | /* Create a new user of the IPMI layer on the given interface number. */ |
83 | int ipmi_create_user(unsigned int if_num, | 111 | int ipmi_create_user(unsigned int if_num, |
84 | const struct ipmi_user_hndl *handler, | 112 | const struct ipmi_user_hndl *handler, |
85 | void *handler_data, | 113 | void *handler_data, |
86 | ipmi_user_t *user); | 114 | struct ipmi_user **user); |
87 | 115 | ||
88 | /* Destroy the given user of the IPMI layer. Note that after this | 116 | /* |
89 | function returns, the system is guaranteed to not call any | 117 | * Destroy the given user of the IPMI layer. Note that after this |
90 | callbacks for the user. Thus as long as you destroy all the users | 118 | * function returns, the system is guaranteed to not call any |
91 | before you unload a module, you will be safe. And if you destroy | 119 | * callbacks for the user. Thus as long as you destroy all the users |
92 | the users before you destroy the callback structures, it should be | 120 | * before you unload a module, you will be safe. And if you destroy |
93 | safe, too. */ | 121 | * the users before you destroy the callback structures, it should be |
94 | int ipmi_destroy_user(ipmi_user_t user); | 122 | * safe, too. |
123 | */ | ||
124 | int ipmi_destroy_user(struct ipmi_user *user); | ||
95 | 125 | ||
96 | /* Get the IPMI version of the BMC we are talking to. */ | 126 | /* Get the IPMI version of the BMC we are talking to. */ |
97 | int ipmi_get_version(ipmi_user_t user, | 127 | int ipmi_get_version(struct ipmi_user *user, |
98 | unsigned char *major, | 128 | unsigned char *major, |
99 | unsigned char *minor); | 129 | unsigned char *minor); |
100 | 130 | ||
101 | /* Set and get the slave address and LUN that we will use for our | 131 | /* |
102 | source messages. Note that this affects the interface, not just | 132 | * Set and get the slave address and LUN that we will use for our |
103 | this user, so it will affect all users of this interface. This is | 133 | * source messages. Note that this affects the interface, not just |
104 | so some initialization code can come in and do the OEM-specific | 134 | * this user, so it will affect all users of this interface. This is |
105 | things it takes to determine your address (if not the BMC) and set | 135 | * so some initialization code can come in and do the OEM-specific |
106 | it for everyone else. Note that each channel can have its own address. */ | 136 | * things it takes to determine your address (if not the BMC) and set |
107 | int ipmi_set_my_address(ipmi_user_t user, | 137 | * it for everyone else. Note that each channel can have its own |
138 | * address. | ||
139 | */ | ||
140 | int ipmi_set_my_address(struct ipmi_user *user, | ||
108 | unsigned int channel, | 141 | unsigned int channel, |
109 | unsigned char address); | 142 | unsigned char address); |
110 | int ipmi_get_my_address(ipmi_user_t user, | 143 | int ipmi_get_my_address(struct ipmi_user *user, |
111 | unsigned int channel, | 144 | unsigned int channel, |
112 | unsigned char *address); | 145 | unsigned char *address); |
113 | int ipmi_set_my_LUN(ipmi_user_t user, | 146 | int ipmi_set_my_LUN(struct ipmi_user *user, |
114 | unsigned int channel, | 147 | unsigned int channel, |
115 | unsigned char LUN); | 148 | unsigned char LUN); |
116 | int ipmi_get_my_LUN(ipmi_user_t user, | 149 | int ipmi_get_my_LUN(struct ipmi_user *user, |
117 | unsigned int channel, | 150 | unsigned int channel, |
118 | unsigned char *LUN); | 151 | unsigned char *LUN); |
119 | 152 | ||
@@ -130,7 +163,7 @@ int ipmi_get_my_LUN(ipmi_user_t user, | |||
130 | * it makes no sense to do it here. However, this can be used if you | 163 | * it makes no sense to do it here. However, this can be used if you |
131 | * have unusual requirements. | 164 | * have unusual requirements. |
132 | */ | 165 | */ |
133 | int ipmi_request_settime(ipmi_user_t user, | 166 | int ipmi_request_settime(struct ipmi_user *user, |
134 | struct ipmi_addr *addr, | 167 | struct ipmi_addr *addr, |
135 | long msgid, | 168 | long msgid, |
136 | struct kernel_ipmi_msg *msg, | 169 | struct kernel_ipmi_msg *msg, |
@@ -148,7 +181,7 @@ int ipmi_request_settime(ipmi_user_t user, | |||
148 | * change as the system changes, so don't use it unless you REALLY | 181 | * change as the system changes, so don't use it unless you REALLY |
149 | * have to. | 182 | * have to. |
150 | */ | 183 | */ |
151 | int ipmi_request_supply_msgs(ipmi_user_t user, | 184 | int ipmi_request_supply_msgs(struct ipmi_user *user, |
152 | struct ipmi_addr *addr, | 185 | struct ipmi_addr *addr, |
153 | long msgid, | 186 | long msgid, |
154 | struct kernel_ipmi_msg *msg, | 187 | struct kernel_ipmi_msg *msg, |
@@ -164,7 +197,7 @@ int ipmi_request_supply_msgs(ipmi_user_t user, | |||
164 | * way. This is useful if you need to spin waiting for something to | 197 | * way. This is useful if you need to spin waiting for something to |
165 | * happen in the IPMI driver. | 198 | * happen in the IPMI driver. |
166 | */ | 199 | */ |
167 | void ipmi_poll_interface(ipmi_user_t user); | 200 | void ipmi_poll_interface(struct ipmi_user *user); |
168 | 201 | ||
169 | /* | 202 | /* |
170 | * When commands come in to the SMS, the user can register to receive | 203 | * When commands come in to the SMS, the user can register to receive |
@@ -175,11 +208,11 @@ void ipmi_poll_interface(ipmi_user_t user); | |||
175 | * error. Channels are specified as a bitfield, use IPMI_CHAN_ALL to | 208 | * error. Channels are specified as a bitfield, use IPMI_CHAN_ALL to |
176 | * mean all channels. | 209 | * mean all channels. |
177 | */ | 210 | */ |
178 | int ipmi_register_for_cmd(ipmi_user_t user, | 211 | int ipmi_register_for_cmd(struct ipmi_user *user, |
179 | unsigned char netfn, | 212 | unsigned char netfn, |
180 | unsigned char cmd, | 213 | unsigned char cmd, |
181 | unsigned int chans); | 214 | unsigned int chans); |
182 | int ipmi_unregister_for_cmd(ipmi_user_t user, | 215 | int ipmi_unregister_for_cmd(struct ipmi_user *user, |
183 | unsigned char netfn, | 216 | unsigned char netfn, |
184 | unsigned char cmd, | 217 | unsigned char cmd, |
185 | unsigned int chans); | 218 | unsigned int chans); |
@@ -210,8 +243,8 @@ int ipmi_unregister_for_cmd(ipmi_user_t user, | |||
210 | * | 243 | * |
211 | * See the IPMI_MAINTENANCE_MODE_xxx defines for what the mode means. | 244 | * See the IPMI_MAINTENANCE_MODE_xxx defines for what the mode means. |
212 | */ | 245 | */ |
213 | int ipmi_get_maintenance_mode(ipmi_user_t user); | 246 | int ipmi_get_maintenance_mode(struct ipmi_user *user); |
214 | int ipmi_set_maintenance_mode(ipmi_user_t user, int mode); | 247 | int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode); |
215 | 248 | ||
216 | /* | 249 | /* |
217 | * When the user is created, it will not receive IPMI events by | 250 | * When the user is created, it will not receive IPMI events by |
@@ -219,7 +252,7 @@ int ipmi_set_maintenance_mode(ipmi_user_t user, int mode); | |||
219 | * The first user that sets this to TRUE will receive all events that | 252 | * The first user that sets this to TRUE will receive all events that |
220 | * have been queued while no one was waiting for events. | 253 | * have been queued while no one was waiting for events. |
221 | */ | 254 | */ |
222 | int ipmi_set_gets_events(ipmi_user_t user, bool val); | 255 | int ipmi_set_gets_events(struct ipmi_user *user, bool val); |
223 | 256 | ||
224 | /* | 257 | /* |
225 | * Called when a new SMI is registered. This will also be called on | 258 | * Called when a new SMI is registered. This will also be called on |
@@ -229,14 +262,18 @@ int ipmi_set_gets_events(ipmi_user_t user, bool val); | |||
229 | struct ipmi_smi_watcher { | 262 | struct ipmi_smi_watcher { |
230 | struct list_head link; | 263 | struct list_head link; |
231 | 264 | ||
232 | /* You must set the owner to the current module, if you are in | 265 | /* |
233 | a module (generally just set it to "THIS_MODULE"). */ | 266 | * You must set the owner to the current module, if you are in |
267 | * a module (generally just set it to "THIS_MODULE"). | ||
268 | */ | ||
234 | struct module *owner; | 269 | struct module *owner; |
235 | 270 | ||
236 | /* These two are called with read locks held for the interface | 271 | /* |
237 | the watcher list. So you can add and remove users from the | 272 | * These two are called with read locks held for the interface |
238 | IPMI interface, send messages, etc., but you cannot add | 273 | * the watcher list. So you can add and remove users from the |
239 | or remove SMI watchers or SMI interfaces. */ | 274 | * IPMI interface, send messages, etc., but you cannot add |
275 | * or remove SMI watchers or SMI interfaces. | ||
276 | */ | ||
240 | void (*new_smi)(int if_num, struct device *dev); | 277 | void (*new_smi)(int if_num, struct device *dev); |
241 | void (*smi_gone)(int if_num); | 278 | void (*smi_gone)(int if_num); |
242 | }; | 279 | }; |
@@ -244,8 +281,10 @@ struct ipmi_smi_watcher { | |||
244 | int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher); | 281 | int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher); |
245 | int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher); | 282 | int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher); |
246 | 283 | ||
247 | /* The following are various helper functions for dealing with IPMI | 284 | /* |
248 | addresses. */ | 285 | * The following are various helper functions for dealing with IPMI |
286 | * addresses. | ||
287 | */ | ||
249 | 288 | ||
250 | /* Return the maximum length of an IPMI address given it's type. */ | 289 | /* Return the maximum length of an IPMI address given it's type. */ |
251 | unsigned int ipmi_addr_length(int addr_type); | 290 | unsigned int ipmi_addr_length(int addr_type); |
@@ -291,7 +330,7 @@ struct ipmi_smi_info { | |||
291 | union ipmi_smi_info_union addr_info; | 330 | union ipmi_smi_info_union addr_info; |
292 | }; | 331 | }; |
293 | 332 | ||
294 | /* This is to get the private info of ipmi_smi_t */ | 333 | /* This is to get the private info of struct ipmi_smi */ |
295 | extern int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data); | 334 | extern int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data); |
296 | 335 | ||
297 | #endif /* __LINUX_IPMI_H */ | 336 | #endif /* __LINUX_IPMI_H */ |
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h index af457b5a689e..7d5fd38d5282 100644 --- a/include/linux/ipmi_smi.h +++ b/include/linux/ipmi_smi.h | |||
@@ -22,8 +22,10 @@ | |||
22 | 22 | ||
23 | struct device; | 23 | struct device; |
24 | 24 | ||
25 | /* This files describes the interface for IPMI system management interface | 25 | /* |
26 | drivers to bind into the IPMI message handler. */ | 26 | * This files describes the interface for IPMI system management interface |
27 | * drivers to bind into the IPMI message handler. | ||
28 | */ | ||
27 | 29 | ||
28 | /* Structure for the low-level drivers. */ | 30 | /* Structure for the low-level drivers. */ |
29 | typedef struct ipmi_smi *ipmi_smi_t; | 31 | typedef struct ipmi_smi *ipmi_smi_t; |
@@ -61,12 +63,20 @@ struct ipmi_smi_msg { | |||
61 | struct ipmi_smi_handlers { | 63 | struct ipmi_smi_handlers { |
62 | struct module *owner; | 64 | struct module *owner; |
63 | 65 | ||
64 | /* The low-level interface cannot start sending messages to | 66 | /* |
65 | the upper layer until this function is called. This may | 67 | * The low-level interface cannot start sending messages to |
66 | not be NULL, the lower layer must take the interface from | 68 | * the upper layer until this function is called. This may |
67 | this call. */ | 69 | * not be NULL, the lower layer must take the interface from |
68 | int (*start_processing)(void *send_info, | 70 | * this call. |
69 | ipmi_smi_t new_intf); | 71 | */ |
72 | int (*start_processing)(void *send_info, | ||
73 | struct ipmi_smi *new_intf); | ||
74 | |||
75 | /* | ||
76 | * When called, the low-level interface should disable all | ||
77 | * processing, it should be complete shut down when it returns. | ||
78 | */ | ||
79 | void (*shutdown)(void *send_info); | ||
70 | 80 | ||
71 | /* | 81 | /* |
72 | * Get the detailed private info of the low level interface and store | 82 | * Get the detailed private info of the low level interface and store |
@@ -75,25 +85,31 @@ struct ipmi_smi_handlers { | |||
75 | */ | 85 | */ |
76 | int (*get_smi_info)(void *send_info, struct ipmi_smi_info *data); | 86 | int (*get_smi_info)(void *send_info, struct ipmi_smi_info *data); |
77 | 87 | ||
78 | /* Called to enqueue an SMI message to be sent. This | 88 | /* |
79 | operation is not allowed to fail. If an error occurs, it | 89 | * Called to enqueue an SMI message to be sent. This |
80 | should report back the error in a received message. It may | 90 | * operation is not allowed to fail. If an error occurs, it |
81 | do this in the current call context, since no write locks | 91 | * should report back the error in a received message. It may |
82 | are held when this is run. Message are delivered one at | 92 | * do this in the current call context, since no write locks |
83 | a time by the message handler, a new message will not be | 93 | * are held when this is run. Message are delivered one at |
84 | delivered until the previous message is returned. */ | 94 | * a time by the message handler, a new message will not be |
95 | * delivered until the previous message is returned. | ||
96 | */ | ||
85 | void (*sender)(void *send_info, | 97 | void (*sender)(void *send_info, |
86 | struct ipmi_smi_msg *msg); | 98 | struct ipmi_smi_msg *msg); |
87 | 99 | ||
88 | /* Called by the upper layer to request that we try to get | 100 | /* |
89 | events from the BMC we are attached to. */ | 101 | * Called by the upper layer to request that we try to get |
102 | * events from the BMC we are attached to. | ||
103 | */ | ||
90 | void (*request_events)(void *send_info); | 104 | void (*request_events)(void *send_info); |
91 | 105 | ||
92 | /* Called by the upper layer when some user requires that the | 106 | /* |
93 | interface watch for events, received messages, watchdog | 107 | * Called by the upper layer when some user requires that the |
94 | pretimeouts, or not. Used by the SMI to know if it should | 108 | * interface watch for events, received messages, watchdog |
95 | watch for these. This may be NULL if the SMI does not | 109 | * pretimeouts, or not. Used by the SMI to know if it should |
96 | implement it. */ | 110 | * watch for these. This may be NULL if the SMI does not |
111 | * implement it. | ||
112 | */ | ||
97 | void (*set_need_watch)(void *send_info, bool enable); | 113 | void (*set_need_watch)(void *send_info, bool enable); |
98 | 114 | ||
99 | /* | 115 | /* |
@@ -101,30 +117,29 @@ struct ipmi_smi_handlers { | |||
101 | */ | 117 | */ |
102 | void (*flush_messages)(void *send_info); | 118 | void (*flush_messages)(void *send_info); |
103 | 119 | ||
104 | /* Called when the interface should go into "run to | 120 | /* |
105 | completion" mode. If this call sets the value to true, the | 121 | * Called when the interface should go into "run to |
106 | interface should make sure that all messages are flushed | 122 | * completion" mode. If this call sets the value to true, the |
107 | out and that none are pending, and any new requests are run | 123 | * interface should make sure that all messages are flushed |
108 | to completion immediately. */ | 124 | * out and that none are pending, and any new requests are run |
125 | * to completion immediately. | ||
126 | */ | ||
109 | void (*set_run_to_completion)(void *send_info, bool run_to_completion); | 127 | void (*set_run_to_completion)(void *send_info, bool run_to_completion); |
110 | 128 | ||
111 | /* Called to poll for work to do. This is so upper layers can | 129 | /* |
112 | poll for operations during things like crash dumps. */ | 130 | * Called to poll for work to do. This is so upper layers can |
131 | * poll for operations during things like crash dumps. | ||
132 | */ | ||
113 | void (*poll)(void *send_info); | 133 | void (*poll)(void *send_info); |
114 | 134 | ||
115 | /* Enable/disable firmware maintenance mode. Note that this | 135 | /* |
116 | is *not* the modes defined, this is simply an on/off | 136 | * Enable/disable firmware maintenance mode. Note that this |
117 | setting. The message handler does the mode handling. Note | 137 | * is *not* the modes defined, this is simply an on/off |
118 | that this is called from interrupt context, so it cannot | 138 | * setting. The message handler does the mode handling. Note |
119 | block. */ | 139 | * that this is called from interrupt context, so it cannot |
140 | * block. | ||
141 | */ | ||
120 | void (*set_maintenance_mode)(void *send_info, bool enable); | 142 | void (*set_maintenance_mode)(void *send_info, bool enable); |
121 | |||
122 | /* Tell the handler that we are using it/not using it. The | ||
123 | message handler get the modules that this handler belongs | ||
124 | to; this function lets the SMI claim any modules that it | ||
125 | uses. These may be NULL if this is not required. */ | ||
126 | int (*inc_usecount)(void *send_info); | ||
127 | void (*dec_usecount)(void *send_info); | ||
128 | }; | 143 | }; |
129 | 144 | ||
130 | struct ipmi_device_id { | 145 | struct ipmi_device_id { |
@@ -143,7 +158,8 @@ struct ipmi_device_id { | |||
143 | #define ipmi_version_major(v) ((v)->ipmi_version & 0xf) | 158 | #define ipmi_version_major(v) ((v)->ipmi_version & 0xf) |
144 | #define ipmi_version_minor(v) ((v)->ipmi_version >> 4) | 159 | #define ipmi_version_minor(v) ((v)->ipmi_version >> 4) |
145 | 160 | ||
146 | /* Take a pointer to an IPMI response and extract device id information from | 161 | /* |
162 | * Take a pointer to an IPMI response and extract device id information from | ||
147 | * it. @netfn is in the IPMI_NETFN_ format, so may need to be shifted from | 163 | * it. @netfn is in the IPMI_NETFN_ format, so may need to be shifted from |
148 | * a SI response. | 164 | * a SI response. |
149 | */ | 165 | */ |
@@ -187,12 +203,14 @@ static inline int ipmi_demangle_device_id(uint8_t netfn, uint8_t cmd, | |||
187 | return 0; | 203 | return 0; |
188 | } | 204 | } |
189 | 205 | ||
190 | /* Add a low-level interface to the IPMI driver. Note that if the | 206 | /* |
191 | interface doesn't know its slave address, it should pass in zero. | 207 | * Add a low-level interface to the IPMI driver. Note that if the |
192 | The low-level interface should not deliver any messages to the | 208 | * interface doesn't know its slave address, it should pass in zero. |
193 | upper layer until the start_processing() function in the handlers | 209 | * The low-level interface should not deliver any messages to the |
194 | is called, and the lower layer must get the interface from that | 210 | * upper layer until the start_processing() function in the handlers |
195 | call. */ | 211 | * is called, and the lower layer must get the interface from that |
212 | * call. | ||
213 | */ | ||
196 | int ipmi_register_smi(const struct ipmi_smi_handlers *handlers, | 214 | int ipmi_register_smi(const struct ipmi_smi_handlers *handlers, |
197 | void *send_info, | 215 | void *send_info, |
198 | struct device *dev, | 216 | struct device *dev, |
@@ -202,7 +220,7 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers, | |||
202 | * Remove a low-level interface from the IPMI driver. This will | 220 | * Remove a low-level interface from the IPMI driver. This will |
203 | * return an error if the interface is still in use by a user. | 221 | * return an error if the interface is still in use by a user. |
204 | */ | 222 | */ |
205 | int ipmi_unregister_smi(ipmi_smi_t intf); | 223 | void ipmi_unregister_smi(struct ipmi_smi *intf); |
206 | 224 | ||
207 | /* | 225 | /* |
208 | * The lower layer reports received messages through this interface. | 226 | * The lower layer reports received messages through this interface. |
@@ -210,11 +228,11 @@ int ipmi_unregister_smi(ipmi_smi_t intf); | |||
210 | * the lower layer gets an error sending a message, it should format | 228 | * the lower layer gets an error sending a message, it should format |
211 | * an error response in the message response. | 229 | * an error response in the message response. |
212 | */ | 230 | */ |
213 | void ipmi_smi_msg_received(ipmi_smi_t intf, | 231 | void ipmi_smi_msg_received(struct ipmi_smi *intf, |
214 | struct ipmi_smi_msg *msg); | 232 | struct ipmi_smi_msg *msg); |
215 | 233 | ||
216 | /* The lower layer received a watchdog pre-timeout on interface. */ | 234 | /* The lower layer received a watchdog pre-timeout on interface. */ |
217 | void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf); | 235 | void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf); |
218 | 236 | ||
219 | struct ipmi_smi_msg *ipmi_alloc_smi_msg(void); | 237 | struct ipmi_smi_msg *ipmi_alloc_smi_msg(void); |
220 | static inline void ipmi_free_smi_msg(struct ipmi_smi_msg *msg) | 238 | static inline void ipmi_free_smi_msg(struct ipmi_smi_msg *msg) |
@@ -222,13 +240,4 @@ static inline void ipmi_free_smi_msg(struct ipmi_smi_msg *msg) | |||
222 | msg->done(msg); | 240 | msg->done(msg); |
223 | } | 241 | } |
224 | 242 | ||
225 | #ifdef CONFIG_IPMI_PROC_INTERFACE | ||
226 | /* Allow the lower layer to add things to the proc filesystem | ||
227 | directory for this interface. Note that the entry will | ||
228 | automatically be dstroyed when the interface is destroyed. */ | ||
229 | int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name, | ||
230 | const struct file_operations *proc_ops, | ||
231 | void *data); | ||
232 | #endif | ||
233 | |||
234 | #endif /* __LINUX_IPMI_SMI_H */ | 243 | #endif /* __LINUX_IPMI_SMI_H */ |