Diffstat (limited to 'drivers/acpi/acpi_ipmi.c')
-rw-r--r--   drivers/acpi/acpi_ipmi.c | 594
1 file changed, 365 insertions(+), 229 deletions(-)
diff --git a/drivers/acpi/acpi_ipmi.c b/drivers/acpi/acpi_ipmi.c
index f40acef80269..ac0f52f6df2b 100644
--- a/drivers/acpi/acpi_ipmi.c
+++ b/drivers/acpi/acpi_ipmi.c
@@ -1,8 +1,9 @@
1 | /* | 1 | /* |
2 | * acpi_ipmi.c - ACPI IPMI opregion | 2 | * acpi_ipmi.c - ACPI IPMI opregion |
3 | * | 3 | * |
4 | * Copyright (C) 2010 Intel Corporation | 4 | * Copyright (C) 2010, 2013 Intel Corporation |
5 | * Copyright (C) 2010 Zhao Yakui <yakui.zhao@intel.com> | 5 | * Author: Zhao Yakui <yakui.zhao@intel.com> |
6 | * Lv Zheng <lv.zheng@intel.com> | ||
6 | * | 7 | * |
7 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | 8 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
8 | * | 9 | * |
@@ -23,59 +24,58 @@
23 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | 24 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
24 | */ | 25 | */ |
25 | 26 | ||
26 | #include <linux/kernel.h> | ||
27 | #include <linux/module.h> | 27 | #include <linux/module.h> |
28 | #include <linux/init.h> | 28 | #include <linux/acpi.h> |
29 | #include <linux/types.h> | ||
30 | #include <linux/delay.h> | ||
31 | #include <linux/proc_fs.h> | ||
32 | #include <linux/seq_file.h> | ||
33 | #include <linux/interrupt.h> | ||
34 | #include <linux/list.h> | ||
35 | #include <linux/spinlock.h> | ||
36 | #include <linux/io.h> | ||
37 | #include <acpi/acpi_bus.h> | ||
38 | #include <acpi/acpi_drivers.h> | ||
39 | #include <linux/ipmi.h> | 29 | #include <linux/ipmi.h> |
40 | #include <linux/device.h> | 30 | #include <linux/spinlock.h> |
41 | #include <linux/pnp.h> | ||
42 | 31 | ||
43 | MODULE_AUTHOR("Zhao Yakui"); | 32 | MODULE_AUTHOR("Zhao Yakui"); |
44 | MODULE_DESCRIPTION("ACPI IPMI Opregion driver"); | 33 | MODULE_DESCRIPTION("ACPI IPMI Opregion driver"); |
45 | MODULE_LICENSE("GPL"); | 34 | MODULE_LICENSE("GPL"); |
46 | 35 | ||
47 | #define IPMI_FLAGS_HANDLER_INSTALL 0 | ||
48 | |||
49 | #define ACPI_IPMI_OK 0 | 36 | #define ACPI_IPMI_OK 0 |
50 | #define ACPI_IPMI_TIMEOUT 0x10 | 37 | #define ACPI_IPMI_TIMEOUT 0x10 |
51 | #define ACPI_IPMI_UNKNOWN 0x07 | 38 | #define ACPI_IPMI_UNKNOWN 0x07 |
52 | /* the IPMI timeout is 5s */ | 39 | /* the IPMI timeout is 5s */ |
53 | #define IPMI_TIMEOUT (5 * HZ) | 40 | #define IPMI_TIMEOUT (5000) |
41 | #define ACPI_IPMI_MAX_MSG_LENGTH 64 | ||
54 | 42 | ||
55 | struct acpi_ipmi_device { | 43 | struct acpi_ipmi_device { |
56 | /* the device list attached to driver_data.ipmi_devices */ | 44 | /* the device list attached to driver_data.ipmi_devices */ |
57 | struct list_head head; | 45 | struct list_head head; |
46 | |||
58 | /* the IPMI request message list */ | 47 | /* the IPMI request message list */ |
59 | struct list_head tx_msg_list; | 48 | struct list_head tx_msg_list; |
60 | struct mutex tx_msg_lock; | 49 | |
50 | spinlock_t tx_msg_lock; | ||
61 | acpi_handle handle; | 51 | acpi_handle handle; |
62 | struct pnp_dev *pnp_dev; | 52 | struct device *dev; |
63 | ipmi_user_t user_interface; | 53 | ipmi_user_t user_interface; |
64 | int ipmi_ifnum; /* IPMI interface number */ | 54 | int ipmi_ifnum; /* IPMI interface number */ |
65 | long curr_msgid; | 55 | long curr_msgid; |
66 | unsigned long flags; | 56 | bool dead; |
67 | struct ipmi_smi_info smi_data; | 57 | struct kref kref; |
68 | }; | 58 | }; |
69 | 59 | ||
70 | struct ipmi_driver_data { | 60 | struct ipmi_driver_data { |
71 | struct list_head ipmi_devices; | 61 | struct list_head ipmi_devices; |
72 | struct ipmi_smi_watcher bmc_events; | 62 | struct ipmi_smi_watcher bmc_events; |
73 | struct ipmi_user_hndl ipmi_hndlrs; | 63 | struct ipmi_user_hndl ipmi_hndlrs; |
74 | struct mutex ipmi_lock; | 64 | struct mutex ipmi_lock; |
65 | |||
66 | /* | ||
67 | * NOTE: IPMI System Interface Selection | ||
68 | * There is no system interface specified by the IPMI operation | ||
69 | * region access. We try to select one system interface with ACPI | ||
70 | * handle set. IPMI messages passed from the ACPI codes are sent | ||
71 | * to this selected global IPMI system interface. | ||
72 | */ | ||
73 | struct acpi_ipmi_device *selected_smi; | ||
75 | }; | 74 | }; |
76 | 75 | ||
77 | struct acpi_ipmi_msg { | 76 | struct acpi_ipmi_msg { |
78 | struct list_head head; | 77 | struct list_head head; |
78 | |||
79 | /* | 79 | /* |
80 | * General speaking the addr type should be SI_ADDR_TYPE. And | 80 | * General speaking the addr type should be SI_ADDR_TYPE. And |
81 | * the addr channel should be BMC. | 81 | * the addr channel should be BMC. |
@@ -85,30 +85,31 @@ struct acpi_ipmi_msg {
85 | */ | 85 | */ |
86 | struct ipmi_addr addr; | 86 | struct ipmi_addr addr; |
87 | long tx_msgid; | 87 | long tx_msgid; |
88 | |||
88 | /* it is used to track whether the IPMI message is finished */ | 89 | /* it is used to track whether the IPMI message is finished */ |
89 | struct completion tx_complete; | 90 | struct completion tx_complete; |
91 | |||
90 | struct kernel_ipmi_msg tx_message; | 92 | struct kernel_ipmi_msg tx_message; |
91 | int msg_done; | 93 | int msg_done; |
92 | /* tx data . And copy it from ACPI object buffer */ | 94 | |
93 | u8 tx_data[64]; | 95 | /* tx/rx data . And copy it from/to ACPI object buffer */ |
94 | int tx_len; | 96 | u8 data[ACPI_IPMI_MAX_MSG_LENGTH]; |
95 | u8 rx_data[64]; | 97 | u8 rx_len; |
96 | int rx_len; | 98 | |
97 | struct acpi_ipmi_device *device; | 99 | struct acpi_ipmi_device *device; |
100 | struct kref kref; | ||
98 | }; | 101 | }; |
99 | 102 | ||
100 | /* IPMI request/response buffer per ACPI 4.0, sec 5.5.2.4.3.2 */ | 103 | /* IPMI request/response buffer per ACPI 4.0, sec 5.5.2.4.3.2 */ |
101 | struct acpi_ipmi_buffer { | 104 | struct acpi_ipmi_buffer { |
102 | u8 status; | 105 | u8 status; |
103 | u8 length; | 106 | u8 length; |
104 | u8 data[64]; | 107 | u8 data[ACPI_IPMI_MAX_MSG_LENGTH]; |
105 | }; | 108 | }; |
106 | 109 | ||
107 | static void ipmi_register_bmc(int iface, struct device *dev); | 110 | static void ipmi_register_bmc(int iface, struct device *dev); |
108 | static void ipmi_bmc_gone(int iface); | 111 | static void ipmi_bmc_gone(int iface); |
109 | static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data); | 112 | static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data); |
110 | static void acpi_add_ipmi_device(struct acpi_ipmi_device *ipmi_device); | ||
111 | static void acpi_remove_ipmi_device(struct acpi_ipmi_device *ipmi_device); | ||
112 | 113 | ||
113 | static struct ipmi_driver_data driver_data = { | 114 | static struct ipmi_driver_data driver_data = { |
114 | .ipmi_devices = LIST_HEAD_INIT(driver_data.ipmi_devices), | 115 | .ipmi_devices = LIST_HEAD_INIT(driver_data.ipmi_devices), |
@@ -120,50 +121,174 @@ static struct ipmi_driver_data driver_data = {
120 | .ipmi_hndlrs = { | 121 | .ipmi_hndlrs = { |
121 | .ipmi_recv_hndl = ipmi_msg_handler, | 122 | .ipmi_recv_hndl = ipmi_msg_handler, |
122 | }, | 123 | }, |
124 | .ipmi_lock = __MUTEX_INITIALIZER(driver_data.ipmi_lock) | ||
123 | }; | 125 | }; |
124 | 126 | ||
125 | static struct acpi_ipmi_msg *acpi_alloc_ipmi_msg(struct acpi_ipmi_device *ipmi) | 127 | static struct acpi_ipmi_device * |
128 | ipmi_dev_alloc(int iface, struct device *dev, acpi_handle handle) | ||
126 | { | 129 | { |
130 | struct acpi_ipmi_device *ipmi_device; | ||
131 | int err; | ||
132 | ipmi_user_t user; | ||
133 | |||
134 | ipmi_device = kzalloc(sizeof(*ipmi_device), GFP_KERNEL); | ||
135 | if (!ipmi_device) | ||
136 | return NULL; | ||
137 | |||
138 | kref_init(&ipmi_device->kref); | ||
139 | INIT_LIST_HEAD(&ipmi_device->head); | ||
140 | INIT_LIST_HEAD(&ipmi_device->tx_msg_list); | ||
141 | spin_lock_init(&ipmi_device->tx_msg_lock); | ||
142 | ipmi_device->handle = handle; | ||
143 | ipmi_device->dev = get_device(dev); | ||
144 | ipmi_device->ipmi_ifnum = iface; | ||
145 | |||
146 | err = ipmi_create_user(iface, &driver_data.ipmi_hndlrs, | ||
147 | ipmi_device, &user); | ||
148 | if (err) { | ||
149 | put_device(dev); | ||
150 | kfree(ipmi_device); | ||
151 | return NULL; | ||
152 | } | ||
153 | ipmi_device->user_interface = user; | ||
154 | |||
155 | return ipmi_device; | ||
156 | } | ||
157 | |||
158 | static void ipmi_dev_release(struct acpi_ipmi_device *ipmi_device) | ||
159 | { | ||
160 | ipmi_destroy_user(ipmi_device->user_interface); | ||
161 | put_device(ipmi_device->dev); | ||
162 | kfree(ipmi_device); | ||
163 | } | ||
164 | |||
165 | static void ipmi_dev_release_kref(struct kref *kref) | ||
166 | { | ||
167 | struct acpi_ipmi_device *ipmi = | ||
168 | container_of(kref, struct acpi_ipmi_device, kref); | ||
169 | |||
170 | ipmi_dev_release(ipmi); | ||
171 | } | ||
172 | |||
173 | static void __ipmi_dev_kill(struct acpi_ipmi_device *ipmi_device) | ||
174 | { | ||
175 | list_del(&ipmi_device->head); | ||
176 | if (driver_data.selected_smi == ipmi_device) | ||
177 | driver_data.selected_smi = NULL; | ||
178 | |||
179 | /* | ||
180 | * Always setting dead flag after deleting from the list or | ||
181 | * list_for_each_entry() codes must get changed. | ||
182 | */ | ||
183 | ipmi_device->dead = true; | ||
184 | } | ||
185 | |||
186 | static struct acpi_ipmi_device *acpi_ipmi_dev_get(void) | ||
187 | { | ||
188 | struct acpi_ipmi_device *ipmi_device = NULL; | ||
189 | |||
190 | mutex_lock(&driver_data.ipmi_lock); | ||
191 | if (driver_data.selected_smi) { | ||
192 | ipmi_device = driver_data.selected_smi; | ||
193 | kref_get(&ipmi_device->kref); | ||
194 | } | ||
195 | mutex_unlock(&driver_data.ipmi_lock); | ||
196 | |||
197 | return ipmi_device; | ||
198 | } | ||
199 | |||
200 | static void acpi_ipmi_dev_put(struct acpi_ipmi_device *ipmi_device) | ||
201 | { | ||
202 | kref_put(&ipmi_device->kref, ipmi_dev_release_kref); | ||
203 | } | ||
204 | |||
205 | static struct acpi_ipmi_msg *ipmi_msg_alloc(void) | ||
206 | { | ||
207 | struct acpi_ipmi_device *ipmi; | ||
127 | struct acpi_ipmi_msg *ipmi_msg; | 208 | struct acpi_ipmi_msg *ipmi_msg; |
128 | struct pnp_dev *pnp_dev = ipmi->pnp_dev; | 209 | |
210 | ipmi = acpi_ipmi_dev_get(); | ||
211 | if (!ipmi) | ||
212 | return NULL; | ||
129 | 213 | ||
130 | ipmi_msg = kzalloc(sizeof(struct acpi_ipmi_msg), GFP_KERNEL); | 214 | ipmi_msg = kzalloc(sizeof(struct acpi_ipmi_msg), GFP_KERNEL); |
131 | if (!ipmi_msg) { | 215 | if (!ipmi_msg) { |
132 | dev_warn(&pnp_dev->dev, "Can't allocate memory for ipmi_msg\n"); | 216 | acpi_ipmi_dev_put(ipmi); |
133 | return NULL; | 217 | return NULL; |
134 | } | 218 | } |
219 | |||
220 | kref_init(&ipmi_msg->kref); | ||
135 | init_completion(&ipmi_msg->tx_complete); | 221 | init_completion(&ipmi_msg->tx_complete); |
136 | INIT_LIST_HEAD(&ipmi_msg->head); | 222 | INIT_LIST_HEAD(&ipmi_msg->head); |
137 | ipmi_msg->device = ipmi; | 223 | ipmi_msg->device = ipmi; |
224 | ipmi_msg->msg_done = ACPI_IPMI_UNKNOWN; | ||
225 | |||
138 | return ipmi_msg; | 226 | return ipmi_msg; |
139 | } | 227 | } |
140 | 228 | ||
141 | #define IPMI_OP_RGN_NETFN(offset) ((offset >> 8) & 0xff) | 229 | static void ipmi_msg_release(struct acpi_ipmi_msg *tx_msg) |
142 | #define IPMI_OP_RGN_CMD(offset) (offset & 0xff) | 230 | { |
143 | static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg, | 231 | acpi_ipmi_dev_put(tx_msg->device); |
144 | acpi_physical_address address, | 232 | kfree(tx_msg); |
145 | acpi_integer *value) | 233 | } |
234 | |||
235 | static void ipmi_msg_release_kref(struct kref *kref) | ||
236 | { | ||
237 | struct acpi_ipmi_msg *tx_msg = | ||
238 | container_of(kref, struct acpi_ipmi_msg, kref); | ||
239 | |||
240 | ipmi_msg_release(tx_msg); | ||
241 | } | ||
242 | |||
243 | static struct acpi_ipmi_msg *acpi_ipmi_msg_get(struct acpi_ipmi_msg *tx_msg) | ||
244 | { | ||
245 | kref_get(&tx_msg->kref); | ||
246 | |||
247 | return tx_msg; | ||
248 | } | ||
249 | |||
250 | static void acpi_ipmi_msg_put(struct acpi_ipmi_msg *tx_msg) | ||
251 | { | ||
252 | kref_put(&tx_msg->kref, ipmi_msg_release_kref); | ||
253 | } | ||
254 | |||
255 | #define IPMI_OP_RGN_NETFN(offset) ((offset >> 8) & 0xff) | ||
256 | #define IPMI_OP_RGN_CMD(offset) (offset & 0xff) | ||
257 | static int acpi_format_ipmi_request(struct acpi_ipmi_msg *tx_msg, | ||
258 | acpi_physical_address address, | ||
259 | acpi_integer *value) | ||
146 | { | 260 | { |
147 | struct kernel_ipmi_msg *msg; | 261 | struct kernel_ipmi_msg *msg; |
148 | struct acpi_ipmi_buffer *buffer; | 262 | struct acpi_ipmi_buffer *buffer; |
149 | struct acpi_ipmi_device *device; | 263 | struct acpi_ipmi_device *device; |
264 | unsigned long flags; | ||
150 | 265 | ||
151 | msg = &tx_msg->tx_message; | 266 | msg = &tx_msg->tx_message; |
267 | |||
152 | /* | 268 | /* |
153 | * IPMI network function and command are encoded in the address | 269 | * IPMI network function and command are encoded in the address |
154 | * within the IPMI OpRegion; see ACPI 4.0, sec 5.5.2.4.3. | 270 | * within the IPMI OpRegion; see ACPI 4.0, sec 5.5.2.4.3. |
155 | */ | 271 | */ |
156 | msg->netfn = IPMI_OP_RGN_NETFN(address); | 272 | msg->netfn = IPMI_OP_RGN_NETFN(address); |
157 | msg->cmd = IPMI_OP_RGN_CMD(address); | 273 | msg->cmd = IPMI_OP_RGN_CMD(address); |
158 | msg->data = tx_msg->tx_data; | 274 | msg->data = tx_msg->data; |
275 | |||
159 | /* | 276 | /* |
160 | * value is the parameter passed by the IPMI opregion space handler. | 277 | * value is the parameter passed by the IPMI opregion space handler. |
161 | * It points to the IPMI request message buffer | 278 | * It points to the IPMI request message buffer |
162 | */ | 279 | */ |
163 | buffer = (struct acpi_ipmi_buffer *)value; | 280 | buffer = (struct acpi_ipmi_buffer *)value; |
281 | |||
164 | /* copy the tx message data */ | 282 | /* copy the tx message data */ |
283 | if (buffer->length > ACPI_IPMI_MAX_MSG_LENGTH) { | ||
284 | dev_WARN_ONCE(tx_msg->device->dev, true, | ||
285 | "Unexpected request (msg len %d).\n", | ||
286 | buffer->length); | ||
287 | return -EINVAL; | ||
288 | } | ||
165 | msg->data_len = buffer->length; | 289 | msg->data_len = buffer->length; |
166 | memcpy(tx_msg->tx_data, buffer->data, msg->data_len); | 290 | memcpy(tx_msg->data, buffer->data, msg->data_len); |
291 | |||
167 | /* | 292 | /* |
168 | * now the default type is SYSTEM_INTERFACE and channel type is BMC. | 293 | * now the default type is SYSTEM_INTERFACE and channel type is BMC. |
169 | * If the netfn is APP_REQUEST and the cmd is SEND_MESSAGE, | 294 | * If the netfn is APP_REQUEST and the cmd is SEND_MESSAGE, |
@@ -177,14 +302,17 @@ static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg,
177 | 302 | ||
178 | /* Get the msgid */ | 303 | /* Get the msgid */ |
179 | device = tx_msg->device; | 304 | device = tx_msg->device; |
180 | mutex_lock(&device->tx_msg_lock); | 305 | |
306 | spin_lock_irqsave(&device->tx_msg_lock, flags); | ||
181 | device->curr_msgid++; | 307 | device->curr_msgid++; |
182 | tx_msg->tx_msgid = device->curr_msgid; | 308 | tx_msg->tx_msgid = device->curr_msgid; |
183 | mutex_unlock(&device->tx_msg_lock); | 309 | spin_unlock_irqrestore(&device->tx_msg_lock, flags); |
310 | |||
311 | return 0; | ||
184 | } | 312 | } |
185 | 313 | ||
186 | static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg, | 314 | static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg, |
187 | acpi_integer *value, int rem_time) | 315 | acpi_integer *value) |
188 | { | 316 | { |
189 | struct acpi_ipmi_buffer *buffer; | 317 | struct acpi_ipmi_buffer *buffer; |
190 | 318 | ||
@@ -193,109 +321,158 @@ static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg,
193 | * IPMI message returned by IPMI command. | 321 | * IPMI message returned by IPMI command. |
194 | */ | 322 | */ |
195 | buffer = (struct acpi_ipmi_buffer *)value; | 323 | buffer = (struct acpi_ipmi_buffer *)value; |
196 | if (!rem_time && !msg->msg_done) { | 324 | |
197 | buffer->status = ACPI_IPMI_TIMEOUT; | ||
198 | return; | ||
199 | } | ||
200 | /* | 325 | /* |
201 | * If the flag of msg_done is not set or the recv length is zero, it | 326 | * If the flag of msg_done is not set, it means that the IPMI command is |
202 | * means that the IPMI command is not executed correctly. | 327 | * not executed correctly. |
203 | * The status code will be ACPI_IPMI_UNKNOWN. | ||
204 | */ | 328 | */ |
205 | if (!msg->msg_done || !msg->rx_len) { | 329 | buffer->status = msg->msg_done; |
206 | buffer->status = ACPI_IPMI_UNKNOWN; | 330 | if (msg->msg_done != ACPI_IPMI_OK) |
207 | return; | 331 | return; |
208 | } | 332 | |
209 | /* | 333 | /* |
210 | * If the IPMI response message is obtained correctly, the status code | 334 | * If the IPMI response message is obtained correctly, the status code |
211 | * will be ACPI_IPMI_OK | 335 | * will be ACPI_IPMI_OK |
212 | */ | 336 | */ |
213 | buffer->status = ACPI_IPMI_OK; | ||
214 | buffer->length = msg->rx_len; | 337 | buffer->length = msg->rx_len; |
215 | memcpy(buffer->data, msg->rx_data, msg->rx_len); | 338 | memcpy(buffer->data, msg->data, msg->rx_len); |
216 | } | 339 | } |
217 | 340 | ||
218 | static void ipmi_flush_tx_msg(struct acpi_ipmi_device *ipmi) | 341 | static void ipmi_flush_tx_msg(struct acpi_ipmi_device *ipmi) |
219 | { | 342 | { |
220 | struct acpi_ipmi_msg *tx_msg, *temp; | 343 | struct acpi_ipmi_msg *tx_msg; |
221 | int count = HZ / 10; | 344 | unsigned long flags; |
222 | struct pnp_dev *pnp_dev = ipmi->pnp_dev; | 345 | |
346 | /* | ||
347 | * NOTE: On-going ipmi_recv_msg | ||
348 | * ipmi_msg_handler() may still be invoked by ipmi_si after | ||
349 | * flushing. But it is safe to do a fast flushing on module_exit() | ||
350 | * without waiting for all ipmi_recv_msg(s) to complete from | ||
351 | * ipmi_msg_handler() as it is ensured by ipmi_si that all | ||
352 | * ipmi_recv_msg(s) are freed after invoking ipmi_destroy_user(). | ||
353 | */ | ||
354 | spin_lock_irqsave(&ipmi->tx_msg_lock, flags); | ||
355 | while (!list_empty(&ipmi->tx_msg_list)) { | ||
356 | tx_msg = list_first_entry(&ipmi->tx_msg_list, | ||
357 | struct acpi_ipmi_msg, | ||
358 | head); | ||
359 | list_del(&tx_msg->head); | ||
360 | spin_unlock_irqrestore(&ipmi->tx_msg_lock, flags); | ||
223 | 361 | ||
224 | list_for_each_entry_safe(tx_msg, temp, &ipmi->tx_msg_list, head) { | ||
225 | /* wake up the sleep thread on the Tx msg */ | 362 | /* wake up the sleep thread on the Tx msg */ |
226 | complete(&tx_msg->tx_complete); | 363 | complete(&tx_msg->tx_complete); |
364 | acpi_ipmi_msg_put(tx_msg); | ||
365 | spin_lock_irqsave(&ipmi->tx_msg_lock, flags); | ||
227 | } | 366 | } |
367 | spin_unlock_irqrestore(&ipmi->tx_msg_lock, flags); | ||
368 | } | ||
228 | 369 | ||
229 | /* wait for about 100ms to flush the tx message list */ | 370 | static void ipmi_cancel_tx_msg(struct acpi_ipmi_device *ipmi, |
230 | while (count--) { | 371 | struct acpi_ipmi_msg *msg) |
231 | if (list_empty(&ipmi->tx_msg_list)) | 372 | { |
373 | struct acpi_ipmi_msg *tx_msg, *temp; | ||
374 | bool msg_found = false; | ||
375 | unsigned long flags; | ||
376 | |||
377 | spin_lock_irqsave(&ipmi->tx_msg_lock, flags); | ||
378 | list_for_each_entry_safe(tx_msg, temp, &ipmi->tx_msg_list, head) { | ||
379 | if (msg == tx_msg) { | ||
380 | msg_found = true; | ||
381 | list_del(&tx_msg->head); | ||
232 | break; | 382 | break; |
233 | schedule_timeout(1); | 383 | } |
234 | } | 384 | } |
235 | if (!list_empty(&ipmi->tx_msg_list)) | 385 | spin_unlock_irqrestore(&ipmi->tx_msg_lock, flags); |
236 | dev_warn(&pnp_dev->dev, "tx msg list is not NULL\n"); | 386 | |
387 | if (msg_found) | ||
388 | acpi_ipmi_msg_put(tx_msg); | ||
237 | } | 389 | } |
238 | 390 | ||
239 | static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data) | 391 | static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data) |
240 | { | 392 | { |
241 | struct acpi_ipmi_device *ipmi_device = user_msg_data; | 393 | struct acpi_ipmi_device *ipmi_device = user_msg_data; |
242 | int msg_found = 0; | 394 | bool msg_found = false; |
243 | struct acpi_ipmi_msg *tx_msg; | 395 | struct acpi_ipmi_msg *tx_msg, *temp; |
244 | struct pnp_dev *pnp_dev = ipmi_device->pnp_dev; | 396 | struct device *dev = ipmi_device->dev; |
397 | unsigned long flags; | ||
245 | 398 | ||
246 | if (msg->user != ipmi_device->user_interface) { | 399 | if (msg->user != ipmi_device->user_interface) { |
247 | dev_warn(&pnp_dev->dev, "Unexpected response is returned. " | 400 | dev_warn(dev, |
248 | "returned user %p, expected user %p\n", | 401 | "Unexpected response is returned. returned user %p, expected user %p\n", |
249 | msg->user, ipmi_device->user_interface); | 402 | msg->user, ipmi_device->user_interface); |
250 | ipmi_free_recv_msg(msg); | 403 | goto out_msg; |
251 | return; | ||
252 | } | 404 | } |
253 | mutex_lock(&ipmi_device->tx_msg_lock); | 405 | |
254 | list_for_each_entry(tx_msg, &ipmi_device->tx_msg_list, head) { | 406 | spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags); |
407 | list_for_each_entry_safe(tx_msg, temp, &ipmi_device->tx_msg_list, head) { | ||
255 | if (msg->msgid == tx_msg->tx_msgid) { | 408 | if (msg->msgid == tx_msg->tx_msgid) { |
256 | msg_found = 1; | 409 | msg_found = true; |
410 | list_del(&tx_msg->head); | ||
257 | break; | 411 | break; |
258 | } | 412 | } |
259 | } | 413 | } |
414 | spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags); | ||
260 | 415 | ||
261 | mutex_unlock(&ipmi_device->tx_msg_lock); | ||
262 | if (!msg_found) { | 416 | if (!msg_found) { |
263 | dev_warn(&pnp_dev->dev, "Unexpected response (msg id %ld) is " | 417 | dev_warn(dev, |
264 | "returned.\n", msg->msgid); | 418 | "Unexpected response (msg id %ld) is returned.\n", |
265 | ipmi_free_recv_msg(msg); | 419 | msg->msgid); |
266 | return; | 420 | goto out_msg; |
421 | } | ||
422 | |||
423 | /* copy the response data to Rx_data buffer */ | ||
424 | if (msg->msg.data_len > ACPI_IPMI_MAX_MSG_LENGTH) { | ||
425 | dev_WARN_ONCE(dev, true, | ||
426 | "Unexpected response (msg len %d).\n", | ||
427 | msg->msg.data_len); | ||
428 | goto out_comp; | ||
267 | } | 429 | } |
268 | 430 | ||
269 | if (msg->msg.data_len) { | 431 | /* response msg is an error msg */ |
270 | /* copy the response data to Rx_data buffer */ | 432 | msg->recv_type = IPMI_RESPONSE_RECV_TYPE; |
271 | memcpy(tx_msg->rx_data, msg->msg_data, msg->msg.data_len); | 433 | if (msg->recv_type == IPMI_RESPONSE_RECV_TYPE && |
272 | tx_msg->rx_len = msg->msg.data_len; | 434 | msg->msg.data_len == 1) { |
273 | tx_msg->msg_done = 1; | 435 | if (msg->msg.data[0] == IPMI_TIMEOUT_COMPLETION_CODE) { |
436 | dev_WARN_ONCE(dev, true, | ||
437 | "Unexpected response (timeout).\n"); | ||
438 | tx_msg->msg_done = ACPI_IPMI_TIMEOUT; | ||
439 | } | ||
440 | goto out_comp; | ||
274 | } | 441 | } |
442 | |||
443 | tx_msg->rx_len = msg->msg.data_len; | ||
444 | memcpy(tx_msg->data, msg->msg.data, tx_msg->rx_len); | ||
445 | tx_msg->msg_done = ACPI_IPMI_OK; | ||
446 | |||
447 | out_comp: | ||
275 | complete(&tx_msg->tx_complete); | 448 | complete(&tx_msg->tx_complete); |
449 | acpi_ipmi_msg_put(tx_msg); | ||
450 | out_msg: | ||
276 | ipmi_free_recv_msg(msg); | 451 | ipmi_free_recv_msg(msg); |
277 | }; | 452 | } |
278 | 453 | ||
279 | static void ipmi_register_bmc(int iface, struct device *dev) | 454 | static void ipmi_register_bmc(int iface, struct device *dev) |
280 | { | 455 | { |
281 | struct acpi_ipmi_device *ipmi_device, *temp; | 456 | struct acpi_ipmi_device *ipmi_device, *temp; |
282 | struct pnp_dev *pnp_dev; | ||
283 | ipmi_user_t user; | ||
284 | int err; | 457 | int err; |
285 | struct ipmi_smi_info smi_data; | 458 | struct ipmi_smi_info smi_data; |
286 | acpi_handle handle; | 459 | acpi_handle handle; |
287 | 460 | ||
288 | err = ipmi_get_smi_info(iface, &smi_data); | 461 | err = ipmi_get_smi_info(iface, &smi_data); |
289 | |||
290 | if (err) | 462 | if (err) |
291 | return; | 463 | return; |
292 | 464 | ||
293 | if (smi_data.addr_src != SI_ACPI) { | 465 | if (smi_data.addr_src != SI_ACPI) |
294 | put_device(smi_data.dev); | 466 | goto err_ref; |
295 | return; | ||
296 | } | ||
297 | |||
298 | handle = smi_data.addr_info.acpi_info.acpi_handle; | 467 | handle = smi_data.addr_info.acpi_info.acpi_handle; |
468 | if (!handle) | ||
469 | goto err_ref; | ||
470 | |||
471 | ipmi_device = ipmi_dev_alloc(iface, smi_data.dev, handle); | ||
472 | if (!ipmi_device) { | ||
473 | dev_warn(smi_data.dev, "Can't create IPMI user interface\n"); | ||
474 | goto err_ref; | ||
475 | } | ||
299 | 476 | ||
300 | mutex_lock(&driver_data.ipmi_lock); | 477 | mutex_lock(&driver_data.ipmi_lock); |
301 | list_for_each_entry(temp, &driver_data.ipmi_devices, head) { | 478 | list_for_each_entry(temp, &driver_data.ipmi_devices, head) { |
@@ -304,34 +481,20 @@ static void ipmi_register_bmc(int iface, struct device *dev)
304 | * to the device list, don't add it again. | 481 | * to the device list, don't add it again. |
305 | */ | 482 | */ |
306 | if (temp->handle == handle) | 483 | if (temp->handle == handle) |
307 | goto out; | 484 | goto err_lock; |
308 | } | ||
309 | |||
310 | ipmi_device = kzalloc(sizeof(*ipmi_device), GFP_KERNEL); | ||
311 | |||
312 | if (!ipmi_device) | ||
313 | goto out; | ||
314 | |||
315 | pnp_dev = to_pnp_dev(smi_data.dev); | ||
316 | ipmi_device->handle = handle; | ||
317 | ipmi_device->pnp_dev = pnp_dev; | ||
318 | |||
319 | err = ipmi_create_user(iface, &driver_data.ipmi_hndlrs, | ||
320 | ipmi_device, &user); | ||
321 | if (err) { | ||
322 | dev_warn(&pnp_dev->dev, "Can't create IPMI user interface\n"); | ||
323 | kfree(ipmi_device); | ||
324 | goto out; | ||
325 | } | 485 | } |
326 | acpi_add_ipmi_device(ipmi_device); | 486 | if (!driver_data.selected_smi) |
327 | ipmi_device->user_interface = user; | 487 | driver_data.selected_smi = ipmi_device; |
328 | ipmi_device->ipmi_ifnum = iface; | 488 | list_add_tail(&ipmi_device->head, &driver_data.ipmi_devices); |
329 | mutex_unlock(&driver_data.ipmi_lock); | 489 | mutex_unlock(&driver_data.ipmi_lock); |
330 | memcpy(&ipmi_device->smi_data, &smi_data, sizeof(struct ipmi_smi_info)); | 490 | |
491 | put_device(smi_data.dev); | ||
331 | return; | 492 | return; |
332 | 493 | ||
333 | out: | 494 | err_lock: |
334 | mutex_unlock(&driver_data.ipmi_lock); | 495 | mutex_unlock(&driver_data.ipmi_lock); |
496 | ipmi_dev_release(ipmi_device); | ||
497 | err_ref: | ||
335 | put_device(smi_data.dev); | 498 | put_device(smi_data.dev); |
336 | return; | 499 | return; |
337 | } | 500 | } |
@@ -339,23 +502,29 @@ out:
339 | static void ipmi_bmc_gone(int iface) | 502 | static void ipmi_bmc_gone(int iface) |
340 | { | 503 | { |
341 | struct acpi_ipmi_device *ipmi_device, *temp; | 504 | struct acpi_ipmi_device *ipmi_device, *temp; |
505 | bool dev_found = false; | ||
342 | 506 | ||
343 | mutex_lock(&driver_data.ipmi_lock); | 507 | mutex_lock(&driver_data.ipmi_lock); |
344 | list_for_each_entry_safe(ipmi_device, temp, | 508 | list_for_each_entry_safe(ipmi_device, temp, |
345 | &driver_data.ipmi_devices, head) { | 509 | &driver_data.ipmi_devices, head) { |
346 | if (ipmi_device->ipmi_ifnum != iface) | 510 | if (ipmi_device->ipmi_ifnum != iface) { |
347 | continue; | 511 | dev_found = true; |
348 | 512 | __ipmi_dev_kill(ipmi_device); | |
349 | acpi_remove_ipmi_device(ipmi_device); | 513 | break; |
350 | put_device(ipmi_device->smi_data.dev); | 514 | } |
351 | kfree(ipmi_device); | ||
352 | break; | ||
353 | } | 515 | } |
516 | if (!driver_data.selected_smi) | ||
517 | driver_data.selected_smi = list_first_entry_or_null( | ||
518 | &driver_data.ipmi_devices, | ||
519 | struct acpi_ipmi_device, head); | ||
354 | mutex_unlock(&driver_data.ipmi_lock); | 520 | mutex_unlock(&driver_data.ipmi_lock); |
521 | |||
522 | if (dev_found) { | ||
523 | ipmi_flush_tx_msg(ipmi_device); | ||
524 | acpi_ipmi_dev_put(ipmi_device); | ||
525 | } | ||
355 | } | 526 | } |
356 | /* -------------------------------------------------------------------------- | 527 | |
357 | * Address Space Management | ||
358 | * -------------------------------------------------------------------------- */ | ||
359 | /* | 528 | /* |
360 | * This is the IPMI opregion space handler. | 529 | * This is the IPMI opregion space handler. |
361 | * @function: indicates the read/write. In fact as the IPMI message is driven | 530 | * @function: indicates the read/write. In fact as the IPMI message is driven |
@@ -368,16 +537,17 @@ static void ipmi_bmc_gone(int iface)
368 | * the response IPMI message returned by IPMI command. | 537 | * the response IPMI message returned by IPMI command. |
369 | * @handler_context: IPMI device context. | 538 | * @handler_context: IPMI device context. |
370 | */ | 539 | */ |
371 | |||
372 | static acpi_status | 540 | static acpi_status |
373 | acpi_ipmi_space_handler(u32 function, acpi_physical_address address, | 541 | acpi_ipmi_space_handler(u32 function, acpi_physical_address address, |
374 | u32 bits, acpi_integer *value, | 542 | u32 bits, acpi_integer *value, |
375 | void *handler_context, void *region_context) | 543 | void *handler_context, void *region_context) |
376 | { | 544 | { |
377 | struct acpi_ipmi_msg *tx_msg; | 545 | struct acpi_ipmi_msg *tx_msg; |
378 | struct acpi_ipmi_device *ipmi_device = handler_context; | 546 | struct acpi_ipmi_device *ipmi_device; |
379 | int err, rem_time; | 547 | int err; |
380 | acpi_status status; | 548 | acpi_status status; |
549 | unsigned long flags; | ||
550 | |||
381 | /* | 551 | /* |
382 | * IPMI opregion message. | 552 | * IPMI opregion message. |
383 | * IPMI message is firstly written to the BMC and system software | 553 | * IPMI message is firstly written to the BMC and system software |
@@ -387,118 +557,75 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
387 | if ((function & ACPI_IO_MASK) == ACPI_READ) | 557 | if ((function & ACPI_IO_MASK) == ACPI_READ) |
388 | return AE_TYPE; | 558 | return AE_TYPE; |
389 | 559 | ||
390 | if (!ipmi_device->user_interface) | 560 | tx_msg = ipmi_msg_alloc(); |
561 | if (!tx_msg) | ||
391 | return AE_NOT_EXIST; | 562 | return AE_NOT_EXIST; |
563 | ipmi_device = tx_msg->device; | ||
392 | 564 | ||
393 | tx_msg = acpi_alloc_ipmi_msg(ipmi_device); | 565 | if (acpi_format_ipmi_request(tx_msg, address, value) != 0) { |
394 | if (!tx_msg) | 566 | ipmi_msg_release(tx_msg); |
395 | return AE_NO_MEMORY; | 567 | return AE_TYPE; |
568 | } | ||
396 | 569 | ||
397 | acpi_format_ipmi_msg(tx_msg, address, value); | 570 | acpi_ipmi_msg_get(tx_msg); |
398 | mutex_lock(&ipmi_device->tx_msg_lock); | 571 | mutex_lock(&driver_data.ipmi_lock); |
572 | /* Do not add a tx_msg that can not be flushed. */ | ||
573 | if (ipmi_device->dead) { | ||
574 | mutex_unlock(&driver_data.ipmi_lock); | ||
575 | ipmi_msg_release(tx_msg); | ||
576 | return AE_NOT_EXIST; | ||
577 | } | ||
578 | spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags); | ||
399 | list_add_tail(&tx_msg->head, &ipmi_device->tx_msg_list); | 579 | list_add_tail(&tx_msg->head, &ipmi_device->tx_msg_list); |
400 | mutex_unlock(&ipmi_device->tx_msg_lock); | 580 | spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags); |
581 | mutex_unlock(&driver_data.ipmi_lock); | ||
582 | |||
401 | err = ipmi_request_settime(ipmi_device->user_interface, | 583 | err = ipmi_request_settime(ipmi_device->user_interface, |
402 | &tx_msg->addr, | 584 | &tx_msg->addr, |
403 | tx_msg->tx_msgid, | 585 | tx_msg->tx_msgid, |
404 | &tx_msg->tx_message, | 586 | &tx_msg->tx_message, |
405 | NULL, 0, 0, 0); | 587 | NULL, 0, 0, IPMI_TIMEOUT); |
406 | if (err) { | 588 | if (err) { |
407 | status = AE_ERROR; | 589 | status = AE_ERROR; |
408 | goto end_label; | 590 | goto out_msg; |
409 | } | 591 | } |
410 | rem_time = wait_for_completion_timeout(&tx_msg->tx_complete, | 592 | wait_for_completion(&tx_msg->tx_complete); |
411 | IPMI_TIMEOUT); | 593 | |
412 | acpi_format_ipmi_response(tx_msg, value, rem_time); | 594 | acpi_format_ipmi_response(tx_msg, value); |
413 | status = AE_OK; | 595 | status = AE_OK; |
414 | 596 | ||
415 | end_label: | 597 | out_msg: |
416 | mutex_lock(&ipmi_device->tx_msg_lock); | 598 | ipmi_cancel_tx_msg(ipmi_device, tx_msg); |
417 | list_del(&tx_msg->head); | 599 | acpi_ipmi_msg_put(tx_msg); |
418 | mutex_unlock(&ipmi_device->tx_msg_lock); | ||
419 | kfree(tx_msg); | ||
420 | return status; | 600 | return status; |
421 | } | 601 | } |
422 | 602 | ||
423 | static void ipmi_remove_space_handler(struct acpi_ipmi_device *ipmi) | 603 | static int __init acpi_ipmi_init(void) |
424 | { | ||
425 | if (!test_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags)) | ||
426 | return; | ||
427 | |||
428 | acpi_remove_address_space_handler(ipmi->handle, | ||
429 | ACPI_ADR_SPACE_IPMI, &acpi_ipmi_space_handler); | ||
430 | |||
431 | clear_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags); | ||
432 | } | ||
433 | |||
434 | static int ipmi_install_space_handler(struct acpi_ipmi_device *ipmi) | ||
435 | { | 604 | { |
605 | int result; | ||
436 | acpi_status status; | 606 | acpi_status status; |
437 | 607 | ||
438 | if (test_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags)) | 608 | if (acpi_disabled) |
439 | return 0; | 609 | return 0; |
440 | 610 | ||
441 | status = acpi_install_address_space_handler(ipmi->handle, | 611 | status = acpi_install_address_space_handler(ACPI_ROOT_OBJECT, |
442 | ACPI_ADR_SPACE_IPMI, | 612 | ACPI_ADR_SPACE_IPMI, |
443 | &acpi_ipmi_space_handler, | 613 | &acpi_ipmi_space_handler, |
444 | NULL, ipmi); | 614 | NULL, NULL); |
445 | if (ACPI_FAILURE(status)) { | 615 | if (ACPI_FAILURE(status)) { |
446 | struct pnp_dev *pnp_dev = ipmi->pnp_dev; | 616 | pr_warn("Can't register IPMI opregion space handle\n"); |
447 | dev_warn(&pnp_dev->dev, "Can't register IPMI opregion space " | ||
448 | "handle\n"); | ||
449 | return -EINVAL; | 617 | return -EINVAL; |
450 | } | 618 | } |
451 | set_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags); | ||
452 | return 0; | ||
453 | } | ||
454 | |||
455 | static void acpi_add_ipmi_device(struct acpi_ipmi_device *ipmi_device) | ||
456 | { | ||
457 | |||
458 | INIT_LIST_HEAD(&ipmi_device->head); | ||
459 | |||
460 | mutex_init(&ipmi_device->tx_msg_lock); | ||
461 | INIT_LIST_HEAD(&ipmi_device->tx_msg_list); | ||
462 | ipmi_install_space_handler(ipmi_device); | ||
463 | |||
464 | list_add_tail(&ipmi_device->head, &driver_data.ipmi_devices); | ||
465 | } | ||
466 | |||
467 | static void acpi_remove_ipmi_device(struct acpi_ipmi_device *ipmi_device) | ||
468 | { | ||
469 | /* | ||
470 | * If the IPMI user interface is created, it should be | ||
471 | * destroyed. | ||
472 | */ | ||
473 | if (ipmi_device->user_interface) { | ||
474 | ipmi_destroy_user(ipmi_device->user_interface); | ||
475 | ipmi_device->user_interface = NULL; | ||
476 | } | ||
477 | /* flush the Tx_msg list */ | ||
478 | if (!list_empty(&ipmi_device->tx_msg_list)) | ||
479 | ipmi_flush_tx_msg(ipmi_device); | ||
480 | |||
481 | list_del(&ipmi_device->head); | ||
482 | ipmi_remove_space_handler(ipmi_device); | ||
483 | } | ||
484 | |||
485 | static int __init acpi_ipmi_init(void) | ||
486 | { | ||
487 | int result = 0; | ||
488 | |||
489 | if (acpi_disabled) | ||
490 | return result; | ||
491 | |||
492 | mutex_init(&driver_data.ipmi_lock); | ||
493 | |||
494 | result = ipmi_smi_watcher_register(&driver_data.bmc_events); | 619 | result = ipmi_smi_watcher_register(&driver_data.bmc_events); |
620 | if (result) | ||
621 | pr_err("Can't register IPMI system interface watcher\n"); | ||
495 | 622 | ||
496 | return result; | 623 | return result; |
497 | } | 624 | } |
498 | 625 | ||
499 | static void __exit acpi_ipmi_exit(void) | 626 | static void __exit acpi_ipmi_exit(void) |
500 | { | 627 | { |
501 | struct acpi_ipmi_device *ipmi_device, *temp; | 628 | struct acpi_ipmi_device *ipmi_device; |
502 | 629 | ||
503 | if (acpi_disabled) | 630 | if (acpi_disabled) |
504 | return; | 631 | return; |
@@ -512,13 +639,22 @@ static void __exit acpi_ipmi_exit(void)
512 | * handler and free it. | 639 | * handler and free it. |
513 | */ | 640 | */ |
514 | mutex_lock(&driver_data.ipmi_lock); | 641 | mutex_lock(&driver_data.ipmi_lock); |
515 | list_for_each_entry_safe(ipmi_device, temp, | 642 | while (!list_empty(&driver_data.ipmi_devices)) { |
516 | &driver_data.ipmi_devices, head) { | 643 | ipmi_device = list_first_entry(&driver_data.ipmi_devices, |
517 | acpi_remove_ipmi_device(ipmi_device); | 644 | struct acpi_ipmi_device, |
518 | put_device(ipmi_device->smi_data.dev); | 645 | head); |
519 | kfree(ipmi_device); | 646 | __ipmi_dev_kill(ipmi_device); |
647 | mutex_unlock(&driver_data.ipmi_lock); | ||
648 | |||
649 | ipmi_flush_tx_msg(ipmi_device); | ||
650 | acpi_ipmi_dev_put(ipmi_device); | ||
651 | |||
652 | mutex_lock(&driver_data.ipmi_lock); | ||
520 | } | 653 | } |
521 | mutex_unlock(&driver_data.ipmi_lock); | 654 | mutex_unlock(&driver_data.ipmi_lock); |
655 | acpi_remove_address_space_handler(ACPI_ROOT_OBJECT, | ||
656 | ACPI_ADR_SPACE_IPMI, | ||
657 | &acpi_ipmi_space_handler); | ||
522 | } | 658 | } |
523 | 659 | ||
524 | module_init(acpi_ipmi_init); | 660 | module_init(acpi_ipmi_init); |
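
For reference, the IPMI operation region encoding handled above (and unchanged by this patch) packs the IPMI network function into bits 15:8 of the region offset and the command into bits 7:0, while data is exchanged through the status/length/data buffer defined in ACPI 4.0, sec 5.5.2.4.3.2. Below is a minimal stand-alone sketch of that encoding; the macros and buffer layout are copied from the driver, the sample offset value is hypothetical.

    #include <stdio.h>
    #include <stdint.h>

    /* Same encoding as IPMI_OP_RGN_NETFN()/IPMI_OP_RGN_CMD() in the driver:
     * bits 15:8 of the opregion offset hold the network function,
     * bits 7:0 hold the command.
     */
    #define IPMI_OP_RGN_NETFN(offset) (((offset) >> 8) & 0xff)
    #define IPMI_OP_RGN_CMD(offset)   ((offset) & 0xff)

    /* Request/response buffer layout per ACPI 4.0, sec 5.5.2.4.3.2 */
    struct acpi_ipmi_buffer {
            uint8_t status;
            uint8_t length;
            uint8_t data[64];
    };

    int main(void)
    {
            uint16_t offset = 0x0601; /* hypothetical: netfn 0x06, cmd 0x01 */

            printf("netfn=0x%02x cmd=0x%02x\n",
                   (unsigned)IPMI_OP_RGN_NETFN(offset),
                   (unsigned)IPMI_OP_RGN_CMD(offset));
            return 0;
    }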