aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/acpi/acpi_ipmi.c
diff options
context:
space:
mode:
authorLv Zheng <lv.zheng@intel.com>2013-09-13 01:13:54 -0400
committerRafael J. Wysocki <rafael.j.wysocki@intel.com>2013-09-30 13:46:12 -0400
commita1a69b297e4775298d6407357332ea1adc218396 (patch)
tree81328e97b9e6fdce5b81ae17a22857f963a50fe7 /drivers/acpi/acpi_ipmi.c
parent8584ec6ae9cc386de344e0d33b60f76368bb73ab (diff)
ACPI / IPMI: Fix race caused by the unprotected ACPI IPMI user
This patch uses reference counting to fix the race caused by the unprotected ACPI IPMI user. There are two rules for using the ipmi_si APIs: 1. In ipmi_si, ipmi_destroy_user() can ensure that no ipmi_recv_msg will be passed to ipmi_msg_handler(), but ipmi_request_settime() cannot use an invalid ipmi_user_t. This means the ipmi_si users must ensure that there won't be any local references on ipmi_user_t before invoking ipmi_destroy_user(). 2. In ipmi_si, the smi_gone()/new_smi() callbacks are protected by smi_watchers_mutex, so their execution is serialized. But as a new smi can re-use a freed intf_num, it requires that the callback implementation must not use intf_num as an identification means, or it must ensure that all references to the previous smi are dropped before exiting the smi_gone() callback. As the acpi_ipmi_device->user_interface check in acpi_ipmi_space_handler() can happen before setting user_interface to NULL, and code after the check in acpi_ipmi_space_handler() can happen after user_interface becomes NULL, the on-going acpi_ipmi_space_handler() can still pass an invalid acpi_ipmi_device->user_interface to ipmi_request_settime(). Such race conditions are not allowed by the IPMI layer's API design, as a crash will happen in ipmi_request_settime() if something like that happens. This patch follows the ipmi_devintf.c design: 1. Invoke ipmi_destroy_user() after the reference count of acpi_ipmi_device drops to 0. The reference count of acpi_ipmi_device dropping to 0 also means that all tx_msg instances related to this acpi_ipmi_device are freed. This matches the IPMI layer's API calling rule on ipmi_destroy_user() and ipmi_request_settime(). 2. ipmi_flush_tx_msg() is performed so that no on-going tx_msg can still be running in acpi_ipmi_space_handler(). 
And it is invoked after invoking __ipmi_dev_kill(), where acpi_ipmi_device is deleted from the list with a "dead" flag set, and the "dead" flag check is also introduced at the point where a tx_msg is going to be added to the tx_msg_list, so that no new tx_msg can be created after returning from __ipmi_dev_kill(). 3. The waiting code in ipmi_flush_tx_msg() is deleted because it is not required: this patch ensures that no acpi_ipmi reference is still held for ipmi_user_t before calling ipmi_destroy_user(), and ipmi_destroy_user() can ensure that no more ipmi_msg_handler() calls can happen after returning from ipmi_destroy_user(). 4. The flushing of tx_msg is also moved out of ipmi_lock in this patch. The forthcoming IPMI operation region handler installation changes also require acpi_ipmi_device to be handled in this style. The header comment of the file is also updated due to this design change. Signed-off-by: Lv Zheng <lv.zheng@intel.com> Reviewed-by: Huang Ying <ying.huang@intel.com> Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Diffstat (limited to 'drivers/acpi/acpi_ipmi.c')
-rw-r--r--drivers/acpi/acpi_ipmi.c249
1 files changed, 156 insertions, 93 deletions
diff --git a/drivers/acpi/acpi_ipmi.c b/drivers/acpi/acpi_ipmi.c
index 9171a1a668f2..b285386eb37f 100644
--- a/drivers/acpi/acpi_ipmi.c
+++ b/drivers/acpi/acpi_ipmi.c
@@ -1,8 +1,9 @@
1/* 1/*
2 * acpi_ipmi.c - ACPI IPMI opregion 2 * acpi_ipmi.c - ACPI IPMI opregion
3 * 3 *
4 * Copyright (C) 2010 Intel Corporation 4 * Copyright (C) 2010, 2013 Intel Corporation
5 * Copyright (C) 2010 Zhao Yakui <yakui.zhao@intel.com> 5 * Author: Zhao Yakui <yakui.zhao@intel.com>
6 * Lv Zheng <lv.zheng@intel.com>
6 * 7 *
7 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 8 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
8 * 9 *
@@ -67,6 +68,8 @@ struct acpi_ipmi_device {
67 long curr_msgid; 68 long curr_msgid;
68 unsigned long flags; 69 unsigned long flags;
69 struct ipmi_smi_info smi_data; 70 struct ipmi_smi_info smi_data;
71 bool dead;
72 struct kref kref;
70}; 73};
71 74
72struct ipmi_driver_data { 75struct ipmi_driver_data {
@@ -107,8 +110,8 @@ struct acpi_ipmi_buffer {
107static void ipmi_register_bmc(int iface, struct device *dev); 110static void ipmi_register_bmc(int iface, struct device *dev);
108static void ipmi_bmc_gone(int iface); 111static void ipmi_bmc_gone(int iface);
109static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data); 112static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data);
110static void acpi_add_ipmi_device(struct acpi_ipmi_device *ipmi_device); 113static int ipmi_install_space_handler(struct acpi_ipmi_device *ipmi);
111static void acpi_remove_ipmi_device(struct acpi_ipmi_device *ipmi_device); 114static void ipmi_remove_space_handler(struct acpi_ipmi_device *ipmi);
112 115
113static struct ipmi_driver_data driver_data = { 116static struct ipmi_driver_data driver_data = {
114 .ipmi_devices = LIST_HEAD_INIT(driver_data.ipmi_devices), 117 .ipmi_devices = LIST_HEAD_INIT(driver_data.ipmi_devices),
@@ -122,6 +125,88 @@ static struct ipmi_driver_data driver_data = {
122 }, 125 },
123}; 126};
124 127
128static struct acpi_ipmi_device *
129ipmi_dev_alloc(int iface, struct ipmi_smi_info *smi_data, acpi_handle handle)
130{
131 struct acpi_ipmi_device *ipmi_device;
132 int err;
133 ipmi_user_t user;
134
135 ipmi_device = kzalloc(sizeof(*ipmi_device), GFP_KERNEL);
136 if (!ipmi_device)
137 return NULL;
138
139 kref_init(&ipmi_device->kref);
140 INIT_LIST_HEAD(&ipmi_device->head);
141 INIT_LIST_HEAD(&ipmi_device->tx_msg_list);
142 spin_lock_init(&ipmi_device->tx_msg_lock);
143
144 ipmi_device->handle = handle;
145 ipmi_device->pnp_dev = to_pnp_dev(get_device(smi_data->dev));
146 memcpy(&ipmi_device->smi_data, smi_data, sizeof(struct ipmi_smi_info));
147 ipmi_device->ipmi_ifnum = iface;
148
149 err = ipmi_create_user(iface, &driver_data.ipmi_hndlrs,
150 ipmi_device, &user);
151 if (err) {
152 put_device(smi_data->dev);
153 kfree(ipmi_device);
154 return NULL;
155 }
156 ipmi_device->user_interface = user;
157 ipmi_install_space_handler(ipmi_device);
158
159 return ipmi_device;
160}
161
162static void ipmi_dev_release(struct acpi_ipmi_device *ipmi_device)
163{
164 ipmi_remove_space_handler(ipmi_device);
165 ipmi_destroy_user(ipmi_device->user_interface);
166 put_device(ipmi_device->smi_data.dev);
167 kfree(ipmi_device);
168}
169
170static void ipmi_dev_release_kref(struct kref *kref)
171{
172 struct acpi_ipmi_device *ipmi =
173 container_of(kref, struct acpi_ipmi_device, kref);
174
175 ipmi_dev_release(ipmi);
176}
177
178static void __ipmi_dev_kill(struct acpi_ipmi_device *ipmi_device)
179{
180 list_del(&ipmi_device->head);
181 /*
182 * Always setting dead flag after deleting from the list or
183 * list_for_each_entry() codes must get changed.
184 */
185 ipmi_device->dead = true;
186}
187
188static struct acpi_ipmi_device *acpi_ipmi_dev_get(int iface)
189{
190 struct acpi_ipmi_device *temp, *ipmi_device = NULL;
191
192 mutex_lock(&driver_data.ipmi_lock);
193 list_for_each_entry(temp, &driver_data.ipmi_devices, head) {
194 if (temp->ipmi_ifnum == iface) {
195 ipmi_device = temp;
196 kref_get(&ipmi_device->kref);
197 break;
198 }
199 }
200 mutex_unlock(&driver_data.ipmi_lock);
201
202 return ipmi_device;
203}
204
205static void acpi_ipmi_dev_put(struct acpi_ipmi_device *ipmi_device)
206{
207 kref_put(&ipmi_device->kref, ipmi_dev_release_kref);
208}
209
125static struct acpi_ipmi_msg *acpi_alloc_ipmi_msg(struct acpi_ipmi_device *ipmi) 210static struct acpi_ipmi_msg *acpi_alloc_ipmi_msg(struct acpi_ipmi_device *ipmi)
126{ 211{
127 struct acpi_ipmi_msg *ipmi_msg; 212 struct acpi_ipmi_msg *ipmi_msg;
@@ -220,25 +305,22 @@ static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg,
220static void ipmi_flush_tx_msg(struct acpi_ipmi_device *ipmi) 305static void ipmi_flush_tx_msg(struct acpi_ipmi_device *ipmi)
221{ 306{
222 struct acpi_ipmi_msg *tx_msg, *temp; 307 struct acpi_ipmi_msg *tx_msg, *temp;
223 int count = HZ / 10;
224 struct pnp_dev *pnp_dev = ipmi->pnp_dev;
225 unsigned long flags; 308 unsigned long flags;
226 309
310 /*
311 * NOTE: On-going ipmi_recv_msg
312 * ipmi_msg_handler() may still be invoked by ipmi_si after
313 * flushing. But it is safe to do a fast flushing on module_exit()
314 * without waiting for all ipmi_recv_msg(s) to complete from
315 * ipmi_msg_handler() as it is ensured by ipmi_si that all
316 * ipmi_recv_msg(s) are freed after invoking ipmi_destroy_user().
317 */
227 spin_lock_irqsave(&ipmi->tx_msg_lock, flags); 318 spin_lock_irqsave(&ipmi->tx_msg_lock, flags);
228 list_for_each_entry_safe(tx_msg, temp, &ipmi->tx_msg_list, head) { 319 list_for_each_entry_safe(tx_msg, temp, &ipmi->tx_msg_list, head) {
229 /* wake up the sleep thread on the Tx msg */ 320 /* wake up the sleep thread on the Tx msg */
230 complete(&tx_msg->tx_complete); 321 complete(&tx_msg->tx_complete);
231 } 322 }
232 spin_unlock_irqrestore(&ipmi->tx_msg_lock, flags); 323 spin_unlock_irqrestore(&ipmi->tx_msg_lock, flags);
233
234 /* wait for about 100ms to flush the tx message list */
235 while (count--) {
236 if (list_empty(&ipmi->tx_msg_list))
237 break;
238 schedule_timeout(1);
239 }
240 if (!list_empty(&ipmi->tx_msg_list))
241 dev_warn(&pnp_dev->dev, "tx msg list is not NULL\n");
242} 324}
243 325
244static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data) 326static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
@@ -302,7 +384,6 @@ static void ipmi_register_bmc(int iface, struct device *dev)
302{ 384{
303 struct acpi_ipmi_device *ipmi_device, *temp; 385 struct acpi_ipmi_device *ipmi_device, *temp;
304 struct pnp_dev *pnp_dev; 386 struct pnp_dev *pnp_dev;
305 ipmi_user_t user;
306 int err; 387 int err;
307 struct ipmi_smi_info smi_data; 388 struct ipmi_smi_info smi_data;
308 acpi_handle handle; 389 acpi_handle handle;
@@ -312,12 +393,18 @@ static void ipmi_register_bmc(int iface, struct device *dev)
312 if (err) 393 if (err)
313 return; 394 return;
314 395
315 if (smi_data.addr_src != SI_ACPI) { 396 if (smi_data.addr_src != SI_ACPI)
316 put_device(smi_data.dev); 397 goto err_ref;
317 return;
318 }
319
320 handle = smi_data.addr_info.acpi_info.acpi_handle; 398 handle = smi_data.addr_info.acpi_info.acpi_handle;
399 if (!handle)
400 goto err_ref;
401 pnp_dev = to_pnp_dev(smi_data.dev);
402
403 ipmi_device = ipmi_dev_alloc(iface, &smi_data, handle);
404 if (!ipmi_device) {
405 dev_warn(&pnp_dev->dev, "Can't create IPMI user interface\n");
406 goto err_ref;
407 }
321 408
322 mutex_lock(&driver_data.ipmi_lock); 409 mutex_lock(&driver_data.ipmi_lock);
323 list_for_each_entry(temp, &driver_data.ipmi_devices, head) { 410 list_for_each_entry(temp, &driver_data.ipmi_devices, head) {
@@ -326,34 +413,18 @@ static void ipmi_register_bmc(int iface, struct device *dev)
326 * to the device list, don't add it again. 413 * to the device list, don't add it again.
327 */ 414 */
328 if (temp->handle == handle) 415 if (temp->handle == handle)
329 goto out; 416 goto err_lock;
330 } 417 }
331 418
332 ipmi_device = kzalloc(sizeof(*ipmi_device), GFP_KERNEL); 419 list_add_tail(&ipmi_device->head, &driver_data.ipmi_devices);
333
334 if (!ipmi_device)
335 goto out;
336
337 pnp_dev = to_pnp_dev(smi_data.dev);
338 ipmi_device->handle = handle;
339 ipmi_device->pnp_dev = pnp_dev;
340
341 err = ipmi_create_user(iface, &driver_data.ipmi_hndlrs,
342 ipmi_device, &user);
343 if (err) {
344 dev_warn(&pnp_dev->dev, "Can't create IPMI user interface\n");
345 kfree(ipmi_device);
346 goto out;
347 }
348 acpi_add_ipmi_device(ipmi_device);
349 ipmi_device->user_interface = user;
350 ipmi_device->ipmi_ifnum = iface;
351 mutex_unlock(&driver_data.ipmi_lock); 420 mutex_unlock(&driver_data.ipmi_lock);
352 memcpy(&ipmi_device->smi_data, &smi_data, sizeof(struct ipmi_smi_info)); 421 put_device(smi_data.dev);
353 return; 422 return;
354 423
355out: 424err_lock:
356 mutex_unlock(&driver_data.ipmi_lock); 425 mutex_unlock(&driver_data.ipmi_lock);
426 ipmi_dev_release(ipmi_device);
427err_ref:
357 put_device(smi_data.dev); 428 put_device(smi_data.dev);
358 return; 429 return;
359} 430}
@@ -361,19 +432,22 @@ out:
361static void ipmi_bmc_gone(int iface) 432static void ipmi_bmc_gone(int iface)
362{ 433{
363 struct acpi_ipmi_device *ipmi_device, *temp; 434 struct acpi_ipmi_device *ipmi_device, *temp;
435 bool dev_found = false;
364 436
365 mutex_lock(&driver_data.ipmi_lock); 437 mutex_lock(&driver_data.ipmi_lock);
366 list_for_each_entry_safe(ipmi_device, temp, 438 list_for_each_entry_safe(ipmi_device, temp,
367 &driver_data.ipmi_devices, head) { 439 &driver_data.ipmi_devices, head) {
368 if (ipmi_device->ipmi_ifnum != iface) 440 if (ipmi_device->ipmi_ifnum != iface) {
369 continue; 441 dev_found = true;
370 442 __ipmi_dev_kill(ipmi_device);
371 acpi_remove_ipmi_device(ipmi_device); 443 break;
372 put_device(ipmi_device->smi_data.dev); 444 }
373 kfree(ipmi_device);
374 break;
375 } 445 }
376 mutex_unlock(&driver_data.ipmi_lock); 446 mutex_unlock(&driver_data.ipmi_lock);
447 if (dev_found) {
448 ipmi_flush_tx_msg(ipmi_device);
449 acpi_ipmi_dev_put(ipmi_device);
450 }
377} 451}
378/* -------------------------------------------------------------------------- 452/* --------------------------------------------------------------------------
379 * Address Space Management 453 * Address Space Management
@@ -397,7 +471,8 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
397 void *handler_context, void *region_context) 471 void *handler_context, void *region_context)
398{ 472{
399 struct acpi_ipmi_msg *tx_msg; 473 struct acpi_ipmi_msg *tx_msg;
400 struct acpi_ipmi_device *ipmi_device = handler_context; 474 int iface = (long)handler_context;
475 struct acpi_ipmi_device *ipmi_device;
401 int err; 476 int err;
402 acpi_status status; 477 acpi_status status;
403 unsigned long flags; 478 unsigned long flags;
@@ -410,20 +485,31 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
410 if ((function & ACPI_IO_MASK) == ACPI_READ) 485 if ((function & ACPI_IO_MASK) == ACPI_READ)
411 return AE_TYPE; 486 return AE_TYPE;
412 487
413 if (!ipmi_device->user_interface) 488 ipmi_device = acpi_ipmi_dev_get(iface);
489 if (!ipmi_device)
414 return AE_NOT_EXIST; 490 return AE_NOT_EXIST;
415 491
416 tx_msg = acpi_alloc_ipmi_msg(ipmi_device); 492 tx_msg = acpi_alloc_ipmi_msg(ipmi_device);
417 if (!tx_msg) 493 if (!tx_msg) {
418 return AE_NO_MEMORY; 494 status = AE_NO_MEMORY;
495 goto out_ref;
496 }
419 497
420 if (acpi_format_ipmi_request(tx_msg, address, value) != 0) { 498 if (acpi_format_ipmi_request(tx_msg, address, value) != 0) {
421 status = AE_TYPE; 499 status = AE_TYPE;
422 goto out_msg; 500 goto out_msg;
423 } 501 }
502 mutex_lock(&driver_data.ipmi_lock);
503 /* Do not add a tx_msg that can not be flushed. */
504 if (ipmi_device->dead) {
505 status = AE_NOT_EXIST;
506 mutex_unlock(&driver_data.ipmi_lock);
507 goto out_msg;
508 }
424 spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags); 509 spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
425 list_add_tail(&tx_msg->head, &ipmi_device->tx_msg_list); 510 list_add_tail(&tx_msg->head, &ipmi_device->tx_msg_list);
426 spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags); 511 spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
512 mutex_unlock(&driver_data.ipmi_lock);
427 err = ipmi_request_settime(ipmi_device->user_interface, 513 err = ipmi_request_settime(ipmi_device->user_interface,
428 &tx_msg->addr, 514 &tx_msg->addr,
429 tx_msg->tx_msgid, 515 tx_msg->tx_msgid,
@@ -443,6 +529,8 @@ out_list:
443 spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags); 529 spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
444out_msg: 530out_msg:
445 kfree(tx_msg); 531 kfree(tx_msg);
532out_ref:
533 acpi_ipmi_dev_put(ipmi_device);
446 return status; 534 return status;
447} 535}
448 536
@@ -465,9 +553,8 @@ static int ipmi_install_space_handler(struct acpi_ipmi_device *ipmi)
465 return 0; 553 return 0;
466 554
467 status = acpi_install_address_space_handler(ipmi->handle, 555 status = acpi_install_address_space_handler(ipmi->handle,
468 ACPI_ADR_SPACE_IPMI, 556 ACPI_ADR_SPACE_IPMI, &acpi_ipmi_space_handler,
469 &acpi_ipmi_space_handler, 557 NULL, (void *)((long)ipmi->ipmi_ifnum));
470 NULL, ipmi);
471 if (ACPI_FAILURE(status)) { 558 if (ACPI_FAILURE(status)) {
472 struct pnp_dev *pnp_dev = ipmi->pnp_dev; 559 struct pnp_dev *pnp_dev = ipmi->pnp_dev;
473 dev_warn(&pnp_dev->dev, "Can't register IPMI opregion space " 560 dev_warn(&pnp_dev->dev, "Can't register IPMI opregion space "
@@ -478,36 +565,6 @@ static int ipmi_install_space_handler(struct acpi_ipmi_device *ipmi)
478 return 0; 565 return 0;
479} 566}
480 567
481static void acpi_add_ipmi_device(struct acpi_ipmi_device *ipmi_device)
482{
483
484 INIT_LIST_HEAD(&ipmi_device->head);
485
486 spin_lock_init(&ipmi_device->tx_msg_lock);
487 INIT_LIST_HEAD(&ipmi_device->tx_msg_list);
488 ipmi_install_space_handler(ipmi_device);
489
490 list_add_tail(&ipmi_device->head, &driver_data.ipmi_devices);
491}
492
493static void acpi_remove_ipmi_device(struct acpi_ipmi_device *ipmi_device)
494{
495 /*
496 * If the IPMI user interface is created, it should be
497 * destroyed.
498 */
499 if (ipmi_device->user_interface) {
500 ipmi_destroy_user(ipmi_device->user_interface);
501 ipmi_device->user_interface = NULL;
502 }
503 /* flush the Tx_msg list */
504 if (!list_empty(&ipmi_device->tx_msg_list))
505 ipmi_flush_tx_msg(ipmi_device);
506
507 list_del(&ipmi_device->head);
508 ipmi_remove_space_handler(ipmi_device);
509}
510
511static int __init acpi_ipmi_init(void) 568static int __init acpi_ipmi_init(void)
512{ 569{
513 int result = 0; 570 int result = 0;
@@ -524,7 +581,7 @@ static int __init acpi_ipmi_init(void)
524 581
525static void __exit acpi_ipmi_exit(void) 582static void __exit acpi_ipmi_exit(void)
526{ 583{
527 struct acpi_ipmi_device *ipmi_device, *temp; 584 struct acpi_ipmi_device *ipmi_device;
528 585
529 if (acpi_disabled) 586 if (acpi_disabled)
530 return; 587 return;
@@ -538,11 +595,17 @@ static void __exit acpi_ipmi_exit(void)
538 * handler and free it. 595 * handler and free it.
539 */ 596 */
540 mutex_lock(&driver_data.ipmi_lock); 597 mutex_lock(&driver_data.ipmi_lock);
541 list_for_each_entry_safe(ipmi_device, temp, 598 while (!list_empty(&driver_data.ipmi_devices)) {
542 &driver_data.ipmi_devices, head) { 599 ipmi_device = list_first_entry(&driver_data.ipmi_devices,
543 acpi_remove_ipmi_device(ipmi_device); 600 struct acpi_ipmi_device,
544 put_device(ipmi_device->smi_data.dev); 601 head);
545 kfree(ipmi_device); 602 __ipmi_dev_kill(ipmi_device);
603 mutex_unlock(&driver_data.ipmi_lock);
604
605 ipmi_flush_tx_msg(ipmi_device);
606 acpi_ipmi_dev_put(ipmi_device);
607
608 mutex_lock(&driver_data.ipmi_lock);
546 } 609 }
547 mutex_unlock(&driver_data.ipmi_lock); 610 mutex_unlock(&driver_data.ipmi_lock);
548} 611}