author		Lv Zheng <lv.zheng@intel.com>	2015-02-05 19:58:10 -0500
committer	Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2015-02-06 09:48:10 -0500
commit		f252cb09e1cb46834014aaa3814fbfb2352e9071 (patch)
tree		eb1ecf18723665754d6ba384009150cc368d0c0d
parent		e1d4d90fc0313d3d58cbd7912c90f8ef24df45ff (diff)
ACPI / EC: Add query flushing support
This patch implements QR_EC flushing support. A grace period runs from the
detection of an SCI_EVT to the submission/completion of the QR_EC
transaction. During this period, all EC command transactions are allowed
to be submitted.

Note that query periods and event periods are intentionally distinguished
to allow further improvements.

1. Query period: from the detection of an SCI_EVT to the submission of the
   QR_EC command. This period is used for storming prevention. Currently
   QR_EC is deferred to a work queue rather than issued directly from the
   IRQ context even when no other transactions are pending, so a malicious
   SCI_EVT GPE can act like a "level triggered" GPE and trigger a GPE
   storm. We need to be prepared for this. In the future, this may become
   part of advance_transaction(), where QR_EC submission will be attempted
   at appropriate positions to avoid such GPE storming.
2. Event period: from the detection of an SCI_EVT to the completion of the
   QR_EC command. We may extend it to the completion of the _Qxx
   evaluation. This is actually a grace period for event flushing, but we
   only flush queries due to the reason stated in known issue 1. That is
   also why we use EC_FLAGS_EVENT_xxx. During this period, QR_EC
   transactions need to pass the flushable submission check.

In this patch, the following flags are implemented:

1. EC_FLAGS_EVENT_ENABLED: derived from the old EC_FLAGS_QUERY_PENDING
   flag, which could block SCI_EVT handling. With this flag, the logic
   implemented by the original flag is extended:
   1. Old logic: unless both of the flags are set, the event poller will
      not be scheduled, and
   2. New logic: as soon as both of the flags are set, the event poller
      will be scheduled.
2. EC_FLAGS_EVENT_DETECTED: also derived from the old
   EC_FLAGS_QUERY_PENDING flag, which could block SCI_EVT detection. It
   can thus be used to indicate the storming prevention period for query
   submission. acpi_ec_submit_request()/acpi_ec_complete_request() are
   invoked to implement this period so that acpi_set_gpe() can be invoked
   under the "reference count > 0" condition.
3. EC_FLAGS_EVENT_PENDING: newly added to indicate the grace period for
   event flushing (query flushing for now).
   acpi_ec_submit_request()/acpi_ec_complete_request() are invoked to
   implement this period so that the flushing process can wait until the
   event handling (the query transaction for now) has completed.

Link: https://bugzilla.kernel.org/show_bug.cgi?id=82611
Link: https://bugzilla.kernel.org/show_bug.cgi?id=77431
Signed-off-by: Lv Zheng <lv.zheng@intel.com>
Tested-by: Ortwin Glück <odi@odi.ch>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
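For readers tracing the flag interplay described above, the short standalone
C sketch below is not part of the patch: struct ec_model, detect_event(),
submit_query() and complete_query() are hypothetical stand-ins for the real
acpi_ec_* helpers, and a plain integer models ec->reference_count. It walks
one SCI_EVT through the two periods: detection holds one reference until
QR_EC is written, the pending event holds another until the QR_EC
transaction completes, and only then can a flush drain back to the base
count.

/*
 * Illustrative sketch only (not kernel code): models the reference-count
 * bracketing described in the commit message with booleans and a counter.
 * All names here are hypothetical stand-ins for the acpi_ec_* helpers.
 */
#include <stdbool.h>
#include <stdio.h>

struct ec_model {
	bool event_enabled;	/* EC_FLAGS_EVENT_ENABLED */
	bool event_detected;	/* EC_FLAGS_EVENT_DETECTED: query period */
	bool event_pending;	/* EC_FLAGS_EVENT_PENDING: event period */
	int reference_count;	/* held references block the flush */
};

/* SCI_EVT seen: enter the query period and hold a reference. */
static void detect_event(struct ec_model *ec)
{
	if (!ec->event_detected) {
		ec->event_detected = true;
		ec->reference_count++;	/* released when QR_EC is written */
	}
	/* Schedule the poller only when both flags are set (new logic). */
	if (ec->event_enabled && ec->event_detected && !ec->event_pending) {
		ec->event_pending = true;
		ec->reference_count++;	/* released when QR_EC completes */
		printf("event poller scheduled\n");
	}
}

/* QR_EC written to the EC: the query period (storming prevention) ends. */
static void submit_query(struct ec_model *ec)
{
	ec->event_detected = false;
	ec->reference_count--;
}

/* QR_EC transaction completed: the event period ends. */
static void complete_query(struct ec_model *ec)
{
	ec->event_pending = false;
	ec->reference_count--;
}

int main(void)
{
	struct ec_model ec = { .event_enabled = true, .reference_count = 1 };

	detect_event(&ec);	/* SCI_EVT raised */
	submit_query(&ec);	/* QR_EC issued */
	complete_query(&ec);	/* QR_EC done: flush may now finish */
	printf("references held: %d\n", ec.reference_count);	/* back to 1 */
	return 0;
}

The point of the bracketing is that the driver's flush condition
(acpi_ec_flushed(), i.e. reference_count == 1 in the diff below) cannot
become true while either period is still open.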
 drivers/acpi/ec.c | 101 ++++++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 85 insertions(+), 16 deletions(-)
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 982b67faaaf3..40002ae7db2b 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -76,7 +76,9 @@ enum ec_command {
 					 * when trying to clear the EC */
 
 enum {
-	EC_FLAGS_QUERY_PENDING,		/* Query is pending */
+	EC_FLAGS_EVENT_ENABLED,		/* Event is enabled */
+	EC_FLAGS_EVENT_PENDING,		/* Event is pending */
+	EC_FLAGS_EVENT_DETECTED,	/* Event is detected */
 	EC_FLAGS_HANDLERS_INSTALLED,	/* Handlers for GPE and
 					 * OpReg are installed */
 	EC_FLAGS_STARTED,		/* Driver is started */
@@ -151,6 +153,12 @@ static bool acpi_ec_flushed(struct acpi_ec *ec)
 	return ec->reference_count == 1;
 }
 
+static bool acpi_ec_has_pending_event(struct acpi_ec *ec)
+{
+	return test_bit(EC_FLAGS_EVENT_DETECTED, &ec->flags) ||
+	       test_bit(EC_FLAGS_EVENT_PENDING, &ec->flags);
+}
+
 /* --------------------------------------------------------------------------
  *                           EC Registers
  * -------------------------------------------------------------------------- */
@@ -318,36 +326,93 @@ static void acpi_ec_clear_storm(struct acpi_ec *ec, u8 flag)
  *                                      the flush operation is not in
  *                                      progress
  * @ec: the EC device
+ * @allow_event: whether event should be handled
  *
  * This function must be used before taking a new action that should hold
  * the reference count. If this function returns false, then the action
  * must be discarded or it will prevent the flush operation from being
  * completed.
+ *
+ * During flushing, QR_EC command need to pass this check when there is a
+ * pending event, so that the reference count held for the pending event
+ * can be decreased by the completion of the QR_EC command.
  */
-static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec)
+static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec,
+					     bool allow_event)
 {
-	if (!acpi_ec_started(ec))
-		return false;
+	if (!acpi_ec_started(ec)) {
+		if (!allow_event || !acpi_ec_has_pending_event(ec))
+			return false;
+	}
 	acpi_ec_submit_request(ec);
 	return true;
 }
 
-static void acpi_ec_submit_query(struct acpi_ec *ec)
+static void acpi_ec_submit_event(struct acpi_ec *ec)
 {
-	if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
-		pr_debug("***** Event started *****\n");
+	if (!test_bit(EC_FLAGS_EVENT_DETECTED, &ec->flags) ||
+	    !test_bit(EC_FLAGS_EVENT_ENABLED, &ec->flags))
+		return;
+	/* Hold reference for pending event */
+	if (!acpi_ec_submit_flushable_request(ec, true))
+		return;
+	if (!test_and_set_bit(EC_FLAGS_EVENT_PENDING, &ec->flags)) {
+		pr_debug("***** Event query started *****\n");
 		schedule_work(&ec->work);
+		return;
+	}
+	acpi_ec_complete_request(ec);
+}
+
+static void acpi_ec_complete_event(struct acpi_ec *ec)
+{
+	if (ec->curr->command == ACPI_EC_COMMAND_QUERY) {
+		clear_bit(EC_FLAGS_EVENT_PENDING, &ec->flags);
+		pr_debug("***** Event query stopped *****\n");
+		/* Unhold reference for pending event */
+		acpi_ec_complete_request(ec);
+		/* Check if there is another SCI_EVT detected */
+		acpi_ec_submit_event(ec);
+	}
+}
+
+static void acpi_ec_submit_detection(struct acpi_ec *ec)
+{
+	/* Hold reference for query submission */
+	if (!acpi_ec_submit_flushable_request(ec, false))
+		return;
+	if (!test_and_set_bit(EC_FLAGS_EVENT_DETECTED, &ec->flags)) {
+		pr_debug("***** Event detection blocked *****\n");
+		acpi_ec_submit_event(ec);
+		return;
 	}
+	acpi_ec_complete_request(ec);
 }
 
-static void acpi_ec_complete_query(struct acpi_ec *ec)
+static void acpi_ec_complete_detection(struct acpi_ec *ec)
 {
 	if (ec->curr->command == ACPI_EC_COMMAND_QUERY) {
-		clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
-		pr_debug("***** Event stopped *****\n");
+		clear_bit(EC_FLAGS_EVENT_DETECTED, &ec->flags);
+		pr_debug("***** Event detetion unblocked *****\n");
+		/* Unhold reference for query submission */
+		acpi_ec_complete_request(ec);
 	}
 }
 
+static void acpi_ec_enable_event(struct acpi_ec *ec)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ec->lock, flags);
+	set_bit(EC_FLAGS_EVENT_ENABLED, &ec->flags);
+	/*
+	 * An event may be pending even with SCI_EVT=0, so QR_EC should
+	 * always be issued right after started.
+	 */
+	acpi_ec_submit_detection(ec);
+	spin_unlock_irqrestore(&ec->lock, flags);
+}
+
 static int ec_transaction_completed(struct acpi_ec *ec)
 {
 	unsigned long flags;
@@ -389,6 +454,7 @@ static void advance_transaction(struct acpi_ec *ec)
 				t->rdata[t->ri++] = acpi_ec_read_data(ec);
 				if (t->rlen == t->ri) {
 					t->flags |= ACPI_EC_COMMAND_COMPLETE;
+					acpi_ec_complete_event(ec);
 					if (t->command == ACPI_EC_COMMAND_QUERY)
 						pr_debug("***** Command(%s) hardware completion *****\n",
 							 acpi_ec_cmd_string(t->command));
@@ -399,6 +465,7 @@ static void advance_transaction(struct acpi_ec *ec)
 		} else if (t->wlen == t->wi &&
 			   (status & ACPI_EC_FLAG_IBF) == 0) {
 			t->flags |= ACPI_EC_COMMAND_COMPLETE;
+			acpi_ec_complete_event(ec);
 			wakeup = true;
 		}
 		goto out;
@@ -407,16 +474,17 @@ static void advance_transaction(struct acpi_ec *ec)
 		    !(status & ACPI_EC_FLAG_SCI) &&
 		    (t->command == ACPI_EC_COMMAND_QUERY)) {
 			t->flags |= ACPI_EC_COMMAND_POLL;
-			acpi_ec_complete_query(ec);
+			acpi_ec_complete_detection(ec);
 			t->rdata[t->ri++] = 0x00;
 			t->flags |= ACPI_EC_COMMAND_COMPLETE;
+			acpi_ec_complete_event(ec);
 			pr_debug("***** Command(%s) software completion *****\n",
 				 acpi_ec_cmd_string(t->command));
 			wakeup = true;
 		} else if ((status & ACPI_EC_FLAG_IBF) == 0) {
 			acpi_ec_write_cmd(ec, t->command);
 			t->flags |= ACPI_EC_COMMAND_POLL;
-			acpi_ec_complete_query(ec);
+			acpi_ec_complete_detection(ec);
 		} else
 			goto err;
 		goto out;
@@ -437,7 +505,7 @@ err:
 	}
 out:
 	if (status & ACPI_EC_FLAG_SCI)
-		acpi_ec_submit_query(ec);
+		acpi_ec_submit_detection(ec);
 	if (wakeup && in_interrupt())
 		wake_up(&ec->wait);
 }
@@ -498,7 +566,7 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
 	/* start transaction */
 	spin_lock_irqsave(&ec->lock, tmp);
 	/* Enable GPE for command processing (IBF=0/OBF=1) */
-	if (!acpi_ec_submit_flushable_request(ec)) {
+	if (!acpi_ec_submit_flushable_request(ec, true)) {
 		ret = -EINVAL;
 		goto unlock;
 	}
@@ -879,7 +947,9 @@ static void acpi_ec_gpe_poller(struct work_struct *work)
 {
 	struct acpi_ec *ec = container_of(work, struct acpi_ec, work);
 
+	pr_debug("***** Event poller started *****\n");
 	acpi_ec_query(ec, NULL);
+	pr_debug("***** Event poller stopped *****\n");
 }
 
 static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
@@ -949,7 +1019,6 @@ static struct acpi_ec *make_acpi_ec(void)
 
 	if (!ec)
 		return NULL;
-	ec->flags = 1 << EC_FLAGS_QUERY_PENDING;
 	mutex_init(&ec->mutex);
 	init_waitqueue_head(&ec->wait);
 	INIT_LIST_HEAD(&ec->list);
@@ -1100,7 +1169,7 @@ static int acpi_ec_add(struct acpi_device *device)
 	ret = ec_install_handlers(ec);
 
 	/* EC is fully operational, allow queries */
-	clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
+	acpi_ec_enable_event(ec);
 
 	/* Clear stale _Q events if hardware might require that */
 	if (EC_FLAGS_CLEAR_ON_RESUME)