author     Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>   2007-03-07 18:29:35 -0500
committer  Len Brown <len.brown@intel.com>                         2007-03-09 23:16:23 -0500
commit     9e197219605513c14d3eae41039ecf1b82d1920d
tree       143f16569165e172efd12d938003a48f61c7de62
parent     08e15e81a40e3241ce93b4a43886f3abda184aa6
ACPI: ec: fix race in status register access
Delay reading the EC status register until after the event that
prompted the read has actually occurred -- otherwise it is possible
to read, and act on, stale status left over from the previous event.

Do this with a perpetually incrementing "event_count": a new event is
detected when the counter advances, and only then is it safe to read
status.

There is no workaround for polling mode -- it is inherently exposed
to reading and acting on stale status, since it has no interrupt to
tell it that the event completed.
http://bugzilla.kernel.org/show_bug.cgi?id=8110
Signed-off-by: Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
-rw-r--r--  drivers/acpi/ec.c | 40
1 file changed, 23 insertions(+), 17 deletions(-)
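The core of the fix is a small pattern that the hunks below apply throughout the driver: sample the event counter before starting (or continuing) a transaction, let the GPE interrupt handler increment it, and refuse to act on a freshly read status byte until the counter has moved past the sampled value. Here is a minimal, self-contained sketch of that idea; the names (ec_event_irq, wait_for_fresh_event, and so on) are illustrative only and are not part of the patch:

/* Sketch of the event_count handshake (illustrative names, not driver code). */
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static atomic_t event_count = ATOMIC_INIT(1);
static DECLARE_WAIT_QUEUE_HEAD(ec_wait);

/* Interrupt (GPE) side: record that a new EC event has happened. */
static void ec_event_irq(void)
{
	atomic_inc(&event_count);
	wake_up(&ec_wait);
}

/* Status read before the counter advances may still describe the
 * previous event, so treat it as stale and keep waiting. */
static int event_is_fresh(unsigned int old_count)
{
	return old_count != atomic_read(&event_count);
}

/* Transaction side: sample the counter, kick off the EC operation,
 * then wait until an interrupt has bumped the counter before acting
 * on the status register. */
static int wait_for_fresh_event(void)
{
	unsigned int old_count = atomic_read(&event_count);

	/* ... write the command/data byte to the EC here ... */

	if (wait_event_timeout(ec_wait, event_is_fresh(old_count),
			       msecs_to_jiffies(500)))
		return 0;
	return -ETIME;
}

In the real driver, once the counter has advanced, acpi_ec_check_status() goes on to test the OBF/IBF bits of the status register; polling mode passes 0 as the sampled count, so the staleness check never holds it back (which is why the commit message notes there is no workaround for that mode).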
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index ab6888373795..a802962ff2b4 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -100,6 +100,7 @@ static struct acpi_ec {
 	unsigned long global_lock;
 	struct mutex lock;
 	atomic_t query_pending;
+	atomic_t event_count;
 	atomic_t leaving_burst;	/* 0 : No, 1 : Yes, 2: abort */
 	wait_queue_head_t wait;
 } *ec_ecdt;
@@ -131,10 +132,12 @@ static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data)
 	outb(data, ec->data_addr);
 }
 
-static inline int acpi_ec_check_status(struct acpi_ec *ec, enum ec_event event)
+static inline int acpi_ec_check_status(struct acpi_ec *ec, enum ec_event event,
+				       unsigned old_count)
 {
 	u8 status = acpi_ec_read_status(ec);
-
+	if (old_count == atomic_read(&ec->event_count))
+		return 0;
 	if (event == ACPI_EC_EVENT_OBF_1) {
 		if (status & ACPI_EC_FLAG_OBF)
 			return 1;
@@ -146,19 +149,19 @@ static inline int acpi_ec_check_status(struct acpi_ec *ec, enum ec_event event)
 	return 0;
 }
 
-static int acpi_ec_wait(struct acpi_ec *ec, enum ec_event event)
+static int acpi_ec_wait(struct acpi_ec *ec, enum ec_event event, unsigned count)
 {
 	if (acpi_ec_mode == EC_POLL) {
 		unsigned long delay = jiffies + msecs_to_jiffies(ACPI_EC_DELAY);
 		while (time_before(jiffies, delay)) {
-			if (acpi_ec_check_status(ec, event))
+			if (acpi_ec_check_status(ec, event, 0))
 				return 0;
 		}
 	} else {
 		if (wait_event_timeout(ec->wait,
-				       acpi_ec_check_status(ec, event),
+				       acpi_ec_check_status(ec, event, count),
 				       msecs_to_jiffies(ACPI_EC_DELAY)) ||
-		    acpi_ec_check_status(ec, event)) {
+		    acpi_ec_check_status(ec, event, 0)) {
 			return 0;
 		} else {
 			printk(KERN_ERR PREFIX "acpi_ec_wait timeout,"
@@ -225,21 +228,22 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, u8 command,
 			       u8 * rdata, unsigned rdata_len)
 {
 	int result = 0;
-
+	unsigned count = atomic_read(&ec->event_count);
 	acpi_ec_write_cmd(ec, command);
 
 	for (; wdata_len > 0; --wdata_len) {
-		result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0);
+		result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, count);
 		if (result) {
 			printk(KERN_ERR PREFIX
 			       "write_cmd timeout, command = %d\n", command);
 			goto end;
 		}
+		count = atomic_read(&ec->event_count);
 		acpi_ec_write_data(ec, *(wdata++));
 	}
 
 	if (!rdata_len) {
-		result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0);
+		result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, count);
 		if (result) {
 			printk(KERN_ERR PREFIX
 			       "finish-write timeout, command = %d\n", command);
@@ -250,13 +254,13 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, u8 command,
 	}
 
 	for (; rdata_len > 0; --rdata_len) {
-		result = acpi_ec_wait(ec, ACPI_EC_EVENT_OBF_1);
+		result = acpi_ec_wait(ec, ACPI_EC_EVENT_OBF_1, count);
 		if (result) {
 			printk(KERN_ERR PREFIX "read timeout, command = %d\n",
 			       command);
 			goto end;
 		}
-
+		count = atomic_read(&ec->event_count);
 		*(rdata++) = acpi_ec_read_data(ec);
 	}
       end:
@@ -288,7 +292,7 @@ static int acpi_ec_transaction(struct acpi_ec *ec, u8 command,
 	/* Make sure GPE is enabled before doing transaction */
 	acpi_enable_gpe(NULL, ec->gpe, ACPI_NOT_ISR);
 
-	status = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0);
+	status = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, 0);
 	if (status) {
 		printk(KERN_DEBUG PREFIX
 		       "input buffer is not empty, aborting transaction\n");
@@ -369,8 +373,8 @@ int ec_write(u8 addr, u8 val)
 EXPORT_SYMBOL(ec_write);
 
 int ec_transaction(u8 command,
-		    const u8 * wdata, unsigned wdata_len,
-		    u8 * rdata, unsigned rdata_len)
+		   const u8 * wdata, unsigned wdata_len,
+		   u8 * rdata, unsigned rdata_len)
 {
 	struct acpi_ec *ec;
 
@@ -435,7 +439,7 @@ static u32 acpi_ec_gpe_handler(void *data)
 	acpi_status status = AE_OK;
 	u8 value;
 	struct acpi_ec *ec = (struct acpi_ec *)data;
-
+	atomic_inc(&ec->event_count);
 	if (acpi_ec_mode == EC_INTR) {
 		wake_up(&ec->wait);
 	}
@@ -633,6 +637,7 @@ static int acpi_ec_add(struct acpi_device *device)
 	ec->uid = -1;
 	mutex_init(&ec->lock);
 	atomic_set(&ec->query_pending, 0);
+	atomic_set(&ec->event_count, 1);
 	if (acpi_ec_mode == EC_INTR) {
 		atomic_set(&ec->leaving_burst, 1);
 		init_waitqueue_head(&ec->wait);
@@ -807,6 +812,7 @@ acpi_fake_ecdt_callback(acpi_handle handle,
 	acpi_status status;
 
 	mutex_init(&ec_ecdt->lock);
+	atomic_set(&ec_ecdt->event_count, 1);
 	if (acpi_ec_mode == EC_INTR) {
 		init_waitqueue_head(&ec_ecdt->wait);
 	}
@@ -888,6 +894,7 @@ static int __init acpi_ec_get_real_ecdt(void)
 		return -ENOMEM;
 
 	mutex_init(&ec_ecdt->lock);
+	atomic_set(&ec_ecdt->event_count, 1);
 	if (acpi_ec_mode == EC_INTR) {
 		init_waitqueue_head(&ec_ecdt->wait);
 	}
@@ -1016,8 +1023,7 @@ static int __init acpi_ec_set_intr_mode(char *str)
 		acpi_ec_mode = EC_POLL;
 	}
 	acpi_ec_driver.ops.add = acpi_ec_add;
-	printk(KERN_NOTICE PREFIX "%s mode.\n",
-	       intr ? "interrupt" : "polling");
+	printk(KERN_NOTICE PREFIX "%s mode.\n", intr ? "interrupt" : "polling");
 
 	return 1;
 }