Diffstat (limited to 'drivers/acpi/ec.c')
-rw-r--r--	drivers/acpi/ec.c	146
1 files changed, 70 insertions, 76 deletions
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 7b4178393e34..06b78e5e33a1 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -65,16 +65,18 @@ enum ec_command {
 /* EC events */
 enum ec_event {
 	ACPI_EC_EVENT_OBF_1 = 1,	/* Output buffer full */
 	ACPI_EC_EVENT_IBF_0,	/* Input buffer empty */
 };
 
 #define ACPI_EC_DELAY		500	/* Wait 500ms max. during EC ops */
 #define ACPI_EC_UDELAY_GLK	1000	/* Wait 1ms max. to get global lock */
 
-static enum ec_mode {
-	EC_INTR = 1,	/* Output buffer full */
-	EC_POLL,	/* Input buffer empty */
-} acpi_ec_mode = EC_INTR;
+enum {
+	EC_FLAGS_WAIT_GPE = 0,	/* Don't check status until GPE arrives */
+	EC_FLAGS_QUERY_PENDING,	/* Query is pending */
+	EC_FLAGS_GPE_MODE,	/* Expect GPE to be sent for status change */
+	EC_FLAGS_ONLY_IBF_GPE,	/* Expect GPE only for IBF = 0 event */
+};
 
 static int acpi_ec_remove(struct acpi_device *device, int type);
 static int acpi_ec_start(struct acpi_device *device);
@@ -116,9 +118,8 @@ static struct acpi_ec {
 	unsigned long command_addr;
 	unsigned long data_addr;
 	unsigned long global_lock;
+	unsigned long flags;
 	struct mutex lock;
-	atomic_t query_pending;
-	atomic_t event_count;
 	wait_queue_head_t wait;
 	struct list_head list;
 	u8 handlers_installed;
@@ -148,45 +149,54 @@ static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data)
 	outb(data, ec->data_addr);
 }
 
-static inline int acpi_ec_check_status(struct acpi_ec *ec, enum ec_event event,
-				       unsigned old_count)
+static inline int acpi_ec_check_status(struct acpi_ec *ec, enum ec_event event)
 {
-	u8 status = acpi_ec_read_status(ec);
-	if (old_count == atomic_read(&ec->event_count))
+	if (test_bit(EC_FLAGS_WAIT_GPE, &ec->flags))
 		return 0;
 	if (event == ACPI_EC_EVENT_OBF_1) {
-		if (status & ACPI_EC_FLAG_OBF)
+		if (acpi_ec_read_status(ec) & ACPI_EC_FLAG_OBF)
 			return 1;
 	} else if (event == ACPI_EC_EVENT_IBF_0) {
-		if (!(status & ACPI_EC_FLAG_IBF))
+		if (!(acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF))
 			return 1;
 	}
 
 	return 0;
 }
 
-static int acpi_ec_wait(struct acpi_ec *ec, enum ec_event event,
-			unsigned count, int force_poll)
+static int acpi_ec_wait(struct acpi_ec *ec, enum ec_event event, int force_poll)
 {
-	if (unlikely(force_poll) || acpi_ec_mode == EC_POLL) {
+	if (likely(test_bit(EC_FLAGS_GPE_MODE, &ec->flags)) &&
+	    likely(!force_poll)) {
+		if (wait_event_timeout(ec->wait, acpi_ec_check_status(ec, event),
+				       msecs_to_jiffies(ACPI_EC_DELAY)))
+			return 0;
+		clear_bit(EC_FLAGS_WAIT_GPE, &ec->flags);
+		if (acpi_ec_check_status(ec, event)) {
+			if (event == ACPI_EC_EVENT_OBF_1) {
+				/* miss OBF = 1 GPE, don't expect it anymore */
+				printk(KERN_INFO PREFIX "missing OBF_1 confirmation,"
+					"switching to degraded mode.\n");
+				set_bit(EC_FLAGS_ONLY_IBF_GPE, &ec->flags);
+			} else {
+				/* missing GPEs, switch back to poll mode */
+				printk(KERN_INFO PREFIX "missing IBF_1 confirmations,"
+					"switch off interrupt mode.\n");
+				clear_bit(EC_FLAGS_GPE_MODE, &ec->flags);
+			}
+			return 0;
+		}
+	} else {
 		unsigned long delay = jiffies + msecs_to_jiffies(ACPI_EC_DELAY);
+		clear_bit(EC_FLAGS_WAIT_GPE, &ec->flags);
 		while (time_before(jiffies, delay)) {
-			if (acpi_ec_check_status(ec, event, 0))
+			if (acpi_ec_check_status(ec, event))
 				return 0;
 		}
-	} else {
-		if (wait_event_timeout(ec->wait,
-				       acpi_ec_check_status(ec, event, count),
-				       msecs_to_jiffies(ACPI_EC_DELAY)) ||
-		    acpi_ec_check_status(ec, event, 0)) {
-			return 0;
-		} else {
-			printk(KERN_ERR PREFIX "acpi_ec_wait timeout,"
+	}
+	printk(KERN_ERR PREFIX "acpi_ec_wait timeout,"
 		" status = %d, expect_event = %d\n",
 		acpi_ec_read_status(ec), event);
-		}
-	}
-
 	return -ETIME;
 }
 
@@ -196,39 +206,42 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, u8 command,
 			     int force_poll)
 {
 	int result = 0;
-	unsigned count = atomic_read(&ec->event_count);
+	set_bit(EC_FLAGS_WAIT_GPE, &ec->flags);
 	acpi_ec_write_cmd(ec, command);
 
 	for (; wdata_len > 0; --wdata_len) {
-		result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, count, force_poll);
+		result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, force_poll);
 		if (result) {
 			printk(KERN_ERR PREFIX
 			       "write_cmd timeout, command = %d\n", command);
 			goto end;
 		}
-		count = atomic_read(&ec->event_count);
+		set_bit(EC_FLAGS_WAIT_GPE, &ec->flags);
 		acpi_ec_write_data(ec, *(wdata++));
 	}
 
 	if (!rdata_len) {
-		result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, count, force_poll);
+		result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, force_poll);
 		if (result) {
 			printk(KERN_ERR PREFIX
 			       "finish-write timeout, command = %d\n", command);
 			goto end;
 		}
-	} else if (command == ACPI_EC_COMMAND_QUERY) {
-		atomic_set(&ec->query_pending, 0);
-	}
+	} else if (command == ACPI_EC_COMMAND_QUERY)
+		clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
 
 	for (; rdata_len > 0; --rdata_len) {
-		result = acpi_ec_wait(ec, ACPI_EC_EVENT_OBF_1, count, force_poll);
+		if (test_bit(EC_FLAGS_ONLY_IBF_GPE, &ec->flags))
+			force_poll = 1;
+		result = acpi_ec_wait(ec, ACPI_EC_EVENT_OBF_1, force_poll);
 		if (result) {
 			printk(KERN_ERR PREFIX "read timeout, command = %d\n",
 			       command);
 			goto end;
 		}
-		count = atomic_read(&ec->event_count);
+		/* Don't expect GPE after last read */
+		if (rdata_len > 1)
+			set_bit(EC_FLAGS_WAIT_GPE, &ec->flags);
 		*(rdata++) = acpi_ec_read_data(ec);
 	}
  end:
@@ -258,10 +271,7 @@ static int acpi_ec_transaction(struct acpi_ec *ec, u8 command,
 		}
 	}
 
-	/* Make sure GPE is enabled before doing transaction */
-	acpi_enable_gpe(NULL, ec->gpe, ACPI_NOT_ISR);
-
-	status = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, 0, 0);
+	status = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, 0);
 	if (status) {
 		printk(KERN_ERR PREFIX
 		       "input buffer is not empty, aborting transaction\n");
@@ -435,9 +445,9 @@ EXPORT_SYMBOL_GPL(acpi_ec_add_query_handler);
 
 void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
 {
-	struct acpi_ec_query_handler *handler;
+	struct acpi_ec_query_handler *handler, *tmp;
 	mutex_lock(&ec->lock);
-	list_for_each_entry(handler, &ec->list, node) {
+	list_for_each_entry_safe(handler, tmp, &ec->list, node) {
 		if (query_bit == handler->query_bit) {
 			list_del(&handler->node);
 			kfree(handler);
@@ -476,23 +486,24 @@ static void acpi_ec_gpe_query(void *ec_cxt)
 static u32 acpi_ec_gpe_handler(void *data)
 {
 	acpi_status status = AE_OK;
-	u8 value;
 	struct acpi_ec *ec = data;
 
-	atomic_inc(&ec->event_count);
-
-	if (acpi_ec_mode == EC_INTR) {
+	clear_bit(EC_FLAGS_WAIT_GPE, &ec->flags);
+	if (test_bit(EC_FLAGS_GPE_MODE, &ec->flags))
 		wake_up(&ec->wait);
-	}
 
-	value = acpi_ec_read_status(ec);
-	if ((value & ACPI_EC_FLAG_SCI) && !atomic_read(&ec->query_pending)) {
-		atomic_set(&ec->query_pending, 1);
-		status =
-		    acpi_os_execute(OSL_EC_BURST_HANDLER, acpi_ec_gpe_query, ec);
+	if (acpi_ec_read_status(ec) & ACPI_EC_FLAG_SCI) {
+		if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
+			status = acpi_os_execute(OSL_EC_BURST_HANDLER,
+				acpi_ec_gpe_query, ec);
+	} else if (unlikely(!test_bit(EC_FLAGS_GPE_MODE, &ec->flags))) {
+		/* this is non-query, must be confirmation */
+		printk(KERN_INFO PREFIX "non-query interrupt received,"
+			" switching to interrupt mode\n");
+		set_bit(EC_FLAGS_GPE_MODE, &ec->flags);
 	}
 
-	return status == AE_OK ?
+	return ACPI_SUCCESS(status) ?
 	    ACPI_INTERRUPT_HANDLED : ACPI_INTERRUPT_NOT_HANDLED;
 }
 
@@ -641,13 +652,10 @@ static struct acpi_ec *make_acpi_ec(void)
 	struct acpi_ec *ec = kzalloc(sizeof(struct acpi_ec), GFP_KERNEL);
 	if (!ec)
 		return NULL;
-
-	atomic_set(&ec->query_pending, 1);
-	atomic_set(&ec->event_count, 1);
+	ec->flags = 1 << EC_FLAGS_QUERY_PENDING;
 	mutex_init(&ec->lock);
 	init_waitqueue_head(&ec->wait);
 	INIT_LIST_HEAD(&ec->list);
-
 	return ec;
 }
 
@@ -741,6 +749,8 @@ static int acpi_ec_add(struct acpi_device *device)
 	acpi_ec_add_fs(device);
 	printk(KERN_INFO PREFIX "GPE = 0x%lx, I/O: command/status = 0x%lx, data = 0x%lx\n",
 			  ec->gpe, ec->command_addr, ec->data_addr);
+	printk(KERN_INFO PREFIX "driver started in %s mode\n",
+		(test_bit(EC_FLAGS_GPE_MODE, &ec->flags))?"interrupt":"poll");
 	return 0;
 }
 
@@ -833,7 +843,7 @@ static int acpi_ec_start(struct acpi_device *device)
 	ret = ec_install_handlers(ec);
 
 	/* EC is fully operational, allow queries */
-	atomic_set(&ec->query_pending, 0);
+	clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
 	return ret;
 }
 
@@ -924,20 +934,4 @@ static void __exit acpi_ec_exit(void)
 
 	return;
 }
 #endif /* 0 */
-
-static int __init acpi_ec_set_intr_mode(char *str)
-{
-	int intr;
-
-	if (!get_option(&str, &intr))
-		return 0;
-
-	acpi_ec_mode = (intr) ? EC_INTR : EC_POLL;
-
-	printk(KERN_NOTICE PREFIX "%s mode.\n", intr ? "interrupt" : "polling");
-
-	return 1;
-}
-
-__setup("ec_intr=", acpi_ec_set_intr_mode);