aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDavid Herrmann <dh.herrmann@gmail.com>2013-04-08 00:13:19 -0400
committerDmitry Torokhov <dmitry.torokhov@gmail.com>2013-06-10 01:35:05 -0400
commit483180281f0ac60d1138710eb21f4b9961901294 (patch)
tree702dcb73d06e3bc6d3a2d9ce5a1bf663448acfa9
parent3d289517dfd48f6487efda81543c3dda8b0e66f2 (diff)
Input: evdev - flush queues during EVIOCGKEY-like ioctls
If userspace requests current KEY-state, they very likely assume that no such events are pending in the output queue of the evdev device. Otherwise, they will parse events which they already handled via EVIOCGKEY(). For XKB applications this can cause irreversible keyboard states if a modifier is locked multiple times because a CTRL-DOWN event is handled once via EVIOCGKEY() and once from the queue via read(), even though it should handle it only once. Therefore, let's do the only logical thing and flush the evdev queue atomically during this ioctl. We only flush events that are affected by the given ioctl. This only affects boolean events like KEY, SND, SW and LED. ABS, REL and others are not affected as duplicate events can be handled gracefully by user-space. Note: This actually breaks semantics of the evdev ABI. However, investigations showed that userspace already expects the new semantics and we end up fixing at least all XKB applications. All applications that are aware of this race-condition mirror the KEY state for each open-file and detect/drop duplicate events. Hence, they do not care whether duplicates are posted or not and work fine with this fix. Also note that we need proper locking to guarantee atomicity and avoid dead-locks. event_lock must be locked before queue_lock (see input-core). However, we can safely release event_lock while flushing the queue. This allows the input-core to proceed with pending events and only stop if it needs our queue_lock to post new events. This should guarantee that we don't block event-dispatching for too long while flushing a single event queue. Signed-off-by: David Herrmann <dh.herrmann@gmail.com> Acked-by: Peter Hutterer <peter.hutterer@who-t.net> Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
-rw-r--r--drivers/input/evdev.c133
1 files changed, 129 insertions, 4 deletions
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index f0f8928b3c8a..d2b34fbbc42e 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -52,6 +52,82 @@ struct evdev_client {
52 struct input_event buffer[]; 52 struct input_event buffer[];
53}; 53};
54 54
/*
 * Flush all queued events of type @type from @client's ring buffer,
 * compacting the surviving events in place. SYN_REPORT markers whose
 * group becomes empty after the flush are dropped as well, and
 * client->packet_head is updated to the last surviving complete packet.
 * Caller must hold client->buffer_lock.
 */
static void __evdev_flush_queue(struct evdev_client *client, unsigned int type)
{
	unsigned int i, head, num;
	/* bufsize is a power of two, so this mask wraps ring indices */
	unsigned int mask = client->bufsize - 1;
	bool is_report;
	struct input_event *ev;

	/* flushing by EV_SYN would eat the SYN_REPORT packet markers */
	BUG_ON(type == EV_SYN);

	/* 'head' is the write cursor for the compacted queue */
	head = client->tail;
	client->packet_head = client->tail;

	/* init to 1 so a leading SYN_REPORT will not be dropped */
	num = 1;

	for (i = client->tail; i != client->head; i = (i + 1) & mask) {
		ev = &client->buffer[i];
		is_report = ev->type == EV_SYN && ev->code == SYN_REPORT;

		if (ev->type == type) {
			/* drop matched entry */
			continue;
		} else if (is_report && !num) {
			/* drop empty SYN_REPORT groups */
			continue;
		} else if (head != i) {
			/* move entry to fill the gap */
			client->buffer[head].time = ev->time;
			client->buffer[head].type = ev->type;
			client->buffer[head].code = ev->code;
			client->buffer[head].value = ev->value;
		}

		/* count events kept in the current packet */
		num++;
		head = (head + 1) & mask;

		if (is_report) {
			/* packet complete: reset counter, publish its end */
			num = 0;
			client->packet_head = head;
		}
	}

	/* everything past 'head' was dropped or moved forward */
	client->head = head;
}
100
/*
 * Queue a SYN_DROPPED event on @client so user-space notices that events
 * were lost and must re-sync its view of the device state.
 * Takes client->buffer_lock itself, so the caller must NOT hold it.
 */
static void evdev_queue_syn_dropped(struct evdev_client *client)
{
	unsigned long flags;
	struct input_event ev;
	ktime_t time;

	/*
	 * Stamp the event in the client's requested clock domain;
	 * subtracting the monotonic offset presumably converts to
	 * CLOCK_REALTIME — confirm against evdev clkid handling.
	 */
	time = ktime_get();
	if (client->clkid != CLOCK_MONOTONIC)
		time = ktime_sub(time, ktime_get_monotonic_offset());

	ev.time = ktime_to_timeval(time);
	ev.type = EV_SYN;
	ev.code = SYN_DROPPED;
	ev.value = 0;

	spin_lock_irqsave(&client->buffer_lock, flags);

	/* bufsize is a power of two, so masking wraps the ring index */
	client->buffer[client->head++] = ev;
	client->head &= client->bufsize - 1;

	if (unlikely(client->head == client->tail)) {
		/* drop queue but keep our SYN_DROPPED event */
		client->tail = (client->head - 1) & (client->bufsize - 1);
		client->packet_head = client->tail;
	}

	spin_unlock_irqrestore(&client->buffer_lock, flags);
}
130
55static void __pass_event(struct evdev_client *client, 131static void __pass_event(struct evdev_client *client,
56 const struct input_event *event) 132 const struct input_event *event)
57{ 133{
@@ -650,6 +726,51 @@ static int evdev_handle_set_keycode_v2(struct input_dev *dev, void __user *p)
650 return input_set_keycode(dev, &ke); 726 return input_set_keycode(dev, &ke);
651} 727}
652 728
729/*
730 * If we transfer state to the user, we should flush all pending events
731 * of the same type from the client's queue. Otherwise, they might end up
732 * with duplicate events, which can screw up client's state tracking.
733 * If bits_to_user fails after flushing the queue, we queue a SYN_DROPPED
734 * event so user-space will notice missing events.
735 *
736 * LOCKING:
737 * We need to take event_lock before buffer_lock to avoid dead-locks. But we
738 * need the even_lock only to guarantee consistent state. We can safely release
739 * it while flushing the queue. This allows input-core to handle filters while
740 * we flush the queue.
741 */
742static int evdev_handle_get_val(struct evdev_client *client,
743 struct input_dev *dev, unsigned int type,
744 unsigned long *bits, unsigned int max,
745 unsigned int size, void __user *p, int compat)
746{
747 int ret;
748 unsigned long *mem;
749
750 mem = kmalloc(sizeof(unsigned long) * max, GFP_KERNEL);
751 if (!mem)
752 return -ENOMEM;
753
754 spin_lock_irq(&dev->event_lock);
755 spin_lock(&client->buffer_lock);
756
757 memcpy(mem, bits, sizeof(unsigned long) * max);
758
759 spin_unlock(&dev->event_lock);
760
761 __evdev_flush_queue(client, type);
762
763 spin_unlock_irq(&client->buffer_lock);
764
765 ret = bits_to_user(mem, max, size, p, compat);
766 if (ret < 0)
767 evdev_queue_syn_dropped(client);
768
769 kfree(mem);
770
771 return ret;
772}
773
653static int evdev_handle_mt_request(struct input_dev *dev, 774static int evdev_handle_mt_request(struct input_dev *dev,
654 unsigned int size, 775 unsigned int size,
655 int __user *ip) 776 int __user *ip)
@@ -771,16 +892,20 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd,
771 return evdev_handle_mt_request(dev, size, ip); 892 return evdev_handle_mt_request(dev, size, ip);
772 893
773 case EVIOCGKEY(0): 894 case EVIOCGKEY(0):
774 return bits_to_user(dev->key, KEY_MAX, size, p, compat_mode); 895 return evdev_handle_get_val(client, dev, EV_KEY, dev->key,
896 KEY_MAX, size, p, compat_mode);
775 897
776 case EVIOCGLED(0): 898 case EVIOCGLED(0):
777 return bits_to_user(dev->led, LED_MAX, size, p, compat_mode); 899 return evdev_handle_get_val(client, dev, EV_LED, dev->led,
900 LED_MAX, size, p, compat_mode);
778 901
779 case EVIOCGSND(0): 902 case EVIOCGSND(0):
780 return bits_to_user(dev->snd, SND_MAX, size, p, compat_mode); 903 return evdev_handle_get_val(client, dev, EV_SND, dev->snd,
904 SND_MAX, size, p, compat_mode);
781 905
782 case EVIOCGSW(0): 906 case EVIOCGSW(0):
783 return bits_to_user(dev->sw, SW_MAX, size, p, compat_mode); 907 return evdev_handle_get_val(client, dev, EV_SW, dev->sw,
908 SW_MAX, size, p, compat_mode);
784 909
785 case EVIOCGNAME(0): 910 case EVIOCGNAME(0):
786 return str_to_user(dev->name, size, p); 911 return str_to_user(dev->name, size, p);