aboutsummaryrefslogtreecommitdiffstats
path: root/sound/firewire
diff options
context:
space:
mode:
authorTakashi Sakamoto <o-takashi@sakamocchi.jp>2016-08-31 09:58:42 -0400
committerTakashi Iwai <tiwai@suse.de>2016-08-31 10:17:15 -0400
commit6b1ca4bcadf9ef077cc5f03c6822ba276ed14902 (patch)
tree9f30fa84c9ce173059e7250cba2722a3e763b159 /sound/firewire
parent04b2d9c9c319277ad4fbbb71855c256a9f4d5f98 (diff)
ALSA: fireworks: accessing to user space outside spinlock
In the hwdep interface of the fireworks driver, access to user space is performed in a critical section with local interrupts disabled. Depending on the architecture, accessing user space can cause a page fault exception. The local processor then stores the machine status and handles the synchronous event. A handler corresponding to the event can call the task scheduler to wait for the pages to be prepared. On a single-core processor, the state with local interrupts disabled is even worse, because usual interrupts from hardware are not handled either. This commit fixes this bug by performing the user-space access outside the spinlock. This commit also gives up counting the number of queued response messages, to simplify ring-buffer management. Reported-by: Vaishali Thakkar <vaishali.thakkar@oracle.com> Cc: stable@vger.kernel.org Fixes: 555e8a8f7f14 ('ALSA: fireworks: Add command/response functionality into hwdep interface') Signed-off-by: Takashi Sakamoto <o-takashi@sakamocchi.jp> Signed-off-by: Takashi Iwai <tiwai@suse.de>
Diffstat (limited to 'sound/firewire')
-rw-r--r--sound/firewire/fireworks/fireworks.h1
-rw-r--r--sound/firewire/fireworks/fireworks_hwdep.c71
-rw-r--r--sound/firewire/fireworks/fireworks_proc.c4
-rw-r--r--sound/firewire/fireworks/fireworks_transaction.c5
4 files changed, 56 insertions, 25 deletions
diff --git a/sound/firewire/fireworks/fireworks.h b/sound/firewire/fireworks/fireworks.h
index 03ed35237e2b..d73c12b8753d 100644
--- a/sound/firewire/fireworks/fireworks.h
+++ b/sound/firewire/fireworks/fireworks.h
@@ -108,7 +108,6 @@ struct snd_efw {
108 u8 *resp_buf; 108 u8 *resp_buf;
109 u8 *pull_ptr; 109 u8 *pull_ptr;
110 u8 *push_ptr; 110 u8 *push_ptr;
111 unsigned int resp_queues;
112}; 111};
113 112
114int snd_efw_transaction_cmd(struct fw_unit *unit, 113int snd_efw_transaction_cmd(struct fw_unit *unit,
diff --git a/sound/firewire/fireworks/fireworks_hwdep.c b/sound/firewire/fireworks/fireworks_hwdep.c
index 33df8655fe81..2e1d9a23920c 100644
--- a/sound/firewire/fireworks/fireworks_hwdep.c
+++ b/sound/firewire/fireworks/fireworks_hwdep.c
@@ -25,6 +25,7 @@ hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained,
25{ 25{
26 unsigned int length, till_end, type; 26 unsigned int length, till_end, type;
27 struct snd_efw_transaction *t; 27 struct snd_efw_transaction *t;
28 u8 *pull_ptr;
28 long count = 0; 29 long count = 0;
29 30
30 if (remained < sizeof(type) + sizeof(struct snd_efw_transaction)) 31 if (remained < sizeof(type) + sizeof(struct snd_efw_transaction))
@@ -38,8 +39,17 @@ hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained,
38 buf += sizeof(type); 39 buf += sizeof(type);
39 40
40 /* write into buffer as many responses as possible */ 41 /* write into buffer as many responses as possible */
41 while (efw->resp_queues > 0) { 42 spin_lock_irq(&efw->lock);
42 t = (struct snd_efw_transaction *)(efw->pull_ptr); 43
44 /*
45 * When another task reaches here during this task's access to user
46 * space, it picks up current position in buffer and can read the same
47 * series of responses.
48 */
49 pull_ptr = efw->pull_ptr;
50
51 while (efw->push_ptr != pull_ptr) {
52 t = (struct snd_efw_transaction *)(pull_ptr);
43 length = be32_to_cpu(t->length) * sizeof(__be32); 53 length = be32_to_cpu(t->length) * sizeof(__be32);
44 54
45 /* confirm enough space for this response */ 55 /* confirm enough space for this response */
@@ -49,26 +59,39 @@ hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained,
49 /* copy from ring buffer to user buffer */ 59 /* copy from ring buffer to user buffer */
50 while (length > 0) { 60 while (length > 0) {
51 till_end = snd_efw_resp_buf_size - 61 till_end = snd_efw_resp_buf_size -
52 (unsigned int)(efw->pull_ptr - efw->resp_buf); 62 (unsigned int)(pull_ptr - efw->resp_buf);
53 till_end = min_t(unsigned int, length, till_end); 63 till_end = min_t(unsigned int, length, till_end);
54 64
55 if (copy_to_user(buf, efw->pull_ptr, till_end)) 65 spin_unlock_irq(&efw->lock);
66
67 if (copy_to_user(buf, pull_ptr, till_end))
56 return -EFAULT; 68 return -EFAULT;
57 69
58 efw->pull_ptr += till_end; 70 spin_lock_irq(&efw->lock);
59 if (efw->pull_ptr >= efw->resp_buf + 71
60 snd_efw_resp_buf_size) 72 pull_ptr += till_end;
61 efw->pull_ptr -= snd_efw_resp_buf_size; 73 if (pull_ptr >= efw->resp_buf + snd_efw_resp_buf_size)
74 pull_ptr -= snd_efw_resp_buf_size;
62 75
63 length -= till_end; 76 length -= till_end;
64 buf += till_end; 77 buf += till_end;
65 count += till_end; 78 count += till_end;
66 remained -= till_end; 79 remained -= till_end;
67 } 80 }
68
69 efw->resp_queues--;
70 } 81 }
71 82
83 /*
84 * All of tasks can read from the buffer nearly simultaneously, but the
85 * last position for each task is different depending on the length of
86 * given buffer. Here, for simplicity, a position of buffer is set by
87 * the latest task. It's better for a listening application to allow one
88 * thread to read from the buffer. Unless, each task can read different
89 * sequence of responses depending on variation of buffer length.
90 */
91 efw->pull_ptr = pull_ptr;
92
93 spin_unlock_irq(&efw->lock);
94
72 return count; 95 return count;
73} 96}
74 97
@@ -76,14 +99,17 @@ static long
76hwdep_read_locked(struct snd_efw *efw, char __user *buf, long count, 99hwdep_read_locked(struct snd_efw *efw, char __user *buf, long count,
77 loff_t *offset) 100 loff_t *offset)
78{ 101{
79 union snd_firewire_event event; 102 union snd_firewire_event event = {
103 .lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS,
104 };
80 105
81 memset(&event, 0, sizeof(event)); 106 spin_lock_irq(&efw->lock);
82 107
83 event.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS;
84 event.lock_status.status = (efw->dev_lock_count > 0); 108 event.lock_status.status = (efw->dev_lock_count > 0);
85 efw->dev_lock_changed = false; 109 efw->dev_lock_changed = false;
86 110
111 spin_unlock_irq(&efw->lock);
112
87 count = min_t(long, count, sizeof(event.lock_status)); 113 count = min_t(long, count, sizeof(event.lock_status));
88 114
89 if (copy_to_user(buf, &event, count)) 115 if (copy_to_user(buf, &event, count))
@@ -98,10 +124,15 @@ hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
98{ 124{
99 struct snd_efw *efw = hwdep->private_data; 125 struct snd_efw *efw = hwdep->private_data;
100 DEFINE_WAIT(wait); 126 DEFINE_WAIT(wait);
127 bool dev_lock_changed;
128 bool queued;
101 129
102 spin_lock_irq(&efw->lock); 130 spin_lock_irq(&efw->lock);
103 131
104 while ((!efw->dev_lock_changed) && (efw->resp_queues == 0)) { 132 dev_lock_changed = efw->dev_lock_changed;
133 queued = efw->push_ptr != efw->pull_ptr;
134
135 while (!dev_lock_changed && !queued) {
105 prepare_to_wait(&efw->hwdep_wait, &wait, TASK_INTERRUPTIBLE); 136 prepare_to_wait(&efw->hwdep_wait, &wait, TASK_INTERRUPTIBLE);
106 spin_unlock_irq(&efw->lock); 137 spin_unlock_irq(&efw->lock);
107 schedule(); 138 schedule();
@@ -109,15 +140,17 @@ hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
109 if (signal_pending(current)) 140 if (signal_pending(current))
110 return -ERESTARTSYS; 141 return -ERESTARTSYS;
111 spin_lock_irq(&efw->lock); 142 spin_lock_irq(&efw->lock);
143 dev_lock_changed = efw->dev_lock_changed;
144 queued = efw->push_ptr != efw->pull_ptr;
112 } 145 }
113 146
114 if (efw->dev_lock_changed) 147 spin_unlock_irq(&efw->lock);
148
149 if (dev_lock_changed)
115 count = hwdep_read_locked(efw, buf, count, offset); 150 count = hwdep_read_locked(efw, buf, count, offset);
116 else if (efw->resp_queues > 0) 151 else if (queued)
117 count = hwdep_read_resp_buf(efw, buf, count, offset); 152 count = hwdep_read_resp_buf(efw, buf, count, offset);
118 153
119 spin_unlock_irq(&efw->lock);
120
121 return count; 154 return count;
122} 155}
123 156
@@ -160,7 +193,7 @@ hwdep_poll(struct snd_hwdep *hwdep, struct file *file, poll_table *wait)
160 poll_wait(file, &efw->hwdep_wait, wait); 193 poll_wait(file, &efw->hwdep_wait, wait);
161 194
162 spin_lock_irq(&efw->lock); 195 spin_lock_irq(&efw->lock);
163 if (efw->dev_lock_changed || (efw->resp_queues > 0)) 196 if (efw->dev_lock_changed || efw->pull_ptr != efw->push_ptr)
164 events = POLLIN | POLLRDNORM; 197 events = POLLIN | POLLRDNORM;
165 else 198 else
166 events = 0; 199 events = 0;
diff --git a/sound/firewire/fireworks/fireworks_proc.c b/sound/firewire/fireworks/fireworks_proc.c
index 0639dcb13f7d..beb0a0ffee57 100644
--- a/sound/firewire/fireworks/fireworks_proc.c
+++ b/sound/firewire/fireworks/fireworks_proc.c
@@ -188,8 +188,8 @@ proc_read_queues_state(struct snd_info_entry *entry,
188 else 188 else
189 consumed = (unsigned int)(efw->push_ptr - efw->pull_ptr); 189 consumed = (unsigned int)(efw->push_ptr - efw->pull_ptr);
190 190
191 snd_iprintf(buffer, "%d %d/%d\n", 191 snd_iprintf(buffer, "%d/%d\n",
192 efw->resp_queues, consumed, snd_efw_resp_buf_size); 192 consumed, snd_efw_resp_buf_size);
193} 193}
194 194
195static void 195static void
diff --git a/sound/firewire/fireworks/fireworks_transaction.c b/sound/firewire/fireworks/fireworks_transaction.c
index f550808d1784..36a08ba51ec7 100644
--- a/sound/firewire/fireworks/fireworks_transaction.c
+++ b/sound/firewire/fireworks/fireworks_transaction.c
@@ -121,11 +121,11 @@ copy_resp_to_buf(struct snd_efw *efw, void *data, size_t length, int *rcode)
121 size_t capacity, till_end; 121 size_t capacity, till_end;
122 struct snd_efw_transaction *t; 122 struct snd_efw_transaction *t;
123 123
124 spin_lock_irq(&efw->lock);
125
126 t = (struct snd_efw_transaction *)data; 124 t = (struct snd_efw_transaction *)data;
127 length = min_t(size_t, be32_to_cpu(t->length) * sizeof(u32), length); 125 length = min_t(size_t, be32_to_cpu(t->length) * sizeof(u32), length);
128 126
127 spin_lock_irq(&efw->lock);
128
129 if (efw->push_ptr < efw->pull_ptr) 129 if (efw->push_ptr < efw->pull_ptr)
130 capacity = (unsigned int)(efw->pull_ptr - efw->push_ptr); 130 capacity = (unsigned int)(efw->pull_ptr - efw->push_ptr);
131 else 131 else
@@ -155,7 +155,6 @@ copy_resp_to_buf(struct snd_efw *efw, void *data, size_t length, int *rcode)
155 } 155 }
156 156
157 /* for hwdep */ 157 /* for hwdep */
158 efw->resp_queues++;
159 wake_up(&efw->hwdep_wait); 158 wake_up(&efw->hwdep_wait);
160 159
161 *rcode = RCODE_COMPLETE; 160 *rcode = RCODE_COMPLETE;