author	Paul Fertser <fercerpav@gmail.com>	2009-07-27 16:58:48 -0400
committer	Samuel Ortiz <sameo@linux.intel.com>	2009-09-17 03:46:53 -0400
commit	bd8ef10261d7ae92ad2b4925afd2b56f46175c47 (patch)
tree	f88c4992b1473ff5b72dc5d0b97522bf964363d4 /drivers
parent	ed52e62ebec9e703eb0b69704feaf1b6e847d882 (diff)
mfd: revise locking for pcf50633 ADC
The current implementation is prone to races; this patch attempts to remove all but one of them (the remaining race is in pcf50633_adc_sync_read). The idea is that queue access needs to be guarded only when inserting and removing items. If we insert into an empty queue, the last interrupt has already fired and we need to trigger the ADC manually; otherwise the next conversion will be triggered by the interrupt handler upon completion of the previous one.

Signed-off-by: Paul Fertser <fercerpav@gmail.com>
Signed-off-by: Samuel Ortiz <sameo@linux.intel.com>
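To illustrate the scheme in isolation, here is a minimal userspace sketch of the same "trigger only when the queue goes from empty to non-empty" pattern, with a pthread mutex standing in for the kernel mutex. All names in it (adc_queue, hw_start_conversion, on_conversion_done) are illustrative stand-ins, not the driver's actual API:

#include <pthread.h>
#include <stdio.h>

#define FIFO_DEPTH 8			/* power of two, so (x + 1) & (DEPTH - 1) wraps */

struct adc_queue {
	int slots[FIFO_DEPTH];		/* 0 = empty slot; requests must be nonzero.
					 * A real driver stores request pointers. */
	int head, tail;
	pthread_mutex_t lock;
};

/* Stand-in for programming the ADC registers. */
static void hw_start_conversion(int req)
{
	printf("hw: starting conversion for request %d\n", req);
}

/*
 * Insert a request. Only the enqueue that finds the queue empty
 * (head == tail before the insert) kicks the hardware; every later
 * conversion is chained from the completion handler instead, so no
 * trigger is ever lost and none is issued twice.
 */
static int enqueue(struct adc_queue *q, int req)
{
	int ret = -1;			/* mirrors the driver's -EBUSY */

	pthread_mutex_lock(&q->lock);
	if (!q->slots[q->tail]) {	/* tail slot free, i.e. queue not full */
		q->slots[q->tail] = req;
		if (q->head == q->tail)	/* queue was empty: trigger manually */
			hw_start_conversion(req);
		q->tail = (q->tail + 1) & (FIFO_DEPTH - 1);
		ret = 0;
	}
	pthread_mutex_unlock(&q->lock);
	return ret;
}

/*
 * Completion handler: pop the finished request and, while still holding
 * the lock, start the next one, so the chained trigger cannot race with
 * a concurrent enqueue.
 */
static int on_conversion_done(struct adc_queue *q)
{
	int req;

	pthread_mutex_lock(&q->lock);
	req = q->slots[q->head];
	q->slots[q->head] = 0;
	q->head = (q->head + 1) & (FIFO_DEPTH - 1);
	if (q->slots[q->head])		/* more work pending: chain the next job */
		hw_start_conversion(q->slots[q->head]);
	pthread_mutex_unlock(&q->lock);
	return req;			/* caller runs the callback and frees */
}

int main(void)
{
	struct adc_queue q = { .lock = PTHREAD_MUTEX_INITIALIZER };

	enqueue(&q, 1);					/* empty queue: triggers hardware */
	enqueue(&q, 2);					/* queued only; completion chains it */
	printf("done: %d\n", on_conversion_done(&q));	/* pops 1, starts 2 */
	printf("done: %d\n", on_conversion_done(&q));	/* pops 2 */
	return 0;
}

The key design point, matching the patch, is that both the manual trigger in enqueue() and the chained trigger in on_conversion_done() happen with the queue lock held, so exactly one of the two fires for any given request.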
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/mfd/pcf50633-adc.c	32
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/drivers/mfd/pcf50633-adc.c b/drivers/mfd/pcf50633-adc.c
index c2d05becfa97..3d31e97d6a45 100644
--- a/drivers/mfd/pcf50633-adc.c
+++ b/drivers/mfd/pcf50633-adc.c
@@ -73,15 +73,10 @@ static void trigger_next_adc_job_if_any(struct pcf50633 *pcf)
 	struct pcf50633_adc *adc = __to_adc(pcf);
 	int head;
 
-	mutex_lock(&adc->queue_mutex);
-
 	head = adc->queue_head;
 
-	if (!adc->queue[head]) {
-		mutex_unlock(&adc->queue_mutex);
+	if (!adc->queue[head])
 		return;
-	}
-	mutex_unlock(&adc->queue_mutex);
 
 	adc_setup(pcf, adc->queue[head]->mux, adc->queue[head]->avg);
 }
@@ -99,16 +94,17 @@ adc_enqueue_request(struct pcf50633 *pcf, struct pcf50633_adc_request *req)
 
 	if (adc->queue[tail]) {
 		mutex_unlock(&adc->queue_mutex);
+		dev_err(pcf->dev, "ADC queue is full, dropping request\n");
 		return -EBUSY;
 	}
 
 	adc->queue[tail] = req;
+	if (head == tail)
+		trigger_next_adc_job_if_any(pcf);
 	adc->queue_tail = (tail + 1) & (PCF50633_MAX_ADC_FIFO_DEPTH - 1);
 
 	mutex_unlock(&adc->queue_mutex);
 
-	trigger_next_adc_job_if_any(pcf);
-
 	return 0;
 }
 
@@ -124,6 +120,7 @@ pcf50633_adc_sync_read_callback(struct pcf50633 *pcf, void *param, int result)
 int pcf50633_adc_sync_read(struct pcf50633 *pcf, int mux, int avg)
 {
 	struct pcf50633_adc_request *req;
+	int err;
 
 	/* req is freed when the result is ready, in interrupt handler */
 	req = kzalloc(sizeof(*req), GFP_KERNEL);
@@ -136,9 +133,13 @@ int pcf50633_adc_sync_read(struct pcf50633 *pcf, int mux, int avg)
 	req->callback_param = req;
 
 	init_completion(&req->completion);
-	adc_enqueue_request(pcf, req);
+	err = adc_enqueue_request(pcf, req);
+	if (err)
+		return err;
+
 	wait_for_completion(&req->completion);
 
+	/* FIXME by this time req might be already freed */
 	return req->result;
 }
 EXPORT_SYMBOL_GPL(pcf50633_adc_sync_read);
@@ -159,9 +160,7 @@ int pcf50633_adc_async_read(struct pcf50633 *pcf, int mux, int avg,
 	req->callback = callback;
 	req->callback_param = callback_param;
 
-	adc_enqueue_request(pcf, req);
-
-	return 0;
+	return adc_enqueue_request(pcf, req);
 }
 EXPORT_SYMBOL_GPL(pcf50633_adc_async_read);
 
@@ -184,7 +183,7 @@ static void pcf50633_adc_irq(int irq, void *data)
 	struct pcf50633_adc *adc = data;
 	struct pcf50633 *pcf = adc->pcf;
 	struct pcf50633_adc_request *req;
-	int head;
+	int head, res;
 
 	mutex_lock(&adc->queue_mutex);
 	head = adc->queue_head;
@@ -199,12 +198,13 @@ static void pcf50633_adc_irq(int irq, void *data)
 	adc->queue_head = (head + 1) &
 				(PCF50633_MAX_ADC_FIFO_DEPTH - 1);
 
+	res = adc_result(pcf);
+	trigger_next_adc_job_if_any(pcf);
+
 	mutex_unlock(&adc->queue_mutex);
 
-	req->callback(pcf, req->callback_param, adc_result(pcf));
+	req->callback(pcf, req->callback_param, res);
 	kfree(req);
-
-	trigger_next_adc_job_if_any(pcf);
 }
 
 static int __devinit pcf50633_adc_probe(struct platform_device *pdev)