aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorBjorn Andersson <bjorn.andersson@sonymobile.com>2016-02-18 01:39:03 -0500
committerAndy Gross <andy.gross@linaro.org>2016-03-30 18:20:51 -0400
commit995b170aeaef4afe0c3469d14b9c80ff2e8a98d7 (patch)
tree7f72aa324c1e94c480a334e67ded3fb381cf7212
parent39f0db298e7c02a29371fb39cabdd5d76e6b726c (diff)
soc: qcom: smd: Split discovery and state change work
Split the two steps of channel discovery and state change handling into two different workers. This allows for new channels to be found while we're probing, which is required as we introduce multi-channel support. Signed-off-by: Bjorn Andersson <bjorn.andersson@sonymobile.com> Signed-off-by: Bjorn Andersson <bjorn.andersson@linaro.org> Signed-off-by: Andy Gross <andy.gross@linaro.org>
-rw-r--r--drivers/soc/qcom/smd.c58
1 files changed, 31 insertions, 27 deletions
diff --git a/drivers/soc/qcom/smd.c b/drivers/soc/qcom/smd.c
index c357842b92e1..e8972ddfee85 100644
--- a/drivers/soc/qcom/smd.c
+++ b/drivers/soc/qcom/smd.c
@@ -106,9 +106,9 @@ static const struct {
106 * @channels: list of all channels detected on this edge 106 * @channels: list of all channels detected on this edge
107 * @channels_lock: guard for modifications of @channels 107 * @channels_lock: guard for modifications of @channels
108 * @allocated: array of bitmaps representing already allocated channels 108 * @allocated: array of bitmaps representing already allocated channels
109 * @need_rescan: flag that the @work needs to scan smem for new channels
110 * @smem_available: last available amount of smem triggering a channel scan 109 * @smem_available: last available amount of smem triggering a channel scan
111 * @work: work item for edge house keeping 110 * @scan_work: work item for discovering new channels
111 * @state_work: work item for edge state changes
112 */ 112 */
113struct qcom_smd_edge { 113struct qcom_smd_edge {
114 struct qcom_smd *smd; 114 struct qcom_smd *smd;
@@ -127,10 +127,10 @@ struct qcom_smd_edge {
127 127
128 DECLARE_BITMAP(allocated[SMD_ALLOC_TBL_COUNT], SMD_ALLOC_TBL_SIZE); 128 DECLARE_BITMAP(allocated[SMD_ALLOC_TBL_COUNT], SMD_ALLOC_TBL_SIZE);
129 129
130 bool need_rescan;
131 unsigned smem_available; 130 unsigned smem_available;
132 131
133 struct work_struct work; 132 struct work_struct scan_work;
133 struct work_struct state_work;
134}; 134};
135 135
136/* 136/*
@@ -614,7 +614,8 @@ static irqreturn_t qcom_smd_edge_intr(int irq, void *data)
614 struct qcom_smd_edge *edge = data; 614 struct qcom_smd_edge *edge = data;
615 struct qcom_smd_channel *channel; 615 struct qcom_smd_channel *channel;
616 unsigned available; 616 unsigned available;
617 bool kick_worker = false; 617 bool kick_scanner = false;
618 bool kick_state = false;
618 619
619 /* 620 /*
620 * Handle state changes or data on each of the channels on this edge 621 * Handle state changes or data on each of the channels on this edge
@@ -622,7 +623,7 @@ static irqreturn_t qcom_smd_edge_intr(int irq, void *data)
622 spin_lock(&edge->channels_lock); 623 spin_lock(&edge->channels_lock);
623 list_for_each_entry(channel, &edge->channels, list) { 624 list_for_each_entry(channel, &edge->channels, list) {
624 spin_lock(&channel->recv_lock); 625 spin_lock(&channel->recv_lock);
625 kick_worker |= qcom_smd_channel_intr(channel); 626 kick_state |= qcom_smd_channel_intr(channel);
626 spin_unlock(&channel->recv_lock); 627 spin_unlock(&channel->recv_lock);
627 } 628 }
628 spin_unlock(&edge->channels_lock); 629 spin_unlock(&edge->channels_lock);
@@ -635,12 +636,13 @@ static irqreturn_t qcom_smd_edge_intr(int irq, void *data)
635 available = qcom_smem_get_free_space(edge->remote_pid); 636 available = qcom_smem_get_free_space(edge->remote_pid);
636 if (available != edge->smem_available) { 637 if (available != edge->smem_available) {
637 edge->smem_available = available; 638 edge->smem_available = available;
638 edge->need_rescan = true; 639 kick_scanner = true;
639 kick_worker = true;
640 } 640 }
641 641
642 if (kick_worker) 642 if (kick_scanner)
643 schedule_work(&edge->work); 643 schedule_work(&edge->scan_work);
644 if (kick_state)
645 schedule_work(&edge->state_work);
644 646
645 return IRQ_HANDLED; 647 return IRQ_HANDLED;
646} 648}
@@ -1098,8 +1100,9 @@ free_name_and_channel:
1098 * qcom_smd_create_channel() to create representations of these and add 1100 * qcom_smd_create_channel() to create representations of these and add
1099 * them to the edge's list of channels. 1101 * them to the edge's list of channels.
1100 */ 1102 */
1101static void qcom_discover_channels(struct qcom_smd_edge *edge) 1103static void qcom_channel_scan_worker(struct work_struct *work)
1102{ 1104{
1105 struct qcom_smd_edge *edge = container_of(work, struct qcom_smd_edge, scan_work);
1103 struct qcom_smd_alloc_entry *alloc_tbl; 1106 struct qcom_smd_alloc_entry *alloc_tbl;
1104 struct qcom_smd_alloc_entry *entry; 1107 struct qcom_smd_alloc_entry *entry;
1105 struct qcom_smd_channel *channel; 1108 struct qcom_smd_channel *channel;
@@ -1152,7 +1155,7 @@ static void qcom_discover_channels(struct qcom_smd_edge *edge)
1152 } 1155 }
1153 } 1156 }
1154 1157
1155 schedule_work(&edge->work); 1158 schedule_work(&edge->state_work);
1156} 1159}
1157 1160
1158/* 1161/*
@@ -1160,29 +1163,23 @@ static void qcom_discover_channels(struct qcom_smd_edge *edge)
1160 * then scans all registered channels for state changes that should be handled 1163 * then scans all registered channels for state changes that should be handled
1161 * by creating or destroying smd client devices for the registered channels. 1164 * by creating or destroying smd client devices for the registered channels.
1162 * 1165 *
1163 * LOCKING: edge->channels_lock is not needed to be held during the traversal 1166 * LOCKING: edge->channels_lock only needs to cover the list operations, as the
1164 * of the channels list as it's done synchronously with the only writer. 1167 * worker is killed before any channels are deallocated
1165 */ 1168 */
1166static void qcom_channel_state_worker(struct work_struct *work) 1169static void qcom_channel_state_worker(struct work_struct *work)
1167{ 1170{
1168 struct qcom_smd_channel *channel; 1171 struct qcom_smd_channel *channel;
1169 struct qcom_smd_edge *edge = container_of(work, 1172 struct qcom_smd_edge *edge = container_of(work,
1170 struct qcom_smd_edge, 1173 struct qcom_smd_edge,
1171 work); 1174 state_work);
1172 unsigned remote_state; 1175 unsigned remote_state;
1173 1176 unsigned long flags;
1174 /*
1175 * Rescan smem if we have reason to belive that there are new channels.
1176 */
1177 if (edge->need_rescan) {
1178 edge->need_rescan = false;
1179 qcom_discover_channels(edge);
1180 }
1181 1177
1182 /* 1178 /*
1183 * Register a device for any closed channel where the remote processor 1179 * Register a device for any closed channel where the remote processor
1184 * is showing interest in opening the channel. 1180 * is showing interest in opening the channel.
1185 */ 1181 */
1182 spin_lock_irqsave(&edge->channels_lock, flags);
1186 list_for_each_entry(channel, &edge->channels, list) { 1183 list_for_each_entry(channel, &edge->channels, list) {
1187 if (channel->state != SMD_CHANNEL_CLOSED) 1184 if (channel->state != SMD_CHANNEL_CLOSED)
1188 continue; 1185 continue;
@@ -1192,7 +1189,9 @@ static void qcom_channel_state_worker(struct work_struct *work)
1192 remote_state != SMD_CHANNEL_OPENED) 1189 remote_state != SMD_CHANNEL_OPENED)
1193 continue; 1190 continue;
1194 1191
1192 spin_unlock_irqrestore(&edge->channels_lock, flags);
1195 qcom_smd_create_device(channel); 1193 qcom_smd_create_device(channel);
1194 spin_lock_irqsave(&edge->channels_lock, flags);
1196 } 1195 }
1197 1196
1198 /* 1197 /*
@@ -1209,8 +1208,11 @@ static void qcom_channel_state_worker(struct work_struct *work)
1209 remote_state == SMD_CHANNEL_OPENED) 1208 remote_state == SMD_CHANNEL_OPENED)
1210 continue; 1209 continue;
1211 1210
1211 spin_unlock_irqrestore(&edge->channels_lock, flags);
1212 qcom_smd_destroy_device(channel); 1212 qcom_smd_destroy_device(channel);
1213 spin_lock_irqsave(&edge->channels_lock, flags);
1213 } 1214 }
1215 spin_unlock_irqrestore(&edge->channels_lock, flags);
1214} 1216}
1215 1217
1216/* 1218/*
@@ -1228,7 +1230,8 @@ static int qcom_smd_parse_edge(struct device *dev,
1228 INIT_LIST_HEAD(&edge->channels); 1230 INIT_LIST_HEAD(&edge->channels);
1229 spin_lock_init(&edge->channels_lock); 1231 spin_lock_init(&edge->channels_lock);
1230 1232
1231 INIT_WORK(&edge->work, qcom_channel_state_worker); 1233 INIT_WORK(&edge->scan_work, qcom_channel_scan_worker);
1234 INIT_WORK(&edge->state_work, qcom_channel_state_worker);
1232 1235
1233 edge->of_node = of_node_get(node); 1236 edge->of_node = of_node_get(node);
1234 1237
@@ -1317,8 +1320,7 @@ static int qcom_smd_probe(struct platform_device *pdev)
1317 if (ret) 1320 if (ret)
1318 continue; 1321 continue;
1319 1322
1320 edge->need_rescan = true; 1323 schedule_work(&edge->scan_work);
1321 schedule_work(&edge->work);
1322 } 1324 }
1323 1325
1324 platform_set_drvdata(pdev, smd); 1326 platform_set_drvdata(pdev, smd);
@@ -1341,8 +1343,10 @@ static int qcom_smd_remove(struct platform_device *pdev)
1341 edge = &smd->edges[i]; 1343 edge = &smd->edges[i];
1342 1344
1343 disable_irq(edge->irq); 1345 disable_irq(edge->irq);
1344 cancel_work_sync(&edge->work); 1346 cancel_work_sync(&edge->scan_work);
1347 cancel_work_sync(&edge->state_work);
1345 1348
1349 /* No need to lock here, because the writer is gone */
1346 list_for_each_entry(channel, &edge->channels, list) { 1350 list_for_each_entry(channel, &edge->channels, list) {
1347 if (!channel->qsdev) 1351 if (!channel->qsdev)
1348 continue; 1352 continue;