author     Fancy Fang <B47543@freescale.com>        2013-12-09 05:36:39 -0500
committer  Nitin Garg <nitin.garg@freescale.com>    2014-04-16 09:47:28 -0400
commit     f296d38e7f1301a9869197667842c18207c2fbfc
tree       15a27d5288fedfe46d9eb48a7f02b49e59faa2e4
parent     ce910d5a6537c67d1351192e9cf5df43f2935e09
ENGR00291400 PXP: Organize PXP task queue to be FIFO
Requested PXP tasks used to be handled on a per-channel basis: all the
tasks queued on one channel were processed one by one, and tasks on
another channel only got a chance after every task on the previous
channel had finished. This let a single channel occupy the PXP hardware
exclusively, while other channels might never get PXP service. Turn the
PXP task queue into a single FIFO so that tasks are served in submission
order and no channel can monopolize the PXP.
Signed-off-by: Fancy Fang <B47543@freescale.com>
-rw-r--r--  drivers/dma/pxp/pxp_dma_v2.c  | 59
-rw-r--r--  include/linux/pxp_dma.h       |  1
2 files changed, 9 insertions, 51 deletions
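For context, the change boils down to keeping one shared FIFO of descriptors instead of a per-channel active_list: pxp_issue_pending() appends each channel's descriptors to the global 'head' list, and pxpdma_dostart_work()/pxp_irq() always service head.next, whatever channel it came from. The snippet below is a minimal user-space sketch of that ordering idea only; the type names, list helpers, and function names are simplified stand-ins, not the driver's real API.

/*
 * Minimal sketch of the FIFO dispatch pattern introduced by this patch.
 * Descriptors from all channels go onto one global list and are serviced
 * strictly in submission order.
 */
#include <stdio.h>
#include <stdlib.h>

struct channel {
        int id;
};

struct task_desc {
        struct channel *chan;           /* issuing channel, like desc->txd.chan */
        int cookie;                     /* completion cookie */
        struct task_desc *next;         /* singly linked FIFO for the sketch */
};

/* One global FIFO shared by every channel, mirroring the driver's 'head' list. */
static struct task_desc *fifo_head, *fifo_tail;

static void issue_pending(struct channel *c, int cookie)
{
        struct task_desc *d = malloc(sizeof(*d));

        d->chan = c;
        d->cookie = cookie;
        d->next = NULL;
        if (fifo_tail)
                fifo_tail->next = d;    /* append at the tail: submission order */
        else
                fifo_head = d;
        fifo_tail = d;
}

static void dostart_work(void)
{
        /* Always take the oldest descriptor, whatever channel issued it. */
        while (fifo_head) {
                struct task_desc *d = fifo_head;

                fifo_head = d->next;
                if (!fifo_head)
                        fifo_tail = NULL;
                printf("run task %d from channel %d\n", d->cookie, d->chan->id);
                free(d);
        }
}

int main(void)
{
        struct channel a = { .id = 0 }, b = { .id = 1 };

        /* Interleaved submissions are served interleaved, so neither
         * channel can monopolize the (simulated) PXP. */
        issue_pending(&a, 1);
        issue_pending(&b, 2);
        issue_pending(&a, 3);
        dostart_work();
        return 0;
}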
diff --git a/drivers/dma/pxp/pxp_dma_v2.c b/drivers/dma/pxp/pxp_dma_v2.c
index f92a25560d24..55f0a5a6adf9 100644
--- a/drivers/dma/pxp/pxp_dma_v2.c
+++ b/drivers/dma/pxp/pxp_dma_v2.c
@@ -1066,11 +1066,6 @@ static void pxp_clkoff_timer(unsigned long arg)
                          jiffies + msecs_to_jiffies(timeout_in_ms));
 }
 
-static struct pxp_tx_desc *pxpdma_first_active(struct pxp_channel *pxp_chan)
-{
-        return list_entry(pxp_chan->active_list.next, struct pxp_tx_desc, list);
-}
-
 static struct pxp_tx_desc *pxpdma_first_queued(struct pxp_channel *pxp_chan)
 {
         return list_entry(pxp_chan->queue.next, struct pxp_tx_desc, list);
@@ -1085,9 +1080,8 @@ static void __pxpdma_dostart(struct pxp_channel *pxp_chan)
         struct pxp_tx_desc *child;
         int i = 0;
 
-        /* so far we presume only one transaction on active_list */
         /* S0 */
-        desc = pxpdma_first_active(pxp_chan);
+        desc = list_first_entry(&head, struct pxp_tx_desc, list);
         memcpy(&pxp->pxp_conf_state.s0_param,
                &desc->layer_param.s0_param, sizeof(struct pxp_layer_param));
         memcpy(&pxp->pxp_conf_state.proc_data,
@@ -1120,7 +1114,8 @@ static void __pxpdma_dostart(struct pxp_channel *pxp_chan)
 static void pxpdma_dostart_work(struct pxps *pxp)
 {
         struct pxp_channel *pxp_chan = NULL;
-        unsigned long flags, flags1;
+        unsigned long flags;
+        struct pxp_tx_desc *desc = NULL;
 
         spin_lock_irqsave(&pxp->lock, flags);
         if (list_empty(&head)) {
@@ -1129,16 +1124,10 @@ static void pxpdma_dostart_work(struct pxps *pxp)
                 return;
         }
 
-        pxp_chan = list_entry(head.next, struct pxp_channel, list);
+        desc = list_entry(head.next, struct pxp_tx_desc, list);
+        pxp_chan = to_pxp_channel(desc->txd.chan);
 
-        spin_lock_irqsave(&pxp_chan->lock, flags1);
-        if (!list_empty(&pxp_chan->active_list)) {
-                struct pxp_tx_desc *desc;
-                /* REVISIT */
-                desc = pxpdma_first_active(pxp_chan);
-                __pxpdma_dostart(pxp_chan);
-        }
-        spin_unlock_irqrestore(&pxp_chan->lock, flags1);
+        __pxpdma_dostart(pxp_chan);
 
         /* Configure PxP */
         pxp_config(pxp, pxp_chan);
@@ -1209,7 +1198,6 @@ static int pxp_init_channel(struct pxp_dma *pxp_dma,
          * (i.e., pxp_tx_desc) here.
          */
 
-        INIT_LIST_HEAD(&pxp_chan->active_list);
         INIT_LIST_HEAD(&pxp_chan->queue);
 
         return ret;
@@ -1241,18 +1229,9 @@ static irqreturn_t pxp_irq(int irq, void *dev_id)
                 return IRQ_NONE;
         }
 
-        pxp_chan = list_entry(head.next, struct pxp_channel, list);
-
-        if (list_empty(&pxp_chan->active_list)) {
-                pr_debug("PXP_IRQ pxp_chan->active_list empty. chan_id %d\n",
-                         pxp_chan->dma_chan.chan_id);
-                pxp->pxp_ongoing = 0;
-                spin_unlock_irqrestore(&pxp->lock, flags);
-                return IRQ_NONE;
-        }
-
         /* Get descriptor and call callback */
-        desc = pxpdma_first_active(pxp_chan);
+        desc = list_entry(head.next, struct pxp_tx_desc, list);
+        pxp_chan = to_pxp_channel(desc->txd.chan);
 
         pxp_chan->completed = desc->txd.cookie;
 
@@ -1274,9 +1253,6 @@ static irqreturn_t pxp_irq(int irq, void *dev_id)
         list_del_init(&desc->list);
         kmem_cache_free(tx_desc_cache, (void *)desc);
 
-        if (list_empty(&pxp_chan->active_list))
-                list_del_init(&pxp_chan->list);
-
         complete(&pxp->complete);
         pxp->pxp_ongoing = 0;
         mod_timer(&pxp->clk_timer, jiffies + msecs_to_jiffies(timeout_in_ms));
@@ -1380,30 +1356,13 @@ static void pxp_issue_pending(struct dma_chan *chan)
         struct pxp_dma *pxp_dma = to_pxp_dma(chan->device);
         struct pxps *pxp = to_pxp(pxp_dma);
         unsigned long flags0, flags;
-        struct list_head *iter;
 
         spin_lock_irqsave(&pxp->lock, flags0);
         spin_lock_irqsave(&pxp_chan->lock, flags);
 
         if (!list_empty(&pxp_chan->queue)) {
-                pxpdma_dequeue(pxp_chan, &pxp_chan->active_list);
+                pxpdma_dequeue(pxp_chan, &head);
                 pxp_chan->status = PXP_CHANNEL_READY;
-                iter = head.next;
-                /* Avoid adding a pxp channel to head list which
-                 * has been already listed in it. And this may
-                 * cause the head list to be broken down.
-                 */
-                if (list_empty(&head)) {
-                        list_add_tail(&pxp_chan->list, &head);
-                } else {
-                        while (iter != &head) {
-                                if (&pxp_chan->list == iter)
-                                        break;
-                                iter = iter->next;
-                        }
-                        if (iter == &head)
-                                list_add_tail(&pxp_chan->list, &head);
-                }
         } else {
                 spin_unlock_irqrestore(&pxp_chan->lock, flags);
                 spin_unlock_irqrestore(&pxp->lock, flags0);
diff --git a/include/linux/pxp_dma.h b/include/linux/pxp_dma.h
index 7cb5436a575e..41c8a39042c3 100644
--- a/include/linux/pxp_dma.h
+++ b/include/linux/pxp_dma.h
@@ -45,7 +45,6 @@ struct pxp_channel {
         void *client;                   /* Only one client per channel */
         unsigned int n_tx_desc;
         struct pxp_tx_desc *desc;       /* allocated tx-descriptors */
-        struct list_head active_list;   /* active tx-descriptors */
         struct list_head queue;         /* queued tx-descriptors */
         struct list_head list;          /* track queued channel number */
         spinlock_t lock;                /* protects sg[0,1], queue */