author		Pierre Ossman <drzeus@drzeus.cx>	2007-09-27 04:48:29 -0400
committer	Pierre Ossman <drzeus@drzeus.cx>	2007-09-27 04:48:29 -0400
commit		6f4285d13300f1c8cd675a41ab390cea06173cd1 (patch)
tree		d0611c5e278af5c85bb157cd1b5c0db233ecaf02 /drivers/mmc
parent		5d3ad4e8a12e538eead0a37d22b1ba6aec0f2127 (diff)
sdio: adaptive interrupt polling
The interrupt polling frequency is a compromise between power usage and interrupt latency. Unfortunately, it affects throughput rather severely for devices which require an interrupt for every chunk of data.

By making the polling frequency adaptive, we get better throughput with those devices without sacrificing too much power. Polling will quickly increase when there is an actual interrupt, and slowly fall back to the idle frequency when the interrupts stop coming.

Signed-off-by: Pierre Ossman <drzeus@drzeus.cx>
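For reference, the adaptive rule amounts to the small update below. This is a minimal standalone sketch of the logic the patch adds inline in sdio_irq_thread(); the helper name update_poll_period() and its argument list are illustrative only and are not part of the patch.

/*
 * Sketch: halve the poll period when an interrupt was just serviced
 * (more are likely to follow soon), otherwise let the period creep
 * back up, capped at the idle rate (10 ms in this patch).
 */
static unsigned long update_poll_period(unsigned long period,
					unsigned long idle_period,
					int serviced)
{
	if (serviced)
		period /= 2;
	else {
		period++;
		if (period > idle_period)
			period = idle_period;
	}
	return period;
}

The thread then sleeps for the returned period before polling again, so bursty traffic drives the period down quickly while an idle card drifts back to the cheap 10 ms rate.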
Diffstat (limited to 'drivers/mmc')
-rw-r--r--	drivers/mmc/core/sdio_irq.c	28
1 file changed, 23 insertions(+), 5 deletions(-)
diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
index 8843a4c2fe91..f78ffeefed52 100644
--- a/drivers/mmc/core/sdio_irq.c
+++ b/drivers/mmc/core/sdio_irq.c
@@ -27,7 +27,7 @@
 
 static int process_sdio_pending_irqs(struct mmc_card *card)
 {
-	int i, ret;
+	int i, ret, count;
 	unsigned char pending;
 
 	ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_INTx, 0, &pending);
@@ -37,6 +37,7 @@ static int process_sdio_pending_irqs(struct mmc_card *card)
 		return ret;
 	}
 
+	count = 0;
 	for (i = 1; i <= 7; i++) {
 		if (pending & (1 << i)) {
 			struct sdio_func *func = card->sdio_func[i - 1];
@@ -46,20 +47,21 @@ static int process_sdio_pending_irqs(struct mmc_card *card)
 					sdio_func_id(func));
 			} else if (func->irq_handler) {
 				func->irq_handler(func);
+				count++;
 			} else
 				printk(KERN_WARNING "%s: pending IRQ with no handler\n",
 					sdio_func_id(func));
 		}
 	}
 
-	return 0;
+	return count;
 }
 
 static int sdio_irq_thread(void *_host)
 {
 	struct mmc_host *host = _host;
 	struct sched_param param = { .sched_priority = 1 };
-	unsigned long period;
+	unsigned long period, idle_period;
 	int ret;
 
 	sched_setscheduler(current, SCHED_FIFO, &param);
@@ -70,8 +72,9 @@ static int sdio_irq_thread(void *_host)
 	 * asynchronous notification of pending SDIO card interrupts
 	 * hence we poll for them in that case.
 	 */
+	idle_period = msecs_to_jiffies(10);
 	period = (host->caps & MMC_CAP_SDIO_IRQ) ?
-		MAX_SCHEDULE_TIMEOUT : msecs_to_jiffies(10);
+		MAX_SCHEDULE_TIMEOUT : idle_period;
 
 	pr_debug("%s: IRQ thread started (poll period = %lu jiffies)\n",
 		 mmc_hostname(host), period);
@@ -101,9 +104,24 @@ static int sdio_irq_thread(void *_host)
 		 * errors. FIXME: determine if due to card removal and
 		 * possibly exit this thread if so.
 		 */
-		if (ret)
+		if (ret < 0)
 			ssleep(1);
 
+		/*
+		 * Adaptive polling frequency based on the assumption
+		 * that an interrupt will be closely followed by more.
+		 * This has a substantial benefit for network devices.
+		 */
+		if (!(host->caps & MMC_CAP_SDIO_IRQ)) {
+			if (ret > 0)
+				period /= 2;
+			else {
+				period++;
+				if (period > idle_period)
+					period = idle_period;
+			}
+		}
+
 		set_task_state(current, TASK_INTERRUPTIBLE);
 		if (host->caps & MMC_CAP_SDIO_IRQ)
 			host->ops->enable_sdio_irq(host, 1);