aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorAdrian Hunter <adrian.hunter@nokia.com>2010-05-26 17:42:06 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2010-05-27 12:12:40 -0400
commitb417577d3b9bbb06a4ddc9aa955af9bd503f7242 (patch)
tree32ff825e6d4ee925eff02932b2954a449b060e56 /drivers
parent14c5aa6d3797c6b3d09193a8423472847f738526 (diff)
omap_hsmmc: improve interrupt synchronisation
The following changes were needed: - do not use in_interrupt() because it will not work with threaded interrupts In addition, the following improvements were made: - ensure DMA is unmapped only after the final DMA interrupt - ensure a request is completed only after the final DMA interrupt - disable controller interrupts when a request is not in progress - remove the spin-lock protecting the start of a new request from an unexpected interrupt because the locking was complicated and a 'req_in_progress' flag suffices (since the spin-lock only defers the unexpected interrupts anyway) - instead use the spin-lock to protect the MMC interrupt handler from the DMA interrupt handler - remove the semaphore preventing DMA from being started while the previous DMA is still in progress - the other changes make that impossible, so it is now a BUG_ON condition - ensure the controller interrupt status is clear before exiting the interrupt handler In general, these changes make the code safer but do not fix any specific bugs so backporting is not necessary. Signed-off-by: Adrian Hunter <adrian.hunter@nokia.com> Tested-by: Venkatraman S <svenkatr@ti.com> Acked-by: Madhusudhan Chikkature <madhu.cr@ti.com> Acked-by: Tony Lindgren <tony@atomide.com> Cc: <linux-mmc@vger.kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/mmc/host/omap_hsmmc.c262
1 file changed, 134 insertions, 128 deletions
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index e9caf694c59e..c9401efd1137 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -157,12 +157,10 @@ struct omap_hsmmc_host {
157 */ 157 */
158 struct regulator *vcc; 158 struct regulator *vcc;
159 struct regulator *vcc_aux; 159 struct regulator *vcc_aux;
160 struct semaphore sem;
161 struct work_struct mmc_carddetect_work; 160 struct work_struct mmc_carddetect_work;
162 void __iomem *base; 161 void __iomem *base;
163 resource_size_t mapbase; 162 resource_size_t mapbase;
164 spinlock_t irq_lock; /* Prevent races with irq handler */ 163 spinlock_t irq_lock; /* Prevent races with irq handler */
165 unsigned long flags;
166 unsigned int id; 164 unsigned int id;
167 unsigned int dma_len; 165 unsigned int dma_len;
168 unsigned int dma_sg_idx; 166 unsigned int dma_sg_idx;
@@ -183,6 +181,7 @@ struct omap_hsmmc_host {
183 int protect_card; 181 int protect_card;
184 int reqs_blocked; 182 int reqs_blocked;
185 int use_reg; 183 int use_reg;
184 int req_in_progress;
186 185
187 struct omap_mmc_platform_data *pdata; 186 struct omap_mmc_platform_data *pdata;
188}; 187};
@@ -524,6 +523,27 @@ static void omap_hsmmc_stop_clock(struct omap_hsmmc_host *host)
524 dev_dbg(mmc_dev(host->mmc), "MMC Clock is not stoped\n"); 523 dev_dbg(mmc_dev(host->mmc), "MMC Clock is not stoped\n");
525} 524}
526 525
526static void omap_hsmmc_enable_irq(struct omap_hsmmc_host *host)
527{
528 unsigned int irq_mask;
529
530 if (host->use_dma)
531 irq_mask = INT_EN_MASK & ~(BRR_ENABLE | BWR_ENABLE);
532 else
533 irq_mask = INT_EN_MASK;
534
535 OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
536 OMAP_HSMMC_WRITE(host->base, ISE, irq_mask);
537 OMAP_HSMMC_WRITE(host->base, IE, irq_mask);
538}
539
540static void omap_hsmmc_disable_irq(struct omap_hsmmc_host *host)
541{
542 OMAP_HSMMC_WRITE(host->base, ISE, 0);
543 OMAP_HSMMC_WRITE(host->base, IE, 0);
544 OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
545}
546
527#ifdef CONFIG_PM 547#ifdef CONFIG_PM
528 548
529/* 549/*
@@ -592,9 +612,7 @@ static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host)
592 && time_before(jiffies, timeout)) 612 && time_before(jiffies, timeout))
593 ; 613 ;
594 614
595 OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR); 615 omap_hsmmc_disable_irq(host);
596 OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK);
597 OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
598 616
599 /* Do not initialize card-specific things if the power is off */ 617 /* Do not initialize card-specific things if the power is off */
600 if (host->power_mode == MMC_POWER_OFF) 618 if (host->power_mode == MMC_POWER_OFF)
@@ -697,6 +715,8 @@ static void send_init_stream(struct omap_hsmmc_host *host)
697 return; 715 return;
698 716
699 disable_irq(host->irq); 717 disable_irq(host->irq);
718
719 OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
700 OMAP_HSMMC_WRITE(host->base, CON, 720 OMAP_HSMMC_WRITE(host->base, CON,
701 OMAP_HSMMC_READ(host->base, CON) | INIT_STREAM); 721 OMAP_HSMMC_READ(host->base, CON) | INIT_STREAM);
702 OMAP_HSMMC_WRITE(host->base, CMD, INIT_STREAM_CMD); 722 OMAP_HSMMC_WRITE(host->base, CMD, INIT_STREAM_CMD);
@@ -762,17 +782,7 @@ omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd,
762 mmc_hostname(host->mmc), cmd->opcode, cmd->arg); 782 mmc_hostname(host->mmc), cmd->opcode, cmd->arg);
763 host->cmd = cmd; 783 host->cmd = cmd;
764 784
765 /* 785 omap_hsmmc_enable_irq(host);
766 * Clear status bits and enable interrupts
767 */
768 OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
769 OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK);
770
771 if (host->use_dma)
772 OMAP_HSMMC_WRITE(host->base, IE,
773 INT_EN_MASK & ~(BRR_ENABLE | BWR_ENABLE));
774 else
775 OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
776 786
777 host->response_busy = 0; 787 host->response_busy = 0;
778 if (cmd->flags & MMC_RSP_PRESENT) { 788 if (cmd->flags & MMC_RSP_PRESENT) {
@@ -806,13 +816,7 @@ omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd,
806 if (host->use_dma) 816 if (host->use_dma)
807 cmdreg |= DMA_EN; 817 cmdreg |= DMA_EN;
808 818
809 /* 819 host->req_in_progress = 1;
810 * In an interrupt context (i.e. STOP command), the spinlock is unlocked
811 * by the interrupt handler, otherwise (i.e. for a new request) it is
812 * unlocked here.
813 */
814 if (!in_interrupt())
815 spin_unlock_irqrestore(&host->irq_lock, host->flags);
816 820
817 OMAP_HSMMC_WRITE(host->base, ARG, cmd->arg); 821 OMAP_HSMMC_WRITE(host->base, ARG, cmd->arg);
818 OMAP_HSMMC_WRITE(host->base, CMD, cmdreg); 822 OMAP_HSMMC_WRITE(host->base, CMD, cmdreg);
@@ -827,6 +831,23 @@ omap_hsmmc_get_dma_dir(struct omap_hsmmc_host *host, struct mmc_data *data)
827 return DMA_FROM_DEVICE; 831 return DMA_FROM_DEVICE;
828} 832}
829 833
834static void omap_hsmmc_request_done(struct omap_hsmmc_host *host, struct mmc_request *mrq)
835{
836 int dma_ch;
837
838 spin_lock(&host->irq_lock);
839 host->req_in_progress = 0;
840 dma_ch = host->dma_ch;
841 spin_unlock(&host->irq_lock);
842
843 omap_hsmmc_disable_irq(host);
844 /* Do not complete the request if DMA is still in progress */
845 if (mrq->data && host->use_dma && dma_ch != -1)
846 return;
847 host->mrq = NULL;
848 mmc_request_done(host->mmc, mrq);
849}
850
830/* 851/*
831 * Notify the transfer complete to MMC core 852 * Notify the transfer complete to MMC core
832 */ 853 */
@@ -843,25 +864,19 @@ omap_hsmmc_xfer_done(struct omap_hsmmc_host *host, struct mmc_data *data)
843 return; 864 return;
844 } 865 }
845 866
846 host->mrq = NULL; 867 omap_hsmmc_request_done(host, mrq);
847 mmc_request_done(host->mmc, mrq);
848 return; 868 return;
849 } 869 }
850 870
851 host->data = NULL; 871 host->data = NULL;
852 872
853 if (host->use_dma && host->dma_ch != -1)
854 dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_len,
855 omap_hsmmc_get_dma_dir(host, data));
856
857 if (!data->error) 873 if (!data->error)
858 data->bytes_xfered += data->blocks * (data->blksz); 874 data->bytes_xfered += data->blocks * (data->blksz);
859 else 875 else
860 data->bytes_xfered = 0; 876 data->bytes_xfered = 0;
861 877
862 if (!data->stop) { 878 if (!data->stop) {
863 host->mrq = NULL; 879 omap_hsmmc_request_done(host, data->mrq);
864 mmc_request_done(host->mmc, data->mrq);
865 return; 880 return;
866 } 881 }
867 omap_hsmmc_start_command(host, data->stop, NULL); 882 omap_hsmmc_start_command(host, data->stop, NULL);
@@ -887,10 +902,8 @@ omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd)
887 cmd->resp[0] = OMAP_HSMMC_READ(host->base, RSP10); 902 cmd->resp[0] = OMAP_HSMMC_READ(host->base, RSP10);
888 } 903 }
889 } 904 }
890 if ((host->data == NULL && !host->response_busy) || cmd->error) { 905 if ((host->data == NULL && !host->response_busy) || cmd->error)
891 host->mrq = NULL; 906 omap_hsmmc_request_done(host, cmd->mrq);
892 mmc_request_done(host->mmc, cmd->mrq);
893 }
894} 907}
895 908
896/* 909/*
@@ -898,14 +911,19 @@ omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd)
898 */ 911 */
899static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno) 912static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
900{ 913{
914 int dma_ch;
915
901 host->data->error = errno; 916 host->data->error = errno;
902 917
903 if (host->use_dma && host->dma_ch != -1) { 918 spin_lock(&host->irq_lock);
919 dma_ch = host->dma_ch;
920 host->dma_ch = -1;
921 spin_unlock(&host->irq_lock);
922
923 if (host->use_dma && dma_ch != -1) {
904 dma_unmap_sg(mmc_dev(host->mmc), host->data->sg, host->dma_len, 924 dma_unmap_sg(mmc_dev(host->mmc), host->data->sg, host->dma_len,
905 omap_hsmmc_get_dma_dir(host, host->data)); 925 omap_hsmmc_get_dma_dir(host, host->data));
906 omap_free_dma(host->dma_ch); 926 omap_free_dma(dma_ch);
907 host->dma_ch = -1;
908 up(&host->sem);
909 } 927 }
910 host->data = NULL; 928 host->data = NULL;
911} 929}
@@ -967,28 +985,21 @@ static inline void omap_hsmmc_reset_controller_fsm(struct omap_hsmmc_host *host,
967 __func__); 985 __func__);
968} 986}
969 987
970/* 988static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)
971 * MMC controller IRQ handler
972 */
973static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id)
974{ 989{
975 struct omap_hsmmc_host *host = dev_id;
976 struct mmc_data *data; 990 struct mmc_data *data;
977 int end_cmd = 0, end_trans = 0, status; 991 int end_cmd = 0, end_trans = 0;
978 992
979 spin_lock(&host->irq_lock); 993 if (!host->req_in_progress) {
980 994 do {
981 if (host->mrq == NULL) { 995 OMAP_HSMMC_WRITE(host->base, STAT, status);
982 OMAP_HSMMC_WRITE(host->base, STAT, 996 /* Flush posted write */
983 OMAP_HSMMC_READ(host->base, STAT)); 997 status = OMAP_HSMMC_READ(host->base, STAT);
984 /* Flush posted write */ 998 } while (status & INT_EN_MASK);
985 OMAP_HSMMC_READ(host->base, STAT); 999 return;
986 spin_unlock(&host->irq_lock);
987 return IRQ_HANDLED;
988 } 1000 }
989 1001
990 data = host->data; 1002 data = host->data;
991 status = OMAP_HSMMC_READ(host->base, STAT);
992 dev_dbg(mmc_dev(host->mmc), "IRQ Status is %x\n", status); 1003 dev_dbg(mmc_dev(host->mmc), "IRQ Status is %x\n", status);
993 1004
994 if (status & ERR) { 1005 if (status & ERR) {
@@ -1041,15 +1052,27 @@ static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id)
1041 } 1052 }
1042 1053
1043 OMAP_HSMMC_WRITE(host->base, STAT, status); 1054 OMAP_HSMMC_WRITE(host->base, STAT, status);
1044 /* Flush posted write */
1045 OMAP_HSMMC_READ(host->base, STAT);
1046 1055
1047 if (end_cmd || ((status & CC) && host->cmd)) 1056 if (end_cmd || ((status & CC) && host->cmd))
1048 omap_hsmmc_cmd_done(host, host->cmd); 1057 omap_hsmmc_cmd_done(host, host->cmd);
1049 if ((end_trans || (status & TC)) && host->mrq) 1058 if ((end_trans || (status & TC)) && host->mrq)
1050 omap_hsmmc_xfer_done(host, data); 1059 omap_hsmmc_xfer_done(host, data);
1060}
1051 1061
1052 spin_unlock(&host->irq_lock); 1062/*
1063 * MMC controller IRQ handler
1064 */
1065static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id)
1066{
1067 struct omap_hsmmc_host *host = dev_id;
1068 int status;
1069
1070 status = OMAP_HSMMC_READ(host->base, STAT);
1071 do {
1072 omap_hsmmc_do_irq(host, status);
1073 /* Flush posted write */
1074 status = OMAP_HSMMC_READ(host->base, STAT);
1075 } while (status & INT_EN_MASK);
1053 1076
1054 return IRQ_HANDLED; 1077 return IRQ_HANDLED;
1055} 1078}
@@ -1244,31 +1267,47 @@ static void omap_hsmmc_config_dma_params(struct omap_hsmmc_host *host,
1244/* 1267/*
1245 * DMA call back function 1268 * DMA call back function
1246 */ 1269 */
1247static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *data) 1270static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
1248{ 1271{
1249 struct omap_hsmmc_host *host = data; 1272 struct omap_hsmmc_host *host = cb_data;
1273 struct mmc_data *data = host->mrq->data;
1274 int dma_ch, req_in_progress;
1250 1275
1251 if (ch_status & OMAP2_DMA_MISALIGNED_ERR_IRQ) 1276 if (ch_status & OMAP2_DMA_MISALIGNED_ERR_IRQ)
1252 dev_dbg(mmc_dev(host->mmc), "MISALIGNED_ADRS_ERR\n"); 1277 dev_dbg(mmc_dev(host->mmc), "MISALIGNED_ADRS_ERR\n");
1253 1278
1254 if (host->dma_ch < 0) 1279 spin_lock(&host->irq_lock);
1280 if (host->dma_ch < 0) {
1281 spin_unlock(&host->irq_lock);
1255 return; 1282 return;
1283 }
1256 1284
1257 host->dma_sg_idx++; 1285 host->dma_sg_idx++;
1258 if (host->dma_sg_idx < host->dma_len) { 1286 if (host->dma_sg_idx < host->dma_len) {
1259 /* Fire up the next transfer. */ 1287 /* Fire up the next transfer. */
1260 omap_hsmmc_config_dma_params(host, host->data, 1288 omap_hsmmc_config_dma_params(host, data,
1261 host->data->sg + host->dma_sg_idx); 1289 data->sg + host->dma_sg_idx);
1290 spin_unlock(&host->irq_lock);
1262 return; 1291 return;
1263 } 1292 }
1264 1293
1265 omap_free_dma(host->dma_ch); 1294 dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_len,
1295 omap_hsmmc_get_dma_dir(host, data));
1296
1297 req_in_progress = host->req_in_progress;
1298 dma_ch = host->dma_ch;
1266 host->dma_ch = -1; 1299 host->dma_ch = -1;
1267 /* 1300 spin_unlock(&host->irq_lock);
1268 * DMA Callback: run in interrupt context. 1301
1269 * mutex_unlock will throw a kernel warning if used. 1302 omap_free_dma(dma_ch);
1270 */ 1303
1271 up(&host->sem); 1304 /* If DMA has finished after TC, complete the request */
1305 if (!req_in_progress) {
1306 struct mmc_request *mrq = host->mrq;
1307
1308 host->mrq = NULL;
1309 mmc_request_done(host->mmc, mrq);
1310 }
1272} 1311}
1273 1312
1274/* 1313/*
@@ -1277,7 +1316,7 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *data)
1277static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host, 1316static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host,
1278 struct mmc_request *req) 1317 struct mmc_request *req)
1279{ 1318{
1280 int dma_ch = 0, ret = 0, err = 1, i; 1319 int dma_ch = 0, ret = 0, i;
1281 struct mmc_data *data = req->data; 1320 struct mmc_data *data = req->data;
1282 1321
1283 /* Sanity check: all the SG entries must be aligned by block size. */ 1322 /* Sanity check: all the SG entries must be aligned by block size. */
@@ -1294,23 +1333,7 @@ static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host,
1294 */ 1333 */
1295 return -EINVAL; 1334 return -EINVAL;
1296 1335
1297 /* 1336 BUG_ON(host->dma_ch != -1);
1298 * If for some reason the DMA transfer is still active,
1299 * we wait for timeout period and free the dma
1300 */
1301 if (host->dma_ch != -1) {
1302 set_current_state(TASK_UNINTERRUPTIBLE);
1303 schedule_timeout(100);
1304 if (down_trylock(&host->sem)) {
1305 omap_free_dma(host->dma_ch);
1306 host->dma_ch = -1;
1307 up(&host->sem);
1308 return err;
1309 }
1310 } else {
1311 if (down_trylock(&host->sem))
1312 return err;
1313 }
1314 1337
1315 ret = omap_request_dma(omap_hsmmc_get_dma_sync_dev(host, data), 1338 ret = omap_request_dma(omap_hsmmc_get_dma_sync_dev(host, data),
1316 "MMC/SD", omap_hsmmc_dma_cb, host, &dma_ch); 1339 "MMC/SD", omap_hsmmc_dma_cb, host, &dma_ch);
@@ -1410,37 +1433,27 @@ static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req)
1410 struct omap_hsmmc_host *host = mmc_priv(mmc); 1433 struct omap_hsmmc_host *host = mmc_priv(mmc);
1411 int err; 1434 int err;
1412 1435
1413 /* 1436 BUG_ON(host->req_in_progress);
1414 * Prevent races with the interrupt handler because of unexpected 1437 BUG_ON(host->dma_ch != -1);
1415 * interrupts, but not if we are already in interrupt context i.e. 1438 if (host->protect_card) {
1416 * retries. 1439 if (host->reqs_blocked < 3) {
1417 */ 1440 /*
1418 if (!in_interrupt()) { 1441 * Ensure the controller is left in a consistent
1419 spin_lock_irqsave(&host->irq_lock, host->flags); 1442 * state by resetting the command and data state
1420 /* 1443 * machines.
1421 * Protect the card from I/O if there is a possibility 1444 */
1422 * it can be removed. 1445 omap_hsmmc_reset_controller_fsm(host, SRD);
1423 */ 1446 omap_hsmmc_reset_controller_fsm(host, SRC);
1424 if (host->protect_card) { 1447 host->reqs_blocked += 1;
1425 if (host->reqs_blocked < 3) { 1448 }
1426 /* 1449 req->cmd->error = -EBADF;
1427 * Ensure the controller is left in a consistent 1450 if (req->data)
1428 * state by resetting the command and data state 1451 req->data->error = -EBADF;
1429 * machines. 1452 req->cmd->retries = 0;
1430 */ 1453 mmc_request_done(mmc, req);
1431 omap_hsmmc_reset_controller_fsm(host, SRD); 1454 return;
1432 omap_hsmmc_reset_controller_fsm(host, SRC); 1455 } else if (host->reqs_blocked)
1433 host->reqs_blocked += 1; 1456 host->reqs_blocked = 0;
1434 }
1435 req->cmd->error = -EBADF;
1436 if (req->data)
1437 req->data->error = -EBADF;
1438 spin_unlock_irqrestore(&host->irq_lock, host->flags);
1439 mmc_request_done(mmc, req);
1440 return;
1441 } else if (host->reqs_blocked)
1442 host->reqs_blocked = 0;
1443 }
1444 WARN_ON(host->mrq != NULL); 1457 WARN_ON(host->mrq != NULL);
1445 host->mrq = req; 1458 host->mrq = req;
1446 err = omap_hsmmc_prepare_data(host, req); 1459 err = omap_hsmmc_prepare_data(host, req);
@@ -1449,8 +1462,6 @@ static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req)
1449 if (req->data) 1462 if (req->data)
1450 req->data->error = err; 1463 req->data->error = err;
1451 host->mrq = NULL; 1464 host->mrq = NULL;
1452 if (!in_interrupt())
1453 spin_unlock_irqrestore(&host->irq_lock, host->flags);
1454 mmc_request_done(mmc, req); 1465 mmc_request_done(mmc, req);
1455 return; 1466 return;
1456 } 1467 }
@@ -2019,7 +2030,6 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
2019 mmc->f_min = 400000; 2030 mmc->f_min = 400000;
2020 mmc->f_max = 52000000; 2031 mmc->f_max = 52000000;
2021 2032
2022 sema_init(&host->sem, 1);
2023 spin_lock_init(&host->irq_lock); 2033 spin_lock_init(&host->irq_lock);
2024 2034
2025 host->iclk = clk_get(&pdev->dev, "ick"); 2035 host->iclk = clk_get(&pdev->dev, "ick");
@@ -2162,8 +2172,7 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
2162 } 2172 }
2163 } 2173 }
2164 2174
2165 OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK); 2175 omap_hsmmc_disable_irq(host);
2166 OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
2167 2176
2168 mmc_host_lazy_disable(host->mmc); 2177 mmc_host_lazy_disable(host->mmc);
2169 2178
@@ -2283,10 +2292,7 @@ static int omap_hsmmc_suspend(struct platform_device *pdev, pm_message_t state)
2283 mmc_host_enable(host->mmc); 2292 mmc_host_enable(host->mmc);
2284 ret = mmc_suspend_host(host->mmc, state); 2293 ret = mmc_suspend_host(host->mmc, state);
2285 if (ret == 0) { 2294 if (ret == 0) {
2286 OMAP_HSMMC_WRITE(host->base, ISE, 0); 2295 omap_hsmmc_disable_irq(host);
2287 OMAP_HSMMC_WRITE(host->base, IE, 0);
2288
2289
2290 OMAP_HSMMC_WRITE(host->base, HCTL, 2296 OMAP_HSMMC_WRITE(host->base, HCTL,
2291 OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP); 2297 OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP);
2292 mmc_host_disable(host->mmc); 2298 mmc_host_disable(host->mmc);