Diffstat (limited to 'drivers/mmc/wbsd.c')
 drivers/mmc/wbsd.c | 102 ++++++++++++++++++++++++++++++----------------
 1 file changed, 66 insertions(+), 36 deletions(-)
diff --git a/drivers/mmc/wbsd.c b/drivers/mmc/wbsd.c
index 7a282672f8e9..a44d8777ab9f 100644
--- a/drivers/mmc/wbsd.c
+++ b/drivers/mmc/wbsd.c
@@ -1,7 +1,7 @@
 /*
  * linux/drivers/mmc/wbsd.c - Winbond W83L51xD SD/MMC driver
  *
- * Copyright (C) 2004-2005 Pierre Ossman, All Rights Reserved.
+ * Copyright (C) 2004-2006 Pierre Ossman, All Rights Reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -272,16 +272,9 @@ static inline int wbsd_next_sg(struct wbsd_host *host)
 	return host->num_sg;
 }
 
-static inline char *wbsd_kmap_sg(struct wbsd_host *host)
+static inline char *wbsd_sg_to_buffer(struct wbsd_host *host)
 {
-	host->mapped_sg = kmap_atomic(host->cur_sg->page, KM_BIO_SRC_IRQ) +
-		host->cur_sg->offset;
-	return host->mapped_sg;
-}
-
-static inline void wbsd_kunmap_sg(struct wbsd_host *host)
-{
-	kunmap_atomic(host->mapped_sg, KM_BIO_SRC_IRQ);
+	return page_address(host->cur_sg->page) + host->cur_sg->offset;
 }
 
 static inline void wbsd_sg_to_dma(struct wbsd_host *host, struct mmc_data *data)
@@ -302,12 +295,11 @@ static inline void wbsd_sg_to_dma(struct wbsd_host *host, struct mmc_data *data)
 	 * we do not transfer too much.
 	 */
 	for (i = 0; i < len; i++) {
-		sgbuf = kmap_atomic(sg[i].page, KM_BIO_SRC_IRQ) + sg[i].offset;
+		sgbuf = page_address(sg[i].page) + sg[i].offset;
 		if (size < sg[i].length)
 			memcpy(dmabuf, sgbuf, size);
 		else
 			memcpy(dmabuf, sgbuf, sg[i].length);
-		kunmap_atomic(sgbuf, KM_BIO_SRC_IRQ);
 		dmabuf += sg[i].length;
 
 		if (size < sg[i].length)
@@ -347,7 +339,7 @@ static inline void wbsd_dma_to_sg(struct wbsd_host *host, struct mmc_data *data)
 	 * we do not transfer too much.
 	 */
 	for (i = 0; i < len; i++) {
-		sgbuf = kmap_atomic(sg[i].page, KM_BIO_SRC_IRQ) + sg[i].offset;
+		sgbuf = page_address(sg[i].page) + sg[i].offset;
 		if (size < sg[i].length)
 			memcpy(sgbuf, dmabuf, size);
 		else
@@ -497,7 +489,7 @@ static void wbsd_empty_fifo(struct wbsd_host *host)
 	if (data->bytes_xfered == host->size)
 		return;
 
-	buffer = wbsd_kmap_sg(host) + host->offset;
+	buffer = wbsd_sg_to_buffer(host) + host->offset;
 
 	/*
 	 * Drain the fifo. This has a tendency to loop longer
@@ -526,17 +518,13 @@ static void wbsd_empty_fifo(struct wbsd_host *host)
 			/*
 			 * Transfer done?
 			 */
-			if (data->bytes_xfered == host->size) {
-				wbsd_kunmap_sg(host);
+			if (data->bytes_xfered == host->size)
 				return;
-			}
 
 			/*
 			 * End of scatter list entry?
 			 */
 			if (host->remain == 0) {
-				wbsd_kunmap_sg(host);
-
 				/*
 				 * Get next entry. Check if last.
 				 */
@@ -554,13 +542,11 @@ static void wbsd_empty_fifo(struct wbsd_host *host)
 					return;
 				}
 
-				buffer = wbsd_kmap_sg(host);
+				buffer = wbsd_sg_to_buffer(host);
 			}
 		}
 	}
 
-	wbsd_kunmap_sg(host);
-
 	/*
 	 * This is a very dirty hack to solve a
 	 * hardware problem. The chip doesn't trigger
@@ -583,7 +569,7 @@ static void wbsd_fill_fifo(struct wbsd_host *host)
 	if (data->bytes_xfered == host->size)
 		return;
 
-	buffer = wbsd_kmap_sg(host) + host->offset;
+	buffer = wbsd_sg_to_buffer(host) + host->offset;
 
 	/*
 	 * Fill the fifo. This has a tendency to loop longer
@@ -612,17 +598,13 @@ static void wbsd_fill_fifo(struct wbsd_host *host)
 			/*
 			 * Transfer done?
 			 */
-			if (data->bytes_xfered == host->size) {
-				wbsd_kunmap_sg(host);
+			if (data->bytes_xfered == host->size)
 				return;
-			}
 
 			/*
 			 * End of scatter list entry?
 			 */
 			if (host->remain == 0) {
-				wbsd_kunmap_sg(host);
-
 				/*
 				 * Get next entry. Check if last.
 				 */
@@ -640,13 +622,11 @@ static void wbsd_fill_fifo(struct wbsd_host *host)
 					return;
 				}
 
-				buffer = wbsd_kmap_sg(host);
+				buffer = wbsd_sg_to_buffer(host);
 			}
 		}
 	}
 
-	wbsd_kunmap_sg(host);
-
 	/*
 	 * The controller stops sending interrupts for
 	 * 'FIFO empty' under certain conditions. So we
@@ -910,6 +890,45 @@ static void wbsd_request(struct mmc_host *mmc, struct mmc_request *mrq)
 	 */
 	if (cmd->data && (cmd->error == MMC_ERR_NONE)) {
 		/*
+		 * The hardware is so delightfully stupid that it has a list
+		 * of "data" commands. If a command isn't on this list, it'll
+		 * just go back to the idle state and won't send any data
+		 * interrupts.
+		 */
+		switch (cmd->opcode) {
+		case 11:
+		case 17:
+		case 18:
+		case 20:
+		case 24:
+		case 25:
+		case 26:
+		case 27:
+		case 30:
+		case 42:
+		case 56:
+			break;
+
+		/* ACMDs. We don't keep track of state, so we just treat them
+		 * like any other command. */
+		case 51:
+			break;
+
+		default:
+#ifdef CONFIG_MMC_DEBUG
+			printk(KERN_WARNING "%s: Data command %d is not "
+				"supported by this controller.\n",
+				mmc_hostname(host->mmc), cmd->opcode);
+#endif
+			cmd->data->error = MMC_ERR_INVALID;
+
+			if (cmd->data->stop)
+				wbsd_send_command(host, cmd->data->stop);
+
+			goto done;
+		};
+
+		/*
 		 * Dirty fix for hardware bug.
 		 */
 		if (host->dma == -1)
@@ -1343,16 +1362,27 @@ static int __devinit wbsd_alloc_mmc(struct device *dev)
 	mmc->max_phys_segs = 128;
 
 	/*
-	 * Maximum number of sectors in one transfer. Also limited by 64kB
-	 * buffer.
+	 * Maximum request size. Also limited by 64KiB buffer.
 	 */
-	mmc->max_sectors = 128;
+	mmc->max_req_size = 65536;
 
 	/*
 	 * Maximum segment size. Could be one segment with the maximum number
-	 * of segments.
+	 * of bytes.
+	 */
+	mmc->max_seg_size = mmc->max_req_size;
+
+	/*
+	 * Maximum block size. We have 12 bits (= 4095) but have to subtract
+	 * space for CRC. So the maximum is 4095 - 4*2 = 4087.
+	 */
+	mmc->max_blk_size = 4087;
+
+	/*
+	 * Maximum block count. There is no real limit so the maximum
+	 * request size will be the only restriction.
 	 */
-	mmc->max_seg_size = mmc->max_sectors * 512;
+	mmc->max_blk_count = mmc->max_req_size;
 
 	dev_set_drvdata(dev, mmc);
 
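For reference, the core of the scatterlist change above is the swap from a temporary atomic kmap of each page to the page's permanent kernel mapping. The sketch below illustrates the two access styles; it is not code from the driver (the function names and the 'dst' destination buffer are made up for illustration), and it assumes the era's scatterlist layout with a 'page' member, the two-argument kmap_atomic(), and pages that are guaranteed to be lowmem, since page_address() is only valid for pages with a permanent kernel mapping.

#include <linux/highmem.h>	/* kmap_atomic(), kunmap_atomic(), page_address() */
#include <linux/scatterlist.h>
#include <linux/string.h>

/* Old style: map the page around the access, then drop the mapping. */
static void sg_copy_mapped(struct scatterlist *sg, char *dst)
{
	char *buf;

	buf = kmap_atomic(sg->page, KM_BIO_SRC_IRQ) + sg->offset;
	memcpy(dst, buf, sg->length);
	kunmap_atomic(buf, KM_BIO_SRC_IRQ);
}

/* New style: use the page's permanent mapping directly (lowmem pages only). */
static void sg_copy_direct(struct scatterlist *sg, char *dst)
{
	memcpy(dst, page_address(sg->page) + sg->offset, sg->length);
}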