Diffstat (limited to 'drivers/mmc/host/sdhci.c')
| -rw-r--r-- | drivers/mmc/host/sdhci.c | 994 |
1 file changed, 531 insertions(+), 463 deletions(-)
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index b413aa6c246b..17701c3da733 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
| @@ -15,7 +15,7 @@ | |||
| 15 | 15 | ||
| 16 | #include <linux/delay.h> | 16 | #include <linux/delay.h> |
| 17 | #include <linux/highmem.h> | 17 | #include <linux/highmem.h> |
| 18 | #include <linux/pci.h> | 18 | #include <linux/io.h> |
| 19 | #include <linux/dma-mapping.h> | 19 | #include <linux/dma-mapping.h> |
| 20 | #include <linux/scatterlist.h> | 20 | #include <linux/scatterlist.h> |
| 21 | 21 | ||
| @@ -32,135 +32,6 @@ | |||
| 32 | 32 | ||
| 33 | static unsigned int debug_quirks = 0; | 33 | static unsigned int debug_quirks = 0; |
| 34 | 34 | ||
| 35 | /* | ||
| 36 | * Different quirks to handle when the hardware deviates from a strict | ||
| 37 | * interpretation of the SDHCI specification. | ||
| 38 | */ | ||
| 39 | |||
| 40 | /* Controller doesn't honor resets unless we touch the clock register */ | ||
| 41 | #define SDHCI_QUIRK_CLOCK_BEFORE_RESET (1<<0) | ||
| 42 | /* Controller has bad caps bits, but really supports DMA */ | ||
| 43 | #define SDHCI_QUIRK_FORCE_DMA (1<<1) | ||
| 44 | /* Controller doesn't like to be reset when there is no card inserted. */ | ||
| 45 | #define SDHCI_QUIRK_NO_CARD_NO_RESET (1<<2) | ||
| 46 | /* Controller doesn't like clearing the power reg before a change */ | ||
| 47 | #define SDHCI_QUIRK_SINGLE_POWER_WRITE (1<<3) | ||
| 48 | /* Controller has flaky internal state so reset it on each ios change */ | ||
| 49 | #define SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS (1<<4) | ||
| 50 | /* Controller has an unusable DMA engine */ | ||
| 51 | #define SDHCI_QUIRK_BROKEN_DMA (1<<5) | ||
| 52 | /* Controller can only DMA from 32-bit aligned addresses */ | ||
| 53 | #define SDHCI_QUIRK_32BIT_DMA_ADDR (1<<6) | ||
| 54 | /* Controller can only DMA chunk sizes that are a multiple of 32 bits */ | ||
| 55 | #define SDHCI_QUIRK_32BIT_DMA_SIZE (1<<7) | ||
| 56 | /* Controller needs to be reset after each request to stay stable */ | ||
| 57 | #define SDHCI_QUIRK_RESET_AFTER_REQUEST (1<<8) | ||
| 58 | /* Controller needs voltage and power writes to happen separately */ | ||
| 59 | #define SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER (1<<9) | ||
| 60 | /* Controller has an off-by-one issue with timeout value */ | ||
| 61 | #define SDHCI_QUIRK_INCR_TIMEOUT_CONTROL (1<<10) | ||
| 62 | |||
| 63 | static const struct pci_device_id pci_ids[] __devinitdata = { | ||
| 64 | { | ||
| 65 | .vendor = PCI_VENDOR_ID_RICOH, | ||
| 66 | .device = PCI_DEVICE_ID_RICOH_R5C822, | ||
| 67 | .subvendor = PCI_VENDOR_ID_IBM, | ||
| 68 | .subdevice = PCI_ANY_ID, | ||
| 69 | .driver_data = SDHCI_QUIRK_CLOCK_BEFORE_RESET | | ||
| 70 | SDHCI_QUIRK_FORCE_DMA, | ||
| 71 | }, | ||
| 72 | |||
| 73 | { | ||
| 74 | .vendor = PCI_VENDOR_ID_RICOH, | ||
| 75 | .device = PCI_DEVICE_ID_RICOH_R5C822, | ||
| 76 | .subvendor = PCI_VENDOR_ID_SAMSUNG, | ||
| 77 | .subdevice = PCI_ANY_ID, | ||
| 78 | .driver_data = SDHCI_QUIRK_FORCE_DMA | | ||
| 79 | SDHCI_QUIRK_NO_CARD_NO_RESET, | ||
| 80 | }, | ||
| 81 | |||
| 82 | { | ||
| 83 | .vendor = PCI_VENDOR_ID_RICOH, | ||
| 84 | .device = PCI_DEVICE_ID_RICOH_R5C822, | ||
| 85 | .subvendor = PCI_ANY_ID, | ||
| 86 | .subdevice = PCI_ANY_ID, | ||
| 87 | .driver_data = SDHCI_QUIRK_FORCE_DMA, | ||
| 88 | }, | ||
| 89 | |||
| 90 | { | ||
| 91 | .vendor = PCI_VENDOR_ID_TI, | ||
| 92 | .device = PCI_DEVICE_ID_TI_XX21_XX11_SD, | ||
| 93 | .subvendor = PCI_ANY_ID, | ||
| 94 | .subdevice = PCI_ANY_ID, | ||
| 95 | .driver_data = SDHCI_QUIRK_FORCE_DMA, | ||
| 96 | }, | ||
| 97 | |||
| 98 | { | ||
| 99 | .vendor = PCI_VENDOR_ID_ENE, | ||
| 100 | .device = PCI_DEVICE_ID_ENE_CB712_SD, | ||
| 101 | .subvendor = PCI_ANY_ID, | ||
| 102 | .subdevice = PCI_ANY_ID, | ||
| 103 | .driver_data = SDHCI_QUIRK_SINGLE_POWER_WRITE | | ||
| 104 | SDHCI_QUIRK_BROKEN_DMA, | ||
| 105 | }, | ||
| 106 | |||
| 107 | { | ||
| 108 | .vendor = PCI_VENDOR_ID_ENE, | ||
| 109 | .device = PCI_DEVICE_ID_ENE_CB712_SD_2, | ||
| 110 | .subvendor = PCI_ANY_ID, | ||
| 111 | .subdevice = PCI_ANY_ID, | ||
| 112 | .driver_data = SDHCI_QUIRK_SINGLE_POWER_WRITE | | ||
| 113 | SDHCI_QUIRK_BROKEN_DMA, | ||
| 114 | }, | ||
| 115 | |||
| 116 | { | ||
| 117 | .vendor = PCI_VENDOR_ID_ENE, | ||
| 118 | .device = PCI_DEVICE_ID_ENE_CB714_SD, | ||
| 119 | .subvendor = PCI_ANY_ID, | ||
| 120 | .subdevice = PCI_ANY_ID, | ||
| 121 | .driver_data = SDHCI_QUIRK_SINGLE_POWER_WRITE | | ||
| 122 | SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS | | ||
| 123 | SDHCI_QUIRK_BROKEN_DMA, | ||
| 124 | }, | ||
| 125 | |||
| 126 | { | ||
| 127 | .vendor = PCI_VENDOR_ID_ENE, | ||
| 128 | .device = PCI_DEVICE_ID_ENE_CB714_SD_2, | ||
| 129 | .subvendor = PCI_ANY_ID, | ||
| 130 | .subdevice = PCI_ANY_ID, | ||
| 131 | .driver_data = SDHCI_QUIRK_SINGLE_POWER_WRITE | | ||
| 132 | SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS | | ||
| 133 | SDHCI_QUIRK_BROKEN_DMA, | ||
| 134 | }, | ||
| 135 | |||
| 136 | { | ||
| 137 | .vendor = PCI_VENDOR_ID_MARVELL, | ||
| 138 | .device = PCI_DEVICE_ID_MARVELL_CAFE_SD, | ||
| 139 | .subvendor = PCI_ANY_ID, | ||
| 140 | .subdevice = PCI_ANY_ID, | ||
| 141 | .driver_data = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER | | ||
| 142 | SDHCI_QUIRK_INCR_TIMEOUT_CONTROL, | ||
| 143 | }, | ||
| 144 | |||
| 145 | { | ||
| 146 | .vendor = PCI_VENDOR_ID_JMICRON, | ||
| 147 | .device = PCI_DEVICE_ID_JMICRON_JMB38X_SD, | ||
| 148 | .subvendor = PCI_ANY_ID, | ||
| 149 | .subdevice = PCI_ANY_ID, | ||
| 150 | .driver_data = SDHCI_QUIRK_32BIT_DMA_ADDR | | ||
| 151 | SDHCI_QUIRK_32BIT_DMA_SIZE | | ||
| 152 | SDHCI_QUIRK_RESET_AFTER_REQUEST, | ||
| 153 | }, | ||
| 154 | |||
| 155 | { /* Generic SD host controller */ | ||
| 156 | PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00) | ||
| 157 | }, | ||
| 158 | |||
| 159 | { /* end: all zeroes */ }, | ||
| 160 | }; | ||
| 161 | |||
| 162 | MODULE_DEVICE_TABLE(pci, pci_ids); | ||
| 163 | |||
| 164 | static void sdhci_prepare_data(struct sdhci_host *, struct mmc_data *); | 35 | static void sdhci_prepare_data(struct sdhci_host *, struct mmc_data *); |
| 165 | static void sdhci_finish_data(struct sdhci_host *); | 36 | static void sdhci_finish_data(struct sdhci_host *); |
| 166 | 37 | ||
| @@ -215,7 +86,7 @@ static void sdhci_reset(struct sdhci_host *host, u8 mask) | |||
| 215 | { | 86 | { |
| 216 | unsigned long timeout; | 87 | unsigned long timeout; |
| 217 | 88 | ||
| 218 | if (host->chip->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) { | 89 | if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) { |
| 219 | if (!(readl(host->ioaddr + SDHCI_PRESENT_STATE) & | 90 | if (!(readl(host->ioaddr + SDHCI_PRESENT_STATE) & |
| 220 | SDHCI_CARD_PRESENT)) | 91 | SDHCI_CARD_PRESENT)) |
| 221 | return; | 92 | return; |
| @@ -253,7 +124,8 @@ static void sdhci_init(struct sdhci_host *host) | |||
| 253 | SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT | | 124 | SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT | |
| 254 | SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT | | 125 | SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT | |
| 255 | SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | | 126 | SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | |
| 256 | SDHCI_INT_DMA_END | SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE; | 127 | SDHCI_INT_DMA_END | SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE | |
| 128 | SDHCI_INT_ADMA_ERROR; | ||
| 257 | 129 | ||
| 258 | writel(intmask, host->ioaddr + SDHCI_INT_ENABLE); | 130 | writel(intmask, host->ioaddr + SDHCI_INT_ENABLE); |
| 259 | writel(intmask, host->ioaddr + SDHCI_SIGNAL_ENABLE); | 131 | writel(intmask, host->ioaddr + SDHCI_SIGNAL_ENABLE); |
| @@ -443,23 +315,226 @@ static void sdhci_transfer_pio(struct sdhci_host *host) | |||
| 443 | DBG("PIO transfer complete.\n"); | 315 | DBG("PIO transfer complete.\n"); |
| 444 | } | 316 | } |
| 445 | 317 | ||
| 446 | static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data) | 318 | static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags) |
| 447 | { | 319 | { |
| 448 | u8 count; | 320 | local_irq_save(*flags); |
| 449 | unsigned target_timeout, current_timeout; | 321 | return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset; |
| 322 | } | ||
| 450 | 323 | ||
| 451 | WARN_ON(host->data); | 324 | static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags) |
| 325 | { | ||
| 326 | kunmap_atomic(buffer, KM_BIO_SRC_IRQ); | ||
| 327 | local_irq_restore(*flags); | ||
| 328 | } | ||
| 452 | 329 | ||
| 453 | if (data == NULL) | 330 | static int sdhci_adma_table_pre(struct sdhci_host *host, |
| 454 | return; | 331 | struct mmc_data *data) |
| 332 | { | ||
| 333 | int direction; | ||
| 455 | 334 | ||
| 456 | /* Sanity checks */ | 335 | u8 *desc; |
| 457 | BUG_ON(data->blksz * data->blocks > 524288); | 336 | u8 *align; |
| 458 | BUG_ON(data->blksz > host->mmc->max_blk_size); | 337 | dma_addr_t addr; |
| 459 | BUG_ON(data->blocks > 65535); | 338 | dma_addr_t align_addr; |
| 339 | int len, offset; | ||
| 460 | 340 | ||
| 461 | host->data = data; | 341 | struct scatterlist *sg; |
| 462 | host->data_early = 0; | 342 | int i; |
| 343 | char *buffer; | ||
| 344 | unsigned long flags; | ||
| 345 | |||
| 346 | /* | ||
| 347 | * The spec does not specify endianness of descriptor table. | ||
| 348 | * We currently guess that it is LE. | ||
| 349 | */ | ||
| 350 | |||
| 351 | if (data->flags & MMC_DATA_READ) | ||
| 352 | direction = DMA_FROM_DEVICE; | ||
| 353 | else | ||
| 354 | direction = DMA_TO_DEVICE; | ||
| 355 | |||
| 356 | /* | ||
| 357 | * The ADMA descriptor table is mapped further down as we | ||
| 358 | * need to fill it with data first. | ||
| 359 | */ | ||
| 360 | |||
| 361 | host->align_addr = dma_map_single(mmc_dev(host->mmc), | ||
| 362 | host->align_buffer, 128 * 4, direction); | ||
| 363 | if (dma_mapping_error(host->align_addr)) | ||
| 364 | goto fail; | ||
| 365 | BUG_ON(host->align_addr & 0x3); | ||
| 366 | |||
| 367 | host->sg_count = dma_map_sg(mmc_dev(host->mmc), | ||
| 368 | data->sg, data->sg_len, direction); | ||
| 369 | if (host->sg_count == 0) | ||
| 370 | goto unmap_align; | ||
| 371 | |||
| 372 | desc = host->adma_desc; | ||
| 373 | align = host->align_buffer; | ||
| 374 | |||
| 375 | align_addr = host->align_addr; | ||
| 376 | |||
| 377 | for_each_sg(data->sg, sg, host->sg_count, i) { | ||
| 378 | addr = sg_dma_address(sg); | ||
| 379 | len = sg_dma_len(sg); | ||
| 380 | |||
| 381 | /* | ||
| 382 | * The SDHCI specification states that ADMA | ||
| 383 | * addresses must be 32-bit aligned. If they | ||
| 384 | * aren't, then we use a bounce buffer for | ||
| 385 | * the (up to three) bytes that screw up the | ||
| 386 | * alignment. | ||
| 387 | */ | ||
| 388 | offset = (4 - (addr & 0x3)) & 0x3; | ||
| 389 | if (offset) { | ||
| 390 | if (data->flags & MMC_DATA_WRITE) { | ||
| 391 | buffer = sdhci_kmap_atomic(sg, &flags); | ||
| 392 | memcpy(align, buffer, offset); | ||
| 393 | sdhci_kunmap_atomic(buffer, &flags); | ||
| 394 | } | ||
| 395 | |||
| 396 | desc[7] = (align_addr >> 24) & 0xff; | ||
| 397 | desc[6] = (align_addr >> 16) & 0xff; | ||
| 398 | desc[5] = (align_addr >> 8) & 0xff; | ||
| 399 | desc[4] = (align_addr >> 0) & 0xff; | ||
| 400 | |||
| 401 | BUG_ON(offset > 65536); | ||
| 402 | |||
| 403 | desc[3] = (offset >> 8) & 0xff; | ||
| 404 | desc[2] = (offset >> 0) & 0xff; | ||
| 405 | |||
| 406 | desc[1] = 0x00; | ||
| 407 | desc[0] = 0x21; /* tran, valid */ | ||
| 408 | |||
| 409 | align += 4; | ||
| 410 | align_addr += 4; | ||
| 411 | |||
| 412 | desc += 8; | ||
| 413 | |||
| 414 | addr += offset; | ||
| 415 | len -= offset; | ||
| 416 | } | ||
| 417 | |||
| 418 | desc[7] = (addr >> 24) & 0xff; | ||
| 419 | desc[6] = (addr >> 16) & 0xff; | ||
| 420 | desc[5] = (addr >> 8) & 0xff; | ||
| 421 | desc[4] = (addr >> 0) & 0xff; | ||
| 422 | |||
| 423 | BUG_ON(len > 65536); | ||
| 424 | |||
| 425 | desc[3] = (len >> 8) & 0xff; | ||
| 426 | desc[2] = (len >> 0) & 0xff; | ||
| 427 | |||
| 428 | desc[1] = 0x00; | ||
| 429 | desc[0] = 0x21; /* tran, valid */ | ||
| 430 | |||
| 431 | desc += 8; | ||
| 432 | |||
| 433 | /* | ||
| 434 | * If this triggers then we have a calculation bug | ||
| 435 | * somewhere. :/ | ||
| 436 | */ | ||
| 437 | WARN_ON((desc - host->adma_desc) > (128 * 2 + 1) * 4); | ||
| 438 | } | ||
| 439 | |||
| 440 | /* | ||
| 441 | * Add a terminating entry. | ||
| 442 | */ | ||
| 443 | desc[7] = 0; | ||
| 444 | desc[6] = 0; | ||
| 445 | desc[5] = 0; | ||
| 446 | desc[4] = 0; | ||
| 447 | |||
| 448 | desc[3] = 0; | ||
| 449 | desc[2] = 0; | ||
| 450 | |||
| 451 | desc[1] = 0x00; | ||
| 452 | desc[0] = 0x03; /* nop, end, valid */ | ||
| 453 | |||
| 454 | /* | ||
| 455 | * Resync align buffer as we might have changed it. | ||
| 456 | */ | ||
| 457 | if (data->flags & MMC_DATA_WRITE) { | ||
| 458 | dma_sync_single_for_device(mmc_dev(host->mmc), | ||
| 459 | host->align_addr, 128 * 4, direction); | ||
| 460 | } | ||
| 461 | |||
| 462 | host->adma_addr = dma_map_single(mmc_dev(host->mmc), | ||
| 463 | host->adma_desc, (128 * 2 + 1) * 4, DMA_TO_DEVICE); | ||
| 464 | if (dma_mapping_error(host->adma_addr)) | ||
| 465 | goto unmap_entries; | ||
| 466 | BUG_ON(host->adma_addr & 0x3); | ||
| 467 | |||
| 468 | return 0; | ||
| 469 | |||
| 470 | unmap_entries: | ||
| 471 | dma_unmap_sg(mmc_dev(host->mmc), data->sg, | ||
| 472 | data->sg_len, direction); | ||
| 473 | unmap_align: | ||
| 474 | dma_unmap_single(mmc_dev(host->mmc), host->align_addr, | ||
| 475 | 128 * 4, direction); | ||
| 476 | fail: | ||
| 477 | return -EINVAL; | ||
| 478 | } | ||
| 479 | |||
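The loop above builds each 32-bit ADMA2 descriptor byte by byte: bytes 0-1 carry the attribute word (0x21 for a valid transfer entry, 0x03 for the valid nop-plus-end terminator), bytes 2-3 the 16-bit length, and bytes 4-7 the little-endian buffer address. A minimal standalone sketch of that layout; the helper and constant names are illustrative and not part of this driver:

#include <stdint.h>

/* Attribute bits of a 32-bit ADMA2 descriptor (hypothetical names). */
#define ADMA2_VALID     0x01    /* descriptor is valid */
#define ADMA2_END       0x02    /* last descriptor in the table */
#define ADMA2_ACT_NOP   0x00    /* action: no operation */
#define ADMA2_ACT_TRAN  0x20    /* action: transfer data */

/* Pack one descriptor the same way the byte stores above do (all LE). */
static void adma2_write_desc(uint8_t *desc, uint32_t addr,
                             uint16_t len, uint16_t attr)
{
        desc[0] = attr & 0xff;          /* bytes 0-1: attribute word */
        desc[1] = attr >> 8;
        desc[2] = len & 0xff;           /* bytes 2-3: 16-bit length */
        desc[3] = len >> 8;
        desc[4] = addr & 0xff;          /* bytes 4-7: 32-bit address */
        desc[5] = (addr >> 8) & 0xff;
        desc[6] = (addr >> 16) & 0xff;
        desc[7] = (addr >> 24) & 0xff;
}

/* Usage: one data chunk followed by the terminating entry. */
static void adma2_example(uint8_t table[16], uint32_t buf, uint16_t len)
{
        adma2_write_desc(table, buf, len,
                         ADMA2_ACT_TRAN | ADMA2_VALID);                 /* 0x21 */
        adma2_write_desc(table + 8, 0, 0,
                         ADMA2_ACT_NOP | ADMA2_END | ADMA2_VALID);      /* 0x03 */
}

Per the ADMA2 descriptor format, a zero length field denotes a full 64 KiB transfer, which is why the code above only needs to guard against chunks larger than 65536 bytes.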
| 480 | static void sdhci_adma_table_post(struct sdhci_host *host, | ||
| 481 | struct mmc_data *data) | ||
| 482 | { | ||
| 483 | int direction; | ||
| 484 | |||
| 485 | struct scatterlist *sg; | ||
| 486 | int i, size; | ||
| 487 | u8 *align; | ||
| 488 | char *buffer; | ||
| 489 | unsigned long flags; | ||
| 490 | |||
| 491 | if (data->flags & MMC_DATA_READ) | ||
| 492 | direction = DMA_FROM_DEVICE; | ||
| 493 | else | ||
| 494 | direction = DMA_TO_DEVICE; | ||
| 495 | |||
| 496 | dma_unmap_single(mmc_dev(host->mmc), host->adma_addr, | ||
| 497 | (128 * 2 + 1) * 4, DMA_TO_DEVICE); | ||
| 498 | |||
| 499 | dma_unmap_single(mmc_dev(host->mmc), host->align_addr, | ||
| 500 | 128 * 4, direction); | ||
| 501 | |||
| 502 | if (data->flags & MMC_DATA_READ) { | ||
| 503 | dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg, | ||
| 504 | data->sg_len, direction); | ||
| 505 | |||
| 506 | align = host->align_buffer; | ||
| 507 | |||
| 508 | for_each_sg(data->sg, sg, host->sg_count, i) { | ||
| 509 | if (sg_dma_address(sg) & 0x3) { | ||
| 510 | size = 4 - (sg_dma_address(sg) & 0x3); | ||
| 511 | |||
| 512 | buffer = sdhci_kmap_atomic(sg, &flags); | ||
| 513 | memcpy(buffer, align, size); | ||
| 514 | sdhci_kunmap_atomic(buffer, &flags); | ||
| 515 | |||
| 516 | align += 4; | ||
| 517 | } | ||
| 518 | } | ||
| 519 | } | ||
| 520 | |||
| 521 | dma_unmap_sg(mmc_dev(host->mmc), data->sg, | ||
| 522 | data->sg_len, direction); | ||
| 523 | } | ||
| 524 | |||
| 525 | static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data) | ||
| 526 | { | ||
| 527 | u8 count; | ||
| 528 | unsigned target_timeout, current_timeout; | ||
| 529 | |||
| 530 | /* | ||
| 531 | * If the host controller provides us with an incorrect timeout | ||
| 532 | * value, just skip the check and use 0xE. The hardware may take | ||
| 533 | * longer to time out, but that's much better than having a too-short | ||
| 534 | * timeout value. | ||
| 535 | */ | ||
| 536 | if ((host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)) | ||
| 537 | return 0xE; | ||
| 463 | 538 | ||
| 464 | /* timeout in us */ | 539 | /* timeout in us */ |
| 465 | target_timeout = data->timeout_ns / 1000 + | 540 | target_timeout = data->timeout_ns / 1000 + |
| @@ -484,52 +559,158 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data) | |||
| 484 | break; | 559 | break; |
| 485 | } | 560 | } |
| 486 | 561 | ||
| 487 | /* | ||
| 488 | * Compensate for an off-by-one error in the CaFe hardware; otherwise, | ||
| 489 | * a too-small count gives us interrupt timeouts. | ||
| 490 | */ | ||
| 491 | if ((host->chip->quirks & SDHCI_QUIRK_INCR_TIMEOUT_CONTROL)) | ||
| 492 | count++; | ||
| 493 | |||
| 494 | if (count >= 0xF) { | 562 | if (count >= 0xF) { |
| 495 | printk(KERN_WARNING "%s: Too large timeout requested!\n", | 563 | printk(KERN_WARNING "%s: Too large timeout requested!\n", |
| 496 | mmc_hostname(host->mmc)); | 564 | mmc_hostname(host->mmc)); |
| 497 | count = 0xE; | 565 | count = 0xE; |
| 498 | } | 566 | } |
| 499 | 567 | ||
| 568 | return count; | ||
| 569 | } | ||
| 570 | |||
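sdhci_calc_timeout() converts the card's requested timeout into microseconds and then searches for the smallest count such that the controller's data timeout, TMCLK x 2^(13 + count), covers it; only the tail of that search loop is visible in the hunk above. A rough standalone sketch of the arithmetic, with illustrative parameter names and the loop body reconstructed from context:

/* Sketch only, not driver code: timeout_clk_khz is the timeout clock in kHz. */
static unsigned char calc_timeout_count(unsigned int target_us,
                                        unsigned int timeout_clk_khz)
{
        unsigned char count = 0;
        /* 2^13 timeout-clock cycles, expressed in microseconds */
        unsigned int current_us = (1 << 13) * 1000 / timeout_clk_khz;

        while (current_us < target_us) {
                count++;
                current_us <<= 1;       /* each step doubles the timeout */
                if (count >= 0xF)
                        break;
        }
        if (count >= 0xF)               /* 0xE is the largest usable value */
                count = 0xE;
        return count;
}

For example, with a 48 MHz timeout clock and a 500 ms target, current_us starts at about 170 and the loop settles on count = 12, i.e. a hardware timeout of 2^25 timeout-clock cycles (roughly 0.7 s).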
| 571 | static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data) | ||
| 572 | { | ||
| 573 | u8 count; | ||
| 574 | u8 ctrl; | ||
| 575 | int ret; | ||
| 576 | |||
| 577 | WARN_ON(host->data); | ||
| 578 | |||
| 579 | if (data == NULL) | ||
| 580 | return; | ||
| 581 | |||
| 582 | /* Sanity checks */ | ||
| 583 | BUG_ON(data->blksz * data->blocks > 524288); | ||
| 584 | BUG_ON(data->blksz > host->mmc->max_blk_size); | ||
| 585 | BUG_ON(data->blocks > 65535); | ||
| 586 | |||
| 587 | host->data = data; | ||
| 588 | host->data_early = 0; | ||
| 589 | |||
| 590 | count = sdhci_calc_timeout(host, data); | ||
| 500 | writeb(count, host->ioaddr + SDHCI_TIMEOUT_CONTROL); | 591 | writeb(count, host->ioaddr + SDHCI_TIMEOUT_CONTROL); |
| 501 | 592 | ||
| 502 | if (host->flags & SDHCI_USE_DMA) | 593 | if (host->flags & SDHCI_USE_DMA) |
| 503 | host->flags |= SDHCI_REQ_USE_DMA; | 594 | host->flags |= SDHCI_REQ_USE_DMA; |
| 504 | 595 | ||
| 505 | if (unlikely((host->flags & SDHCI_REQ_USE_DMA) && | 596 | /* |
| 506 | (host->chip->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE) && | 597 | * FIXME: This doesn't account for merging when mapping the |
| 507 | ((data->blksz * data->blocks) & 0x3))) { | 598 | * scatterlist. |
| 508 | DBG("Reverting to PIO because of transfer size (%d)\n", | 599 | */ |
| 509 | data->blksz * data->blocks); | 600 | if (host->flags & SDHCI_REQ_USE_DMA) { |
| 510 | host->flags &= ~SDHCI_REQ_USE_DMA; | 601 | int broken, i; |
| 602 | struct scatterlist *sg; | ||
| 603 | |||
| 604 | broken = 0; | ||
| 605 | if (host->flags & SDHCI_USE_ADMA) { | ||
| 606 | if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) | ||
| 607 | broken = 1; | ||
| 608 | } else { | ||
| 609 | if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE) | ||
| 610 | broken = 1; | ||
| 611 | } | ||
| 612 | |||
| 613 | if (unlikely(broken)) { | ||
| 614 | for_each_sg(data->sg, sg, data->sg_len, i) { | ||
| 615 | if (sg->length & 0x3) { | ||
| 616 | DBG("Reverting to PIO because of " | ||
| 617 | "transfer size (%d)\n", | ||
| 618 | sg->length); | ||
| 619 | host->flags &= ~SDHCI_REQ_USE_DMA; | ||
| 620 | break; | ||
| 621 | } | ||
| 622 | } | ||
| 623 | } | ||
| 511 | } | 624 | } |
| 512 | 625 | ||
| 513 | /* | 626 | /* |
| 514 | * The assumption here being that alignment is the same after | 627 | * The assumption here being that alignment is the same after |
| 515 | * translation to device address space. | 628 | * translation to device address space. |
| 516 | */ | 629 | */ |
| 517 | if (unlikely((host->flags & SDHCI_REQ_USE_DMA) && | 630 | if (host->flags & SDHCI_REQ_USE_DMA) { |
| 518 | (host->chip->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) && | 631 | int broken, i; |
| 519 | (data->sg->offset & 0x3))) { | 632 | struct scatterlist *sg; |
| 520 | DBG("Reverting to PIO because of bad alignment\n"); | 633 | |
| 521 | host->flags &= ~SDHCI_REQ_USE_DMA; | 634 | broken = 0; |
| 635 | if (host->flags & SDHCI_USE_ADMA) { | ||
| 636 | /* | ||
| 637 | * As we use 3 byte chunks to work around | ||
| 638 | * alignment problems, we need to check this | ||
| 639 | * quirk. | ||
| 640 | */ | ||
| 641 | if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) | ||
| 642 | broken = 1; | ||
| 643 | } else { | ||
| 644 | if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) | ||
| 645 | broken = 1; | ||
| 646 | } | ||
| 647 | |||
| 648 | if (unlikely(broken)) { | ||
| 649 | for_each_sg(data->sg, sg, data->sg_len, i) { | ||
| 650 | if (sg->offset & 0x3) { | ||
| 651 | DBG("Reverting to PIO because of " | ||
| 652 | "bad alignment\n"); | ||
| 653 | host->flags &= ~SDHCI_REQ_USE_DMA; | ||
| 654 | break; | ||
| 655 | } | ||
| 656 | } | ||
| 657 | } | ||
| 522 | } | 658 | } |
| 523 | 659 | ||
| 524 | if (host->flags & SDHCI_REQ_USE_DMA) { | 660 | if (host->flags & SDHCI_REQ_USE_DMA) { |
| 525 | int count; | 661 | if (host->flags & SDHCI_USE_ADMA) { |
| 662 | ret = sdhci_adma_table_pre(host, data); | ||
| 663 | if (ret) { | ||
| 664 | /* | ||
| 665 | * This only happens when someone fed | ||
| 666 | * us an invalid request. | ||
| 667 | */ | ||
| 668 | WARN_ON(1); | ||
| 669 | host->flags &= ~SDHCI_USE_DMA; | ||
| 670 | } else { | ||
| 671 | writel(host->adma_addr, | ||
| 672 | host->ioaddr + SDHCI_ADMA_ADDRESS); | ||
| 673 | } | ||
| 674 | } else { | ||
| 675 | int sg_cnt; | ||
| 676 | |||
| 677 | sg_cnt = dma_map_sg(mmc_dev(host->mmc), | ||
| 678 | data->sg, data->sg_len, | ||
| 679 | (data->flags & MMC_DATA_READ) ? | ||
| 680 | DMA_FROM_DEVICE : | ||
| 681 | DMA_TO_DEVICE); | ||
| 682 | if (sg_cnt == 0) { | ||
| 683 | /* | ||
| 684 | * This only happens when someone fed | ||
| 685 | * us an invalid request. | ||
| 686 | */ | ||
| 687 | WARN_ON(1); | ||
| 688 | host->flags &= ~SDHCI_USE_DMA; | ||
| 689 | } else { | ||
| 690 | WARN_ON(sg_cnt != 1); | ||
| 691 | writel(sg_dma_address(data->sg), | ||
| 692 | host->ioaddr + SDHCI_DMA_ADDRESS); | ||
| 693 | } | ||
| 694 | } | ||
| 695 | } | ||
| 526 | 696 | ||
| 527 | count = pci_map_sg(host->chip->pdev, data->sg, data->sg_len, | 697 | /* |
| 528 | (data->flags & MMC_DATA_READ)?PCI_DMA_FROMDEVICE:PCI_DMA_TODEVICE); | 698 | * Always adjust the DMA selection as some controllers |
| 529 | BUG_ON(count != 1); | 699 | * (e.g. JMicron) can't do PIO properly when the selection |
| 700 | * is ADMA. | ||
| 701 | */ | ||
| 702 | if (host->version >= SDHCI_SPEC_200) { | ||
| 703 | ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL); | ||
| 704 | ctrl &= ~SDHCI_CTRL_DMA_MASK; | ||
| 705 | if ((host->flags & SDHCI_REQ_USE_DMA) && | ||
| 706 | (host->flags & SDHCI_USE_ADMA)) | ||
| 707 | ctrl |= SDHCI_CTRL_ADMA32; | ||
| 708 | else | ||
| 709 | ctrl |= SDHCI_CTRL_SDMA; | ||
| 710 | writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL); | ||
| 711 | } | ||
| 530 | 712 | ||
| 531 | writel(sg_dma_address(data->sg), host->ioaddr + SDHCI_DMA_ADDRESS); | 713 | if (!(host->flags & SDHCI_REQ_USE_DMA)) { |
| 532 | } else { | ||
| 533 | host->cur_sg = data->sg; | 714 | host->cur_sg = data->sg; |
| 534 | host->num_sg = data->sg_len; | 715 | host->num_sg = data->sg_len; |
| 535 | 716 | ||
| @@ -567,7 +748,6 @@ static void sdhci_set_transfer_mode(struct sdhci_host *host, | |||
| 567 | static void sdhci_finish_data(struct sdhci_host *host) | 748 | static void sdhci_finish_data(struct sdhci_host *host) |
| 568 | { | 749 | { |
| 569 | struct mmc_data *data; | 750 | struct mmc_data *data; |
| 570 | u16 blocks; | ||
| 571 | 751 | ||
| 572 | BUG_ON(!host->data); | 752 | BUG_ON(!host->data); |
| 573 | 753 | ||
| @@ -575,25 +755,26 @@ static void sdhci_finish_data(struct sdhci_host *host) | |||
| 575 | host->data = NULL; | 755 | host->data = NULL; |
| 576 | 756 | ||
| 577 | if (host->flags & SDHCI_REQ_USE_DMA) { | 757 | if (host->flags & SDHCI_REQ_USE_DMA) { |
| 578 | pci_unmap_sg(host->chip->pdev, data->sg, data->sg_len, | 758 | if (host->flags & SDHCI_USE_ADMA) |
| 579 | (data->flags & MMC_DATA_READ)?PCI_DMA_FROMDEVICE:PCI_DMA_TODEVICE); | 759 | sdhci_adma_table_post(host, data); |
| 760 | else { | ||
| 761 | dma_unmap_sg(mmc_dev(host->mmc), data->sg, | ||
| 762 | data->sg_len, (data->flags & MMC_DATA_READ) ? | ||
| 763 | DMA_FROM_DEVICE : DMA_TO_DEVICE); | ||
| 764 | } | ||
| 580 | } | 765 | } |
| 581 | 766 | ||
| 582 | /* | 767 | /* |
| 583 | * Controller doesn't count down when in single block mode. | 768 | * The specification states that the block count register must |
| 769 | * be updated, but it does not specify at what point in the | ||
| 770 | * data flow. That makes the register entirely useless to read | ||
| 771 | * back so we have to assume that nothing made it to the card | ||
| 772 | * in the event of an error. | ||
| 584 | */ | 773 | */ |
| 585 | if (data->blocks == 1) | 774 | if (data->error) |
| 586 | blocks = (data->error == 0) ? 0 : 1; | 775 | data->bytes_xfered = 0; |
| 587 | else | 776 | else |
| 588 | blocks = readw(host->ioaddr + SDHCI_BLOCK_COUNT); | 777 | data->bytes_xfered = data->blksz * data->blocks; |
| 589 | data->bytes_xfered = data->blksz * (data->blocks - blocks); | ||
| 590 | |||
| 591 | if (!data->error && blocks) { | ||
| 592 | printk(KERN_ERR "%s: Controller signalled completion even " | ||
| 593 | "though there were blocks left.\n", | ||
| 594 | mmc_hostname(host->mmc)); | ||
| 595 | data->error = -EIO; | ||
| 596 | } | ||
| 597 | 778 | ||
| 598 | if (data->stop) { | 779 | if (data->stop) { |
| 599 | /* | 780 | /* |
| @@ -775,7 +956,7 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned short power) | |||
| 775 | * Spec says that we should clear the power reg before setting | 956 | * Spec says that we should clear the power reg before setting |
| 776 | * a new value. Some controllers don't seem to like this though. | 957 | * a new value. Some controllers don't seem to like this though. |
| 777 | */ | 958 | */ |
| 778 | if (!(host->chip->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE)) | 959 | if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE)) |
| 779 | writeb(0, host->ioaddr + SDHCI_POWER_CONTROL); | 960 | writeb(0, host->ioaddr + SDHCI_POWER_CONTROL); |
| 780 | 961 | ||
| 781 | pwr = SDHCI_POWER_ON; | 962 | pwr = SDHCI_POWER_ON; |
| @@ -797,10 +978,10 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned short power) | |||
| 797 | } | 978 | } |
| 798 | 979 | ||
| 799 | /* | 980 | /* |
| 800 | * At least the CaFe chip gets confused if we set the voltage | 981 | * At least the Marvell CaFe chip gets confused if we set the voltage |
| 801 | * and set turn on power at the same time, so set the voltage first. | 982 | * and set turn on power at the same time, so set the voltage first. |
| 802 | */ | 983 | */ |
| 803 | if ((host->chip->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)) | 984 | if ((host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)) |
| 804 | writeb(pwr & ~SDHCI_POWER_ON, | 985 | writeb(pwr & ~SDHCI_POWER_ON, |
| 805 | host->ioaddr + SDHCI_POWER_CONTROL); | 986 | host->ioaddr + SDHCI_POWER_CONTROL); |
| 806 | 987 | ||
| @@ -833,7 +1014,8 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) | |||
| 833 | 1014 | ||
| 834 | host->mrq = mrq; | 1015 | host->mrq = mrq; |
| 835 | 1016 | ||
| 836 | if (!(readl(host->ioaddr + SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) { | 1017 | if (!(readl(host->ioaddr + SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT) |
| 1018 | || (host->flags & SDHCI_DEVICE_DEAD)) { | ||
| 837 | host->mrq->cmd->error = -ENOMEDIUM; | 1019 | host->mrq->cmd->error = -ENOMEDIUM; |
| 838 | tasklet_schedule(&host->finish_tasklet); | 1020 | tasklet_schedule(&host->finish_tasklet); |
| 839 | } else | 1021 | } else |
| @@ -853,6 +1035,9 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
| 853 | 1035 | ||
| 854 | spin_lock_irqsave(&host->lock, flags); | 1036 | spin_lock_irqsave(&host->lock, flags); |
| 855 | 1037 | ||
| 1038 | if (host->flags & SDHCI_DEVICE_DEAD) | ||
| 1039 | goto out; | ||
| 1040 | |||
| 856 | /* | 1041 | /* |
| 857 | * Reset the chip on each power off. | 1042 | * Reset the chip on each power off. |
| 858 | * Should clear out any weird states. | 1043 | * Should clear out any weird states. |
| @@ -888,9 +1073,10 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
| 888 | * signalling timeout and CRC errors even on CMD0. Resetting | 1073 | * signalling timeout and CRC errors even on CMD0. Resetting |
| 889 | * it on each ios seems to solve the problem. | 1074 | * it on each ios seems to solve the problem. |
| 890 | */ | 1075 | */ |
| 891 | if(host->chip->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS) | 1076 | if(host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS) |
| 892 | sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); | 1077 | sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); |
| 893 | 1078 | ||
| 1079 | out: | ||
| 894 | mmiowb(); | 1080 | mmiowb(); |
| 895 | spin_unlock_irqrestore(&host->lock, flags); | 1081 | spin_unlock_irqrestore(&host->lock, flags); |
| 896 | } | 1082 | } |
| @@ -905,7 +1091,10 @@ static int sdhci_get_ro(struct mmc_host *mmc) | |||
| 905 | 1091 | ||
| 906 | spin_lock_irqsave(&host->lock, flags); | 1092 | spin_lock_irqsave(&host->lock, flags); |
| 907 | 1093 | ||
| 908 | present = readl(host->ioaddr + SDHCI_PRESENT_STATE); | 1094 | if (host->flags & SDHCI_DEVICE_DEAD) |
| 1095 | present = 0; | ||
| 1096 | else | ||
| 1097 | present = readl(host->ioaddr + SDHCI_PRESENT_STATE); | ||
| 909 | 1098 | ||
| 910 | spin_unlock_irqrestore(&host->lock, flags); | 1099 | spin_unlock_irqrestore(&host->lock, flags); |
| 911 | 1100 | ||
| @@ -922,6 +1111,9 @@ static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable) | |||
| 922 | 1111 | ||
| 923 | spin_lock_irqsave(&host->lock, flags); | 1112 | spin_lock_irqsave(&host->lock, flags); |
| 924 | 1113 | ||
| 1114 | if (host->flags & SDHCI_DEVICE_DEAD) | ||
| 1115 | goto out; | ||
| 1116 | |||
| 925 | ier = readl(host->ioaddr + SDHCI_INT_ENABLE); | 1117 | ier = readl(host->ioaddr + SDHCI_INT_ENABLE); |
| 926 | 1118 | ||
| 927 | ier &= ~SDHCI_INT_CARD_INT; | 1119 | ier &= ~SDHCI_INT_CARD_INT; |
| @@ -931,6 +1123,7 @@ static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable) | |||
| 931 | writel(ier, host->ioaddr + SDHCI_INT_ENABLE); | 1123 | writel(ier, host->ioaddr + SDHCI_INT_ENABLE); |
| 932 | writel(ier, host->ioaddr + SDHCI_SIGNAL_ENABLE); | 1124 | writel(ier, host->ioaddr + SDHCI_SIGNAL_ENABLE); |
| 933 | 1125 | ||
| 1126 | out: | ||
| 934 | mmiowb(); | 1127 | mmiowb(); |
| 935 | 1128 | ||
| 936 | spin_unlock_irqrestore(&host->lock, flags); | 1129 | spin_unlock_irqrestore(&host->lock, flags); |
| @@ -996,13 +1189,14 @@ static void sdhci_tasklet_finish(unsigned long param) | |||
| 996 | * The controller needs a reset of internal state machines | 1189 | * The controller needs a reset of internal state machines |
| 997 | * upon error conditions. | 1190 | * upon error conditions. |
| 998 | */ | 1191 | */ |
| 999 | if (mrq->cmd->error || | 1192 | if (!(host->flags & SDHCI_DEVICE_DEAD) && |
| 1000 | (mrq->data && (mrq->data->error || | 1193 | (mrq->cmd->error || |
| 1001 | (mrq->data->stop && mrq->data->stop->error))) || | 1194 | (mrq->data && (mrq->data->error || |
| 1002 | (host->chip->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)) { | 1195 | (mrq->data->stop && mrq->data->stop->error))) || |
| 1196 | (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) { | ||
| 1003 | 1197 | ||
| 1004 | /* Some controllers need this kick or reset won't work here */ | 1198 | /* Some controllers need this kick or reset won't work here */ |
| 1005 | if (host->chip->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) { | 1199 | if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) { |
| 1006 | unsigned int clock; | 1200 | unsigned int clock; |
| 1007 | 1201 | ||
| 1008 | /* This is to force an update */ | 1202 | /* This is to force an update */ |
| @@ -1116,6 +1310,8 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) | |||
| 1116 | host->data->error = -ETIMEDOUT; | 1310 | host->data->error = -ETIMEDOUT; |
| 1117 | else if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT)) | 1311 | else if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT)) |
| 1118 | host->data->error = -EILSEQ; | 1312 | host->data->error = -EILSEQ; |
| 1313 | else if (intmask & SDHCI_INT_ADMA_ERROR) | ||
| 1314 | host->data->error = -EIO; | ||
| 1119 | 1315 | ||
| 1120 | if (host->data->error) | 1316 | if (host->data->error) |
| 1121 | sdhci_finish_data(host); | 1317 | sdhci_finish_data(host); |
| @@ -1234,218 +1430,167 @@ out: | |||
| 1234 | 1430 | ||
| 1235 | #ifdef CONFIG_PM | 1431 | #ifdef CONFIG_PM |
| 1236 | 1432 | ||
| 1237 | static int sdhci_suspend (struct pci_dev *pdev, pm_message_t state) | 1433 | int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state) |
| 1238 | { | 1434 | { |
| 1239 | struct sdhci_chip *chip; | 1435 | int ret; |
| 1240 | int i, ret; | ||
| 1241 | |||
| 1242 | chip = pci_get_drvdata(pdev); | ||
| 1243 | if (!chip) | ||
| 1244 | return 0; | ||
| 1245 | |||
| 1246 | DBG("Suspending...\n"); | ||
| 1247 | |||
| 1248 | for (i = 0;i < chip->num_slots;i++) { | ||
| 1249 | if (!chip->hosts[i]) | ||
| 1250 | continue; | ||
| 1251 | ret = mmc_suspend_host(chip->hosts[i]->mmc, state); | ||
| 1252 | if (ret) { | ||
| 1253 | for (i--;i >= 0;i--) | ||
| 1254 | mmc_resume_host(chip->hosts[i]->mmc); | ||
| 1255 | return ret; | ||
| 1256 | } | ||
| 1257 | } | ||
| 1258 | |||
| 1259 | pci_save_state(pdev); | ||
| 1260 | pci_enable_wake(pdev, pci_choose_state(pdev, state), 0); | ||
| 1261 | 1436 | ||
| 1262 | for (i = 0;i < chip->num_slots;i++) { | 1437 | ret = mmc_suspend_host(host->mmc, state); |
| 1263 | if (!chip->hosts[i]) | 1438 | if (ret) |
| 1264 | continue; | 1439 | return ret; |
| 1265 | free_irq(chip->hosts[i]->irq, chip->hosts[i]); | ||
| 1266 | } | ||
| 1267 | 1440 | ||
| 1268 | pci_disable_device(pdev); | 1441 | free_irq(host->irq, host); |
| 1269 | pci_set_power_state(pdev, pci_choose_state(pdev, state)); | ||
| 1270 | 1442 | ||
| 1271 | return 0; | 1443 | return 0; |
| 1272 | } | 1444 | } |
| 1273 | 1445 | ||
| 1274 | static int sdhci_resume (struct pci_dev *pdev) | 1446 | EXPORT_SYMBOL_GPL(sdhci_suspend_host); |
| 1275 | { | ||
| 1276 | struct sdhci_chip *chip; | ||
| 1277 | int i, ret; | ||
| 1278 | 1447 | ||
| 1279 | chip = pci_get_drvdata(pdev); | 1448 | int sdhci_resume_host(struct sdhci_host *host) |
| 1280 | if (!chip) | 1449 | { |
| 1281 | return 0; | 1450 | int ret; |
| 1282 | 1451 | ||
| 1283 | DBG("Resuming...\n"); | 1452 | if (host->flags & SDHCI_USE_DMA) { |
| 1453 | if (host->ops->enable_dma) | ||
| 1454 | host->ops->enable_dma(host); | ||
| 1455 | } | ||
| 1284 | 1456 | ||
| 1285 | pci_set_power_state(pdev, PCI_D0); | 1457 | ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED, |
| 1286 | pci_restore_state(pdev); | 1458 | mmc_hostname(host->mmc), host); |
| 1287 | ret = pci_enable_device(pdev); | ||
| 1288 | if (ret) | 1459 | if (ret) |
| 1289 | return ret; | 1460 | return ret; |
| 1290 | 1461 | ||
| 1291 | for (i = 0;i < chip->num_slots;i++) { | 1462 | sdhci_init(host); |
| 1292 | if (!chip->hosts[i]) | 1463 | mmiowb(); |
| 1293 | continue; | 1464 | |
| 1294 | if (chip->hosts[i]->flags & SDHCI_USE_DMA) | 1465 | ret = mmc_resume_host(host->mmc); |
| 1295 | pci_set_master(pdev); | 1466 | if (ret) |
| 1296 | ret = request_irq(chip->hosts[i]->irq, sdhci_irq, | 1467 | return ret; |
| 1297 | IRQF_SHARED, mmc_hostname(chip->hosts[i]->mmc), | ||
| 1298 | chip->hosts[i]); | ||
| 1299 | if (ret) | ||
| 1300 | return ret; | ||
| 1301 | sdhci_init(chip->hosts[i]); | ||
| 1302 | mmiowb(); | ||
| 1303 | ret = mmc_resume_host(chip->hosts[i]->mmc); | ||
| 1304 | if (ret) | ||
| 1305 | return ret; | ||
| 1306 | } | ||
| 1307 | 1468 | ||
| 1308 | return 0; | 1469 | return 0; |
| 1309 | } | 1470 | } |
| 1310 | 1471 | ||
| 1311 | #else /* CONFIG_PM */ | 1472 | EXPORT_SYMBOL_GPL(sdhci_resume_host); |
| 1312 | |||
| 1313 | #define sdhci_suspend NULL | ||
| 1314 | #define sdhci_resume NULL | ||
| 1315 | 1473 | ||
| 1316 | #endif /* CONFIG_PM */ | 1474 | #endif /* CONFIG_PM */ |
| 1317 | 1475 | ||
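sdhci_suspend_host() and sdhci_resume_host() now carry only the controller-generic work (suspending the MMC layer, releasing and re-requesting the interrupt, re-running sdhci_init()); everything bus-specific is left to the glue driver. A hedged sketch of how PCI glue might wrap them, assuming the sdhci_host pointer is stored in the PCI drvdata; the real sdhci-pci glue keeps extra state and is not shown in this file:

#include <linux/pci.h>

#include "sdhci.h"

static int example_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct sdhci_host *host = pci_get_drvdata(pdev);
        int ret;

        ret = sdhci_suspend_host(host, state);  /* suspend mmc, free the irq */
        if (ret)
                return ret;

        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, pci_choose_state(pdev, state));
        return 0;
}

static int example_pci_resume(struct pci_dev *pdev)
{
        struct sdhci_host *host = pci_get_drvdata(pdev);
        int ret;

        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
        ret = pci_enable_device(pdev);
        if (ret)
                return ret;

        /* re-requests the irq, re-runs sdhci_init(), resumes the mmc layer */
        return sdhci_resume_host(host);
}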
| 1318 | /*****************************************************************************\ | 1476 | /*****************************************************************************\ |
| 1319 | * * | 1477 | * * |
| 1320 | * Device probing/removal * | 1478 | * Device allocation/registration * |
| 1321 | * * | 1479 | * * |
| 1322 | \*****************************************************************************/ | 1480 | \*****************************************************************************/ |
| 1323 | 1481 | ||
| 1324 | static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot) | 1482 | struct sdhci_host *sdhci_alloc_host(struct device *dev, |
| 1483 | size_t priv_size) | ||
| 1325 | { | 1484 | { |
| 1326 | int ret; | ||
| 1327 | unsigned int version; | ||
| 1328 | struct sdhci_chip *chip; | ||
| 1329 | struct mmc_host *mmc; | 1485 | struct mmc_host *mmc; |
| 1330 | struct sdhci_host *host; | 1486 | struct sdhci_host *host; |
| 1331 | 1487 | ||
| 1332 | u8 first_bar; | 1488 | WARN_ON(dev == NULL); |
| 1333 | unsigned int caps; | ||
| 1334 | |||
| 1335 | chip = pci_get_drvdata(pdev); | ||
| 1336 | BUG_ON(!chip); | ||
| 1337 | |||
| 1338 | ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &first_bar); | ||
| 1339 | if (ret) | ||
| 1340 | return ret; | ||
| 1341 | |||
| 1342 | first_bar &= PCI_SLOT_INFO_FIRST_BAR_MASK; | ||
| 1343 | |||
| 1344 | if (first_bar > 5) { | ||
| 1345 | printk(KERN_ERR DRIVER_NAME ": Invalid first BAR. Aborting.\n"); | ||
| 1346 | return -ENODEV; | ||
| 1347 | } | ||
| 1348 | |||
| 1349 | if (!(pci_resource_flags(pdev, first_bar + slot) & IORESOURCE_MEM)) { | ||
| 1350 | printk(KERN_ERR DRIVER_NAME ": BAR is not iomem. Aborting.\n"); | ||
| 1351 | return -ENODEV; | ||
| 1352 | } | ||
| 1353 | |||
| 1354 | if (pci_resource_len(pdev, first_bar + slot) != 0x100) { | ||
| 1355 | printk(KERN_ERR DRIVER_NAME ": Invalid iomem size. " | ||
| 1356 | "You may experience problems.\n"); | ||
| 1357 | } | ||
| 1358 | |||
| 1359 | if ((pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) { | ||
| 1360 | printk(KERN_ERR DRIVER_NAME ": Vendor specific interface. Aborting.\n"); | ||
| 1361 | return -ENODEV; | ||
| 1362 | } | ||
| 1363 | |||
| 1364 | if ((pdev->class & 0x0000FF) > PCI_SDHCI_IFVENDOR) { | ||
| 1365 | printk(KERN_ERR DRIVER_NAME ": Unknown interface. Aborting.\n"); | ||
| 1366 | return -ENODEV; | ||
| 1367 | } | ||
| 1368 | 1489 | ||
| 1369 | mmc = mmc_alloc_host(sizeof(struct sdhci_host), &pdev->dev); | 1490 | mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev); |
| 1370 | if (!mmc) | 1491 | if (!mmc) |
| 1371 | return -ENOMEM; | 1492 | return ERR_PTR(-ENOMEM); |
| 1372 | 1493 | ||
| 1373 | host = mmc_priv(mmc); | 1494 | host = mmc_priv(mmc); |
| 1374 | host->mmc = mmc; | 1495 | host->mmc = mmc; |
| 1375 | 1496 | ||
| 1376 | host->chip = chip; | 1497 | return host; |
| 1377 | chip->hosts[slot] = host; | 1498 | } |
| 1378 | 1499 | ||
| 1379 | host->bar = first_bar + slot; | 1500 | EXPORT_SYMBOL_GPL(sdhci_alloc_host); |
| 1380 | 1501 | ||
| 1381 | host->addr = pci_resource_start(pdev, host->bar); | 1502 | int sdhci_add_host(struct sdhci_host *host) |
| 1382 | host->irq = pdev->irq; | 1503 | { |
| 1504 | struct mmc_host *mmc; | ||
| 1505 | unsigned int caps; | ||
| 1506 | int ret; | ||
| 1383 | 1507 | ||
| 1384 | DBG("slot %d at 0x%08lx, irq %d\n", slot, host->addr, host->irq); | 1508 | WARN_ON(host == NULL); |
| 1509 | if (host == NULL) | ||
| 1510 | return -EINVAL; | ||
| 1385 | 1511 | ||
| 1386 | ret = pci_request_region(pdev, host->bar, mmc_hostname(mmc)); | 1512 | mmc = host->mmc; |
| 1387 | if (ret) | ||
| 1388 | goto free; | ||
| 1389 | 1513 | ||
| 1390 | host->ioaddr = ioremap_nocache(host->addr, | 1514 | if (debug_quirks) |
| 1391 | pci_resource_len(pdev, host->bar)); | 1515 | host->quirks = debug_quirks; |
| 1392 | if (!host->ioaddr) { | ||
| 1393 | ret = -ENOMEM; | ||
| 1394 | goto release; | ||
| 1395 | } | ||
| 1396 | 1516 | ||
| 1397 | sdhci_reset(host, SDHCI_RESET_ALL); | 1517 | sdhci_reset(host, SDHCI_RESET_ALL); |
| 1398 | 1518 | ||
| 1399 | version = readw(host->ioaddr + SDHCI_HOST_VERSION); | 1519 | host->version = readw(host->ioaddr + SDHCI_HOST_VERSION); |
| 1400 | version = (version & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT; | 1520 | host->version = (host->version & SDHCI_SPEC_VER_MASK) |
| 1401 | if (version > 1) { | 1521 | >> SDHCI_SPEC_VER_SHIFT; |
| 1522 | if (host->version > SDHCI_SPEC_200) { | ||
| 1402 | printk(KERN_ERR "%s: Unknown controller version (%d). " | 1523 | printk(KERN_ERR "%s: Unknown controller version (%d). " |
| 1403 | "You may experience problems.\n", mmc_hostname(mmc), | 1524 | "You may experience problems.\n", mmc_hostname(mmc), |
| 1404 | version); | 1525 | host->version); |
| 1405 | } | 1526 | } |
| 1406 | 1527 | ||
| 1407 | caps = readl(host->ioaddr + SDHCI_CAPABILITIES); | 1528 | caps = readl(host->ioaddr + SDHCI_CAPABILITIES); |
| 1408 | 1529 | ||
| 1409 | if (chip->quirks & SDHCI_QUIRK_FORCE_DMA) | 1530 | if (host->quirks & SDHCI_QUIRK_FORCE_DMA) |
| 1410 | host->flags |= SDHCI_USE_DMA; | 1531 | host->flags |= SDHCI_USE_DMA; |
| 1411 | else if (!(caps & SDHCI_CAN_DO_DMA)) | 1532 | else if (!(caps & SDHCI_CAN_DO_DMA)) |
| 1412 | DBG("Controller doesn't have DMA capability\n"); | 1533 | DBG("Controller doesn't have DMA capability\n"); |
| 1413 | else | 1534 | else |
| 1414 | host->flags |= SDHCI_USE_DMA; | 1535 | host->flags |= SDHCI_USE_DMA; |
| 1415 | 1536 | ||
| 1416 | if ((chip->quirks & SDHCI_QUIRK_BROKEN_DMA) && | 1537 | if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) && |
| 1417 | (host->flags & SDHCI_USE_DMA)) { | 1538 | (host->flags & SDHCI_USE_DMA)) { |
| 1418 | DBG("Disabling DMA as it is marked broken\n"); | 1539 | DBG("Disabling DMA as it is marked broken\n"); |
| 1419 | host->flags &= ~SDHCI_USE_DMA; | 1540 | host->flags &= ~SDHCI_USE_DMA; |
| 1420 | } | 1541 | } |
| 1421 | 1542 | ||
| 1422 | if (((pdev->class & 0x0000FF) != PCI_SDHCI_IFDMA) && | 1543 | if (host->flags & SDHCI_USE_DMA) { |
| 1423 | (host->flags & SDHCI_USE_DMA)) { | 1544 | if ((host->version >= SDHCI_SPEC_200) && |
| 1424 | printk(KERN_WARNING "%s: Will use DMA " | 1545 | (caps & SDHCI_CAN_DO_ADMA2)) |
| 1425 | "mode even though HW doesn't fully " | 1546 | host->flags |= SDHCI_USE_ADMA; |
| 1426 | "claim to support it.\n", mmc_hostname(mmc)); | 1547 | } |
| 1548 | |||
| 1549 | if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) && | ||
| 1550 | (host->flags & SDHCI_USE_ADMA)) { | ||
| 1551 | DBG("Disabling ADMA as it is marked broken\n"); | ||
| 1552 | host->flags &= ~SDHCI_USE_ADMA; | ||
| 1427 | } | 1553 | } |
| 1428 | 1554 | ||
| 1429 | if (host->flags & SDHCI_USE_DMA) { | 1555 | if (host->flags & SDHCI_USE_DMA) { |
| 1430 | if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) { | 1556 | if (host->ops->enable_dma) { |
| 1431 | printk(KERN_WARNING "%s: No suitable DMA available. " | 1557 | if (host->ops->enable_dma(host)) { |
| 1432 | "Falling back to PIO.\n", mmc_hostname(mmc)); | 1558 | printk(KERN_WARNING "%s: No suitable DMA " |
| 1433 | host->flags &= ~SDHCI_USE_DMA; | 1559 | "available. Falling back to PIO.\n", |
| 1560 | mmc_hostname(mmc)); | ||
| 1561 | host->flags &= ~(SDHCI_USE_DMA | SDHCI_USE_ADMA); | ||
| 1562 | } | ||
| 1434 | } | 1563 | } |
| 1435 | } | 1564 | } |
| 1436 | 1565 | ||
| 1437 | if (host->flags & SDHCI_USE_DMA) | 1566 | if (host->flags & SDHCI_USE_ADMA) { |
| 1438 | pci_set_master(pdev); | 1567 | /* |
| 1439 | else /* XXX: Hack to get MMC layer to avoid highmem */ | 1568 | * We need to allocate descriptors for all sg entries |
| 1440 | pdev->dma_mask = 0; | 1569 | * (128) and potentially one alignment transfer for |
| 1570 | * each of those entries. | ||
| 1571 | */ | ||
| 1572 | host->adma_desc = kmalloc((128 * 2 + 1) * 4, GFP_KERNEL); | ||
| 1573 | host->align_buffer = kmalloc(128 * 4, GFP_KERNEL); | ||
| 1574 | if (!host->adma_desc || !host->align_buffer) { | ||
| 1575 | kfree(host->adma_desc); | ||
| 1576 | kfree(host->align_buffer); | ||
| 1577 | printk(KERN_WARNING "%s: Unable to allocate ADMA " | ||
| 1578 | "buffers. Falling back to standard DMA.\n", | ||
| 1579 | mmc_hostname(mmc)); | ||
| 1580 | host->flags &= ~SDHCI_USE_ADMA; | ||
| 1581 | } | ||
| 1582 | } | ||
| 1583 | |||
| 1584 | /* XXX: Hack to get MMC layer to avoid highmem */ | ||
| 1585 | if (!(host->flags & SDHCI_USE_DMA)) | ||
| 1586 | mmc_dev(host->mmc)->dma_mask = NULL; | ||
| 1441 | 1587 | ||
| 1442 | host->max_clk = | 1588 | host->max_clk = |
| 1443 | (caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT; | 1589 | (caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT; |
| 1444 | if (host->max_clk == 0) { | 1590 | if (host->max_clk == 0) { |
| 1445 | printk(KERN_ERR "%s: Hardware doesn't specify base clock " | 1591 | printk(KERN_ERR "%s: Hardware doesn't specify base clock " |
| 1446 | "frequency.\n", mmc_hostname(mmc)); | 1592 | "frequency.\n", mmc_hostname(mmc)); |
| 1447 | ret = -ENODEV; | 1593 | return -ENODEV; |
| 1448 | goto unmap; | ||
| 1449 | } | 1594 | } |
| 1450 | host->max_clk *= 1000000; | 1595 | host->max_clk *= 1000000; |
| 1451 | 1596 | ||
| @@ -1454,8 +1599,7 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot) | |||
| 1454 | if (host->timeout_clk == 0) { | 1599 | if (host->timeout_clk == 0) { |
| 1455 | printk(KERN_ERR "%s: Hardware doesn't specify timeout clock " | 1600 | printk(KERN_ERR "%s: Hardware doesn't specify timeout clock " |
| 1456 | "frequency.\n", mmc_hostname(mmc)); | 1601 | "frequency.\n", mmc_hostname(mmc)); |
| 1457 | ret = -ENODEV; | 1602 | return -ENODEV; |
| 1458 | goto unmap; | ||
| 1459 | } | 1603 | } |
| 1460 | if (caps & SDHCI_TIMEOUT_CLK_UNIT) | 1604 | if (caps & SDHCI_TIMEOUT_CLK_UNIT) |
| 1461 | host->timeout_clk *= 1000; | 1605 | host->timeout_clk *= 1000; |
| @@ -1466,7 +1610,7 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot) | |||
| 1466 | mmc->ops = &sdhci_ops; | 1610 | mmc->ops = &sdhci_ops; |
| 1467 | mmc->f_min = host->max_clk / 256; | 1611 | mmc->f_min = host->max_clk / 256; |
| 1468 | mmc->f_max = host->max_clk; | 1612 | mmc->f_max = host->max_clk; |
| 1469 | mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MULTIWRITE | MMC_CAP_SDIO_IRQ; | 1613 | mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ; |
| 1470 | 1614 | ||
| 1471 | if (caps & SDHCI_CAN_DO_HISPD) | 1615 | if (caps & SDHCI_CAN_DO_HISPD) |
| 1472 | mmc->caps |= MMC_CAP_SD_HIGHSPEED; | 1616 | mmc->caps |= MMC_CAP_SD_HIGHSPEED; |
| @@ -1482,20 +1626,22 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot) | |||
| 1482 | if (mmc->ocr_avail == 0) { | 1626 | if (mmc->ocr_avail == 0) { |
| 1483 | printk(KERN_ERR "%s: Hardware doesn't report any " | 1627 | printk(KERN_ERR "%s: Hardware doesn't report any " |
| 1484 | "support voltages.\n", mmc_hostname(mmc)); | 1628 | "support voltages.\n", mmc_hostname(mmc)); |
| 1485 | ret = -ENODEV; | 1629 | return -ENODEV; |
| 1486 | goto unmap; | ||
| 1487 | } | 1630 | } |
| 1488 | 1631 | ||
| 1489 | spin_lock_init(&host->lock); | 1632 | spin_lock_init(&host->lock); |
| 1490 | 1633 | ||
| 1491 | /* | 1634 | /* |
| 1492 | * Maximum number of segments. Hardware cannot do scatter lists. | 1635 | * Maximum number of segments. Depends on if the hardware |
| 1636 | * can do scatter/gather or not. | ||
| 1493 | */ | 1637 | */ |
| 1494 | if (host->flags & SDHCI_USE_DMA) | 1638 | if (host->flags & SDHCI_USE_ADMA) |
| 1639 | mmc->max_hw_segs = 128; | ||
| 1640 | else if (host->flags & SDHCI_USE_DMA) | ||
| 1495 | mmc->max_hw_segs = 1; | 1641 | mmc->max_hw_segs = 1; |
| 1496 | else | 1642 | else /* PIO */ |
| 1497 | mmc->max_hw_segs = 16; | 1643 | mmc->max_hw_segs = 128; |
| 1498 | mmc->max_phys_segs = 16; | 1644 | mmc->max_phys_segs = 128; |
| 1499 | 1645 | ||
| 1500 | /* | 1646 | /* |
| 1501 | * Maximum number of sectors in one transfer. Limited by DMA boundary | 1647 | * Maximum number of sectors in one transfer. Limited by DMA boundary |
| @@ -1505,9 +1651,13 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot) | |||
| 1505 | 1651 | ||
| 1506 | /* | 1652 | /* |
| 1507 | * Maximum segment size. Could be one segment with the maximum number | 1653 | * Maximum segment size. Could be one segment with the maximum number |
| 1508 | * of bytes. | 1654 | * of bytes. When doing hardware scatter/gather, each entry cannot |
| 1655 | * be larger than 64 KiB though. | ||
| 1509 | */ | 1656 | */ |
| 1510 | mmc->max_seg_size = mmc->max_req_size; | 1657 | if (host->flags & SDHCI_USE_ADMA) |
| 1658 | mmc->max_seg_size = 65536; | ||
| 1659 | else | ||
| 1660 | mmc->max_seg_size = mmc->max_req_size; | ||
| 1511 | 1661 | ||
| 1512 | /* | 1662 | /* |
| 1513 | * Maximum block size. This varies from controller to controller and | 1663 | * Maximum block size. This varies from controller to controller and |
| @@ -1553,7 +1703,7 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot) | |||
| 1553 | host->led.default_trigger = mmc_hostname(mmc); | 1703 | host->led.default_trigger = mmc_hostname(mmc); |
| 1554 | host->led.brightness_set = sdhci_led_control; | 1704 | host->led.brightness_set = sdhci_led_control; |
| 1555 | 1705 | ||
| 1556 | ret = led_classdev_register(&pdev->dev, &host->led); | 1706 | ret = led_classdev_register(mmc_dev(mmc), &host->led); |
| 1557 | if (ret) | 1707 | if (ret) |
| 1558 | goto reset; | 1708 | goto reset; |
| 1559 | #endif | 1709 | #endif |
| @@ -1562,8 +1712,9 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot) | |||
| 1562 | 1712 | ||
| 1563 | mmc_add_host(mmc); | 1713 | mmc_add_host(mmc); |
| 1564 | 1714 | ||
| 1565 | printk(KERN_INFO "%s: SDHCI at 0x%08lx irq %d %s\n", | 1715 | printk(KERN_INFO "%s: SDHCI controller on %s [%s] using %s%s\n", |
| 1566 | mmc_hostname(mmc), host->addr, host->irq, | 1716 | mmc_hostname(mmc), host->hw_name, mmc_dev(mmc)->bus_id, |
| 1717 | (host->flags & SDHCI_USE_ADMA)?"A":"", | ||
| 1567 | (host->flags & SDHCI_USE_DMA)?"DMA":"PIO"); | 1718 | (host->flags & SDHCI_USE_DMA)?"DMA":"PIO"); |
| 1568 | 1719 | ||
| 1569 | return 0; | 1720 | return 0; |
| @@ -1576,35 +1727,40 @@ reset: | |||
| 1576 | untasklet: | 1727 | untasklet: |
| 1577 | tasklet_kill(&host->card_tasklet); | 1728 | tasklet_kill(&host->card_tasklet); |
| 1578 | tasklet_kill(&host->finish_tasklet); | 1729 | tasklet_kill(&host->finish_tasklet); |
| 1579 | unmap: | ||
| 1580 | iounmap(host->ioaddr); | ||
| 1581 | release: | ||
| 1582 | pci_release_region(pdev, host->bar); | ||
| 1583 | free: | ||
| 1584 | mmc_free_host(mmc); | ||
| 1585 | 1730 | ||
| 1586 | return ret; | 1731 | return ret; |
| 1587 | } | 1732 | } |
| 1588 | 1733 | ||
| 1589 | static void sdhci_remove_slot(struct pci_dev *pdev, int slot) | 1734 | EXPORT_SYMBOL_GPL(sdhci_add_host); |
| 1735 | |||
| 1736 | void sdhci_remove_host(struct sdhci_host *host, int dead) | ||
| 1590 | { | 1737 | { |
| 1591 | struct sdhci_chip *chip; | 1738 | unsigned long flags; |
| 1592 | struct mmc_host *mmc; | ||
| 1593 | struct sdhci_host *host; | ||
| 1594 | 1739 | ||
| 1595 | chip = pci_get_drvdata(pdev); | 1740 | if (dead) { |
| 1596 | host = chip->hosts[slot]; | 1741 | spin_lock_irqsave(&host->lock, flags); |
| 1597 | mmc = host->mmc; | 1742 | |
| 1743 | host->flags |= SDHCI_DEVICE_DEAD; | ||
| 1744 | |||
| 1745 | if (host->mrq) { | ||
| 1746 | printk(KERN_ERR "%s: Controller removed during " | ||
| 1747 | " transfer!\n", mmc_hostname(host->mmc)); | ||
| 1598 | 1748 | ||
| 1599 | chip->hosts[slot] = NULL; | 1749 | host->mrq->cmd->error = -ENOMEDIUM; |
| 1750 | tasklet_schedule(&host->finish_tasklet); | ||
| 1751 | } | ||
| 1752 | |||
| 1753 | spin_unlock_irqrestore(&host->lock, flags); | ||
| 1754 | } | ||
| 1600 | 1755 | ||
| 1601 | mmc_remove_host(mmc); | 1756 | mmc_remove_host(host->mmc); |
| 1602 | 1757 | ||
| 1603 | #ifdef CONFIG_LEDS_CLASS | 1758 | #ifdef CONFIG_LEDS_CLASS |
| 1604 | led_classdev_unregister(&host->led); | 1759 | led_classdev_unregister(&host->led); |
| 1605 | #endif | 1760 | #endif |
| 1606 | 1761 | ||
| 1607 | sdhci_reset(host, SDHCI_RESET_ALL); | 1762 | if (!dead) |
| 1763 | sdhci_reset(host, SDHCI_RESET_ALL); | ||
| 1608 | 1764 | ||
| 1609 | free_irq(host->irq, host); | 1765 | free_irq(host->irq, host); |
| 1610 | 1766 | ||
| @@ -1613,106 +1769,21 @@ static void sdhci_remove_slot(struct pci_dev *pdev, int slot) | |||
| 1613 | tasklet_kill(&host->card_tasklet); | 1769 | tasklet_kill(&host->card_tasklet); |
| 1614 | tasklet_kill(&host->finish_tasklet); | 1770 | tasklet_kill(&host->finish_tasklet); |
| 1615 | 1771 | ||
| 1616 | iounmap(host->ioaddr); | 1772 | kfree(host->adma_desc); |
| 1617 | 1773 | kfree(host->align_buffer); | |
| 1618 | pci_release_region(pdev, host->bar); | ||
| 1619 | 1774 | ||
| 1620 | mmc_free_host(mmc); | 1775 | host->adma_desc = NULL; |
| 1776 | host->align_buffer = NULL; | ||
| 1621 | } | 1777 | } |
| 1622 | 1778 | ||
| 1623 | static int __devinit sdhci_probe(struct pci_dev *pdev, | 1779 | EXPORT_SYMBOL_GPL(sdhci_remove_host); |
| 1624 | const struct pci_device_id *ent) | ||
| 1625 | { | ||
| 1626 | int ret, i; | ||
| 1627 | u8 slots, rev; | ||
| 1628 | struct sdhci_chip *chip; | ||
| 1629 | |||
| 1630 | BUG_ON(pdev == NULL); | ||
| 1631 | BUG_ON(ent == NULL); | ||
| 1632 | 1780 | ||
| 1633 | pci_read_config_byte(pdev, PCI_CLASS_REVISION, &rev); | 1781 | void sdhci_free_host(struct sdhci_host *host) |
| 1634 | |||
| 1635 | printk(KERN_INFO DRIVER_NAME | ||
| 1636 | ": SDHCI controller found at %s [%04x:%04x] (rev %x)\n", | ||
| 1637 | pci_name(pdev), (int)pdev->vendor, (int)pdev->device, | ||
| 1638 | (int)rev); | ||
| 1639 | |||
| 1640 | ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots); | ||
| 1641 | if (ret) | ||
| 1642 | return ret; | ||
| 1643 | |||
| 1644 | slots = PCI_SLOT_INFO_SLOTS(slots) + 1; | ||
| 1645 | DBG("found %d slot(s)\n", slots); | ||
| 1646 | if (slots == 0) | ||
| 1647 | return -ENODEV; | ||
| 1648 | |||
| 1649 | ret = pci_enable_device(pdev); | ||
| 1650 | if (ret) | ||
| 1651 | return ret; | ||
| 1652 | |||
| 1653 | chip = kzalloc(sizeof(struct sdhci_chip) + | ||
| 1654 | sizeof(struct sdhci_host*) * slots, GFP_KERNEL); | ||
| 1655 | if (!chip) { | ||
| 1656 | ret = -ENOMEM; | ||
| 1657 | goto err; | ||
| 1658 | } | ||
| 1659 | |||
| 1660 | chip->pdev = pdev; | ||
| 1661 | chip->quirks = ent->driver_data; | ||
| 1662 | |||
| 1663 | if (debug_quirks) | ||
| 1664 | chip->quirks = debug_quirks; | ||
| 1665 | |||
| 1666 | chip->num_slots = slots; | ||
| 1667 | pci_set_drvdata(pdev, chip); | ||
| 1668 | |||
| 1669 | for (i = 0;i < slots;i++) { | ||
| 1670 | ret = sdhci_probe_slot(pdev, i); | ||
| 1671 | if (ret) { | ||
| 1672 | for (i--;i >= 0;i--) | ||
| 1673 | sdhci_remove_slot(pdev, i); | ||
| 1674 | goto free; | ||
| 1675 | } | ||
| 1676 | } | ||
| 1677 | |||
| 1678 | return 0; | ||
| 1679 | |||
| 1680 | free: | ||
| 1681 | pci_set_drvdata(pdev, NULL); | ||
| 1682 | kfree(chip); | ||
| 1683 | |||
| 1684 | err: | ||
| 1685 | pci_disable_device(pdev); | ||
| 1686 | return ret; | ||
| 1687 | } | ||
| 1688 | |||
| 1689 | static void __devexit sdhci_remove(struct pci_dev *pdev) | ||
| 1690 | { | 1782 | { |
| 1691 | int i; | 1783 | mmc_free_host(host->mmc); |
| 1692 | struct sdhci_chip *chip; | ||
| 1693 | |||
| 1694 | chip = pci_get_drvdata(pdev); | ||
| 1695 | |||
| 1696 | if (chip) { | ||
| 1697 | for (i = 0;i < chip->num_slots;i++) | ||
| 1698 | sdhci_remove_slot(pdev, i); | ||
| 1699 | |||
| 1700 | pci_set_drvdata(pdev, NULL); | ||
| 1701 | |||
| 1702 | kfree(chip); | ||
| 1703 | } | ||
| 1704 | |||
| 1705 | pci_disable_device(pdev); | ||
| 1706 | } | 1784 | } |
| 1707 | 1785 | ||
| 1708 | static struct pci_driver sdhci_driver = { | 1786 | EXPORT_SYMBOL_GPL(sdhci_free_host); |
| 1709 | .name = DRIVER_NAME, | ||
| 1710 | .id_table = pci_ids, | ||
| 1711 | .probe = sdhci_probe, | ||
| 1712 | .remove = __devexit_p(sdhci_remove), | ||
| 1713 | .suspend = sdhci_suspend, | ||
| 1714 | .resume = sdhci_resume, | ||
| 1715 | }; | ||
| 1716 | 1787 | ||
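With probing split out of this file, a bus or platform glue driver owns the underlying device, fills in the sdhci_host fields the core consumes (ioaddr, irq, quirks, hw_name, ops) and drives the exported alloc/add/remove/free calls. A minimal, illustrative platform-glue sketch; the scaffolding and names are hypothetical, and only the sdhci_* API and host fields come from this patch:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

#include "sdhci.h"

static struct sdhci_ops example_sdhci_ops = {
        .enable_dma = NULL,     /* optional bus-specific DMA setup hook */
};

static int example_sdhci_probe(struct platform_device *pdev)
{
        struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        struct sdhci_host *host;
        int ret;

        /* Error handling for res/irq/ioremap trimmed for brevity. */
        host = sdhci_alloc_host(&pdev->dev, 0);         /* no extra private data */
        if (IS_ERR(host))
                return PTR_ERR(host);

        host->hw_name = "example";
        host->ops = &example_sdhci_ops;
        host->quirks = 0;
        host->irq = platform_get_irq(pdev, 0);
        host->ioaddr = ioremap(res->start, res->end - res->start + 1);

        platform_set_drvdata(pdev, host);

        ret = sdhci_add_host(host);                     /* registers the mmc host */
        if (ret) {
                iounmap(host->ioaddr);
                sdhci_free_host(host);
        }
        return ret;
}

static int example_sdhci_remove(struct platform_device *pdev)
{
        struct sdhci_host *host = platform_get_drvdata(pdev);

        sdhci_remove_host(host, 0);                     /* 0: controller still works */
        iounmap(host->ioaddr);
        sdhci_free_host(host);
        return 0;
}

Passing dead = 1 to sdhci_remove_host() is reserved for the case where the hardware has already vanished (for example, surprise removal); that is why sdhci_remove_host() above only issues the final reset when the controller is not marked dead and instead fails any in-flight request with -ENOMEDIUM.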
| 1717 | /*****************************************************************************\ | 1788 | /*****************************************************************************\ |
| 1718 | * * | 1789 | * * |
| @@ -1726,14 +1797,11 @@ static int __init sdhci_drv_init(void) | |||
| 1726 | ": Secure Digital Host Controller Interface driver\n"); | 1797 | ": Secure Digital Host Controller Interface driver\n"); |
| 1727 | printk(KERN_INFO DRIVER_NAME ": Copyright(c) Pierre Ossman\n"); | 1798 | printk(KERN_INFO DRIVER_NAME ": Copyright(c) Pierre Ossman\n"); |
| 1728 | 1799 | ||
| 1729 | return pci_register_driver(&sdhci_driver); | 1800 | return 0; |
| 1730 | } | 1801 | } |
| 1731 | 1802 | ||
| 1732 | static void __exit sdhci_drv_exit(void) | 1803 | static void __exit sdhci_drv_exit(void) |
| 1733 | { | 1804 | { |
| 1734 | DBG("Exiting\n"); | ||
| 1735 | |||
| 1736 | pci_unregister_driver(&sdhci_driver); | ||
| 1737 | } | 1805 | } |
| 1738 | 1806 | ||
| 1739 | module_init(sdhci_drv_init); | 1807 | module_init(sdhci_drv_init); |
| @@ -1742,7 +1810,7 @@ module_exit(sdhci_drv_exit); | |||
| 1742 | module_param(debug_quirks, uint, 0444); | 1810 | module_param(debug_quirks, uint, 0444); |
| 1743 | 1811 | ||
| 1744 | MODULE_AUTHOR("Pierre Ossman <drzeus@drzeus.cx>"); | 1812 | MODULE_AUTHOR("Pierre Ossman <drzeus@drzeus.cx>"); |
| 1745 | MODULE_DESCRIPTION("Secure Digital Host Controller Interface driver"); | 1813 | MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver"); |
| 1746 | MODULE_LICENSE("GPL"); | 1814 | MODULE_LICENSE("GPL"); |
| 1747 | 1815 | ||
| 1748 | MODULE_PARM_DESC(debug_quirks, "Force certain quirks."); | 1816 | MODULE_PARM_DESC(debug_quirks, "Force certain quirks."); |
