author      John W. Linville <linville@tuxdriver.com>   2009-12-29 14:07:42 -0500
committer   John W. Linville <linville@tuxdriver.com>   2009-12-29 14:07:42 -0500
commit      55afc80b2ab100618c17af77915f75307b6bd5d1 (patch)
tree        61521aeabc56788ceac926a15d0d6d081b6a6da8 /drivers/net/wireless/b43/dma.c
parent      6c853da3f30c93eae847ecbcd9fdf10ba0da04c2 (diff)
Revert "b43: Enforce DMA descriptor memory constraints"
This reverts commit 9bd568a50c446433038dec2a5186c5c57c3dbd23.
That commit has been shown to cause allocation failures during
initialization on some machines.
http://bugzilla.kernel.org/show_bug.cgi?id=14844
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Diffstat (limited to 'drivers/net/wireless/b43/dma.c')
-rw-r--r--   drivers/net/wireless/b43/dma.c   197
1 file changed, 45 insertions(+), 152 deletions(-)
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 027be275e035..88d1fd02d40a 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -383,160 +383,44 @@ static inline
         }
 }
 
-/* Check if a DMA region fits the device constraints.
- * Returns true, if the region is OK for usage with this device. */
-static inline bool b43_dma_address_ok(struct b43_dmaring *ring,
-                                      dma_addr_t addr, size_t size)
-{
-        switch (ring->type) {
-        case B43_DMA_30BIT:
-                if ((u64)addr + size > (1ULL << 30))
-                        return 0;
-                break;
-        case B43_DMA_32BIT:
-                if ((u64)addr + size > (1ULL << 32))
-                        return 0;
-                break;
-        case B43_DMA_64BIT:
-                /* Currently we can't have addresses beyond
-                 * 64bit in the kernel. */
-                break;
-        }
-        return 1;
-}
-
-#define is_4k_aligned(addr)     (((u64)(addr) & 0x0FFFull) == 0)
-#define is_8k_aligned(addr)     (((u64)(addr) & 0x1FFFull) == 0)
-
-static void b43_unmap_and_free_ringmem(struct b43_dmaring *ring, void *base,
-                                       dma_addr_t dmaaddr, size_t size)
-{
-        ssb_dma_unmap_single(ring->dev->dev, dmaaddr, size, DMA_TO_DEVICE);
-        free_pages((unsigned long)base, get_order(size));
-}
-
-static void * __b43_get_and_map_ringmem(struct b43_dmaring *ring,
-                                        dma_addr_t *dmaaddr, size_t size,
-                                        gfp_t gfp_flags)
-{
-        void *base;
-
-        base = (void *)__get_free_pages(gfp_flags, get_order(size));
-        if (!base)
-                return NULL;
-        memset(base, 0, size);
-        *dmaaddr = ssb_dma_map_single(ring->dev->dev, base, size,
-                                      DMA_TO_DEVICE);
-        if (ssb_dma_mapping_error(ring->dev->dev, *dmaaddr)) {
-                free_pages((unsigned long)base, get_order(size));
-                return NULL;
-        }
-
-        return base;
-}
-
-static void * b43_get_and_map_ringmem(struct b43_dmaring *ring,
-                                      dma_addr_t *dmaaddr, size_t size)
-{
-        void *base;
-
-        base = __b43_get_and_map_ringmem(ring, dmaaddr, size,
-                                         GFP_KERNEL);
-        if (!base) {
-                b43err(ring->dev->wl, "Failed to allocate or map pages "
-                       "for DMA ringmemory\n");
-                return NULL;
-        }
-        if (!b43_dma_address_ok(ring, *dmaaddr, size)) {
-                /* The memory does not fit our device constraints.
-                 * Retry with GFP_DMA set to get lower memory. */
-                b43_unmap_and_free_ringmem(ring, base, *dmaaddr, size);
-                base = __b43_get_and_map_ringmem(ring, dmaaddr, size,
-                                                 GFP_KERNEL | GFP_DMA);
-                if (!base) {
-                        b43err(ring->dev->wl, "Failed to allocate or map pages "
-                               "in the GFP_DMA region for DMA ringmemory\n");
-                        return NULL;
-                }
-                if (!b43_dma_address_ok(ring, *dmaaddr, size)) {
-                        b43_unmap_and_free_ringmem(ring, base, *dmaaddr, size);
-                        b43err(ring->dev->wl, "Failed to allocate DMA "
-                               "ringmemory that fits device constraints\n");
-                        return NULL;
-                }
-        }
-        /* We expect the memory to be 4k aligned, at least. */
-        if (B43_WARN_ON(!is_4k_aligned(*dmaaddr))) {
-                b43_unmap_and_free_ringmem(ring, base, *dmaaddr, size);
-                return NULL;
-        }
-
-        return base;
-}
-
 static int alloc_ringmemory(struct b43_dmaring *ring)
 {
-        unsigned int required;
-        void *base;
-        dma_addr_t dmaaddr;
-
-        /* There are several requirements to the descriptor ring memory:
-         * - The memory region needs to fit the address constraints for the
-         *   device (same as for frame buffers).
-         * - For 30/32bit DMA devices, the descriptor ring must be 4k aligned.
-         * - For 64bit DMA devices, the descriptor ring must be 8k aligned.
+        gfp_t flags = GFP_KERNEL;
+
+        /* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
+         * alignment and 8K buffers for 64-bit DMA with 8K alignment. Testing
+         * has shown that 4K is sufficient for the latter as long as the buffer
+         * does not cross an 8K boundary.
+         *
+         * For unknown reasons - possibly a hardware error - the BCM4311 rev
+         * 02, which uses 64-bit DMA, needs the ring buffer in very low memory,
+         * which accounts for the GFP_DMA flag below.
+         *
+         * The flags here must match the flags in free_ringmemory below!
          */
-
         if (ring->type == B43_DMA_64BIT)
-                required = ring->nr_slots * sizeof(struct b43_dmadesc64);
-        else
-                required = ring->nr_slots * sizeof(struct b43_dmadesc32);
-        if (B43_WARN_ON(required > 0x1000))
+                flags |= GFP_DMA;
+        ring->descbase = ssb_dma_alloc_consistent(ring->dev->dev,
+                                                  B43_DMA_RINGMEMSIZE,
+                                                  &(ring->dmabase), flags);
+        if (!ring->descbase) {
+                b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
                 return -ENOMEM;
-
-        ring->alloc_descsize = 0x1000;
-        base = b43_get_and_map_ringmem(ring, &dmaaddr, ring->alloc_descsize);
-        if (!base)
-                return -ENOMEM;
-        ring->alloc_descbase = base;
-        ring->alloc_dmabase = dmaaddr;
-
-        if ((ring->type != B43_DMA_64BIT) || is_8k_aligned(dmaaddr)) {
-                /* We're on <=32bit DMA, or we already got 8k aligned memory.
-                 * That's all we need, so we're fine. */
-                ring->descbase = base;
-                ring->dmabase = dmaaddr;
-                return 0;
-        }
-        b43_unmap_and_free_ringmem(ring, base, dmaaddr, ring->alloc_descsize);
-
-        /* Ok, we failed at the 8k alignment requirement.
-         * Try to force-align the memory region now. */
-        ring->alloc_descsize = 0x2000;
-        base = b43_get_and_map_ringmem(ring, &dmaaddr, ring->alloc_descsize);
-        if (!base)
-                return -ENOMEM;
-        ring->alloc_descbase = base;
-        ring->alloc_dmabase = dmaaddr;
-
-        if (is_8k_aligned(dmaaddr)) {
-                /* We're already 8k aligned. That Ok, too. */
-                ring->descbase = base;
-                ring->dmabase = dmaaddr;
-                return 0;
         }
-        /* Force-align it to 8k */
-        ring->descbase = (void *)((u8 *)base + 0x1000);
-        ring->dmabase = dmaaddr + 0x1000;
-        B43_WARN_ON(!is_8k_aligned(ring->dmabase));
+        memset(ring->descbase, 0, B43_DMA_RINGMEMSIZE);
 
         return 0;
 }
 
 static void free_ringmemory(struct b43_dmaring *ring)
 {
-        b43_unmap_and_free_ringmem(ring, ring->alloc_descbase,
-                                   ring->alloc_dmabase, ring->alloc_descsize);
+        gfp_t flags = GFP_KERNEL;
+
+        if (ring->type == B43_DMA_64BIT)
+                flags |= GFP_DMA;
+
+        ssb_dma_free_consistent(ring->dev->dev, B43_DMA_RINGMEMSIZE,
+                                ring->descbase, ring->dmabase, flags);
 }
 
 /* Reset the RX DMA channel */
@@ -646,14 +530,29 @@ static bool b43_dma_mapping_error(struct b43_dmaring *ring,
         if (unlikely(ssb_dma_mapping_error(ring->dev->dev, addr)))
                 return 1;
 
-        if (!b43_dma_address_ok(ring, addr, buffersize)) {
-                /* We can't support this address. Unmap it again. */
-                unmap_descbuffer(ring, addr, buffersize, dma_to_device);
-                return 1;
+        switch (ring->type) {
+        case B43_DMA_30BIT:
+                if ((u64)addr + buffersize > (1ULL << 30))
+                        goto address_error;
+                break;
+        case B43_DMA_32BIT:
+                if ((u64)addr + buffersize > (1ULL << 32))
+                        goto address_error;
+                break;
+        case B43_DMA_64BIT:
+                /* Currently we can't have addresses beyond
+                 * 64bit in the kernel. */
+                break;
         }
 
         /* The address is OK. */
         return 0;
+
+address_error:
+        /* We can't support this address. Unmap it again. */
+        unmap_descbuffer(ring, addr, buffersize, dma_to_device);
+
+        return 1;
 }
 
 static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring, struct sk_buff *skb)
@@ -715,9 +614,6 @@ static int setup_rx_descbuffer(struct b43_dmaring *ring,
         meta->dmaaddr = dmaaddr;
         ring->ops->fill_descriptor(ring, desc, dmaaddr,
                                    ring->rx_buffersize, 0, 0, 0);
-        ssb_dma_sync_single_for_device(ring->dev->dev,
-                                       ring->alloc_dmabase,
-                                       ring->alloc_descsize, DMA_TO_DEVICE);
 
         return 0;
 }
@@ -1354,9 +1250,6 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
         }
         /* Now transfer the whole frame. */
         wmb();
-        ssb_dma_sync_single_for_device(ring->dev->dev,
-                                       ring->alloc_dmabase,
-                                       ring->alloc_descsize, DMA_TO_DEVICE);
         ops->poke_tx(ring, next_slot(ring, slot));
         return 0;
 