author     Russell King <rmk+kernel@arm.linux.org.uk>   2011-01-08 06:49:20 -0500
committer  Russell King <rmk+kernel@arm.linux.org.uk>   2011-02-23 12:24:11 -0500
commit     459c1517f9873b198af7dcded8d8cc84749bbb69
tree       afe04e856b766b4c3244fcddf95b1c977f28f234 /arch/arm/mm
parent     6fc31d54443bdc25a8166be15e3920a7e39d195d
ARM: DMA: top-down allocation in DMA coherent region
Achieve better usage of the DMA coherent region by doing top-down
allocation rather than bottom-up. If we ask for a 128kB allocation,
this will be aligned to 128kB and satisfied from the very bottom
address. If we then ask for a 600kB allocation, this will be aligned
to 1MB, leaving an 896kB hole between the end of the first allocation
and the next 1MB boundary.
Performing top-down allocation resolves this by allocating the 128kB
at the very top, and then the 600kB can come in below it without any
unnecessary wastage; the sketch after the sign-off below works through
the arithmetic.
This problem was reported by Janusz Krzysztofik, who had 2 x 128kB +
1 x 640kB allocations that wouldn't fit into 1MB.
Tested-by: Janusz Krzysztofik <jkrzyszt@tis.icnet.pl>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
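
To make the fragmentation arithmetic concrete, here is a minimal userspace
sketch, not kernel code: the 1MB pool, the pow2_align() helper, and the
hard-coded addresses are illustrative assumptions based only on the message
above (request sizes aligned to the next power of two).

#include <stdio.h>

#define KB(x)		((unsigned long)(x) << 10)
#define POOL_END	KB(1024)	/* illustrative 1MB coherent region */

/* Alignment as described above: the request size rounded up to a
 * power of two, so 128kB aligns to 128kB and 600kB aligns to 1MB. */
static unsigned long pow2_align(unsigned long size)
{
	unsigned long a = 1;

	while (a < size)
		a <<= 1;
	return a;
}

int main(void)
{
	/* Bottom-up: 128kB sits at 0..128kB, so the 600kB request must
	 * start at the next 1MB boundary, leaving an 896kB hole and
	 * overflowing the 1MB pool. */
	unsigned long hole = pow2_align(KB(600)) - KB(128);

	printf("bottom-up: hole of %lukB, 600kB %s\n", hole >> 10,
	       pow2_align(KB(600)) + KB(600) > POOL_END ?
	       "does not fit" : "fits");

	/* Top-down: 128kB takes 896kB..1MB at the top; rounding the
	 * 600kB request down to its 1MB alignment places it at 0. */
	unsigned long addr = (KB(896) - KB(600)) & ~(pow2_align(KB(600)) - 1);

	printf("top-down: 600kB at %lukB..%lukB, no hole\n",
	       addr >> 10, (addr + KB(600)) >> 10);
	return 0;
}

Run as-is this prints the 896kB hole from the message, and shows the same
two requests fitting once placement goes top-down.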
Diffstat (limited to 'arch/arm/mm')
 -rw-r--r--  arch/arm/mm/vmregion.c | 17 ++++++++---------
 1 file changed, 8 insertions(+), 9 deletions(-)
diff --git a/arch/arm/mm/vmregion.c b/arch/arm/mm/vmregion.c
index 935993e1b1ef..036fdbfdd62f 100644
--- a/arch/arm/mm/vmregion.c
+++ b/arch/arm/mm/vmregion.c
@@ -38,7 +38,7 @@ struct arm_vmregion *
 arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align,
 		   size_t size, gfp_t gfp)
 {
-	unsigned long addr = head->vm_start, end = head->vm_end - size;
+	unsigned long start = head->vm_start, addr = head->vm_end;
 	unsigned long flags;
 	struct arm_vmregion *c, *new;
 
@@ -54,21 +54,20 @@ arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align,
 
 	spin_lock_irqsave(&head->vm_lock, flags);
 
-	list_for_each_entry(c, &head->vm_list, vm_list) {
-		if ((addr + size) < addr)
-			goto nospc;
-		if ((addr + size) <= c->vm_start)
+	addr = rounddown(addr - size, align);
+	list_for_each_entry_reverse(c, &head->vm_list, vm_list) {
+		if (addr >= c->vm_end)
 			goto found;
-		addr = ALIGN(c->vm_end, align);
-		if (addr > end)
+		addr = rounddown(c->vm_start - size, align);
+		if (addr < start)
 			goto nospc;
 	}
 
  found:
 	/*
-	 * Insert this entry _before_ the one we found.
+	 * Insert this entry after the one we found.
 	 */
-	list_add_tail(&new->vm_list, &c->vm_list);
+	list_add(&new->vm_list, &c->vm_list);
 	new->vm_start = addr;
 	new->vm_end = addr + size;
 	new->vm_active = 1;
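
The shape of the new search is easier to see outside the list_head
machinery. The following is a simplified standalone model, not the kernel
source: the region array, pool bounds, alloc_top_down() name, and the
open-coded ROUNDDOWN() are assumptions for illustration. Starting from the
highest address the request could occupy, the walk slides the candidate
down past each occupied region until it clears one or falls out of the
pool:

#include <stdio.h>

/* Stand-in for the kernel's rounddown() used by the patch. */
#define ROUNDDOWN(x, a)	((x) - ((x) % (a)))

struct region { unsigned long start, end; };	/* sorted ascending */

static long alloc_top_down(const struct region *r, int nr,
			   unsigned long vm_start, unsigned long vm_end,
			   unsigned long size, unsigned long align)
{
	unsigned long addr = ROUNDDOWN(vm_end - size, align);
	int i;

	/* Walk existing regions from the highest address downwards,
	 * mirroring list_for_each_entry_reverse() in the patch. */
	for (i = nr - 1; i >= 0; i--) {
		if (addr >= r[i].end)	/* clears this region: done */
			return addr;
		addr = ROUNDDOWN(r[i].start - size, align);
		if (addr < vm_start)	/* fell out of the pool */
			return -1;	/* nospc */
	}
	return addr;			/* fits below every region */
}

int main(void)
{
	/* The reporter's case: two 128kB blocks already placed top-down
	 * in a 1MB pool, now a 640kB request with 1MB alignment. */
	struct region r[] = {
		{ 768UL << 10,  896UL << 10 },
		{ 896UL << 10, 1024UL << 10 },
	};
	long addr = alloc_top_down(r, 2, 0, 1024UL << 10,
				   640UL << 10, 1024UL << 10);

	printf("640kB lands at %ldkB\n", addr >> 10);	/* prints 0kB */
	return 0;
}

Because the walk runs from high addresses to low and list_add() inserts the
new region immediately after the entry it cleared, the list stays sorted by
ascending address, the same invariant the old bottom-up walk maintained.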