author		Russell King <rmk@dyn-67.arm.linux.org.uk>	2005-10-30 16:12:08 -0500
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2005-10-30 16:12:08 -0500
commit		cb7610d018235653c73ff1fea79b962c16317474 (patch)
tree		94bfee553792813e6d6481de6a30369cd12a6b53
parent		1d1fd66c45fa78c6fed61612e14dad0e24c815c2 (diff)
[ARM] Clean up dmabounce
Encapsulate pool data into dmabounce_pool. Only account successful
allocations. Use dma_mapping_error().
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
-rw-r--r--	arch/arm/common/dmabounce.c	165
1 file changed, 86 insertions(+), 79 deletions(-)
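
In outline, the patch folds each per-device pool pointer, its size threshold, and its allocation counter into a single dmabounce_pool, and picks the pool by size before the safe_buffer is allocated. A minimal sketch of the resulting shape, using only names from the diff below (surrounding kernel declarations assumed):

	struct dmabounce_pool {
		unsigned long	size;	/* largest request this pool serves */
		struct dma_pool	*pool;	/* backing DMA pool */
	#ifdef STATS
		unsigned long	allocs;	/* bumped only on successful allocation */
	#endif
	};

	/* pool selection, hoisted ahead of the safe_buffer allocation */
	if (size <= device_info->small.size)
		pool = &device_info->small;
	else if (size <= device_info->large.size)
		pool = &device_info->large;
	else
		pool = NULL;	/* oversized requests fall back to dma_alloc_coherent() */
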
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index cbf2165476b0..ad6c89a555bb 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -33,8 +33,8 @@
 #include <asm/cacheflush.h>
 
 #undef DEBUG
-
 #undef STATS
+
 #ifdef STATS
 #define DO_STATS(X) do { X ; } while (0)
 #else
@@ -52,26 +52,31 @@ struct safe_buffer {
 	int direction;
 
 	/* safe buffer info */
-	struct dma_pool *pool;
+	struct dmabounce_pool *pool;
 	void *safe;
 	dma_addr_t safe_dma_addr;
 };
 
+struct dmabounce_pool {
+	unsigned long	size;
+	struct dma_pool	*pool;
+#ifdef STATS
+	unsigned long	allocs;
+#endif
+};
+
 struct dmabounce_device_info {
 	struct list_head node;
 
 	struct device *dev;
-	struct dma_pool *small_buffer_pool;
-	struct dma_pool *large_buffer_pool;
 	struct list_head safe_buffers;
-	unsigned long small_buffer_size, large_buffer_size;
 #ifdef STATS
-	unsigned long sbp_allocs;
-	unsigned long lbp_allocs;
 	unsigned long total_allocs;
 	unsigned long map_op_count;
 	unsigned long bounce_count;
 #endif
+	struct dmabounce_pool	small;
+	struct dmabounce_pool	large;
 };
 
 static LIST_HEAD(dmabounce_devs);
@@ -82,9 +87,9 @@ static void print_alloc_stats(struct dmabounce_device_info *device_info)
 	printk(KERN_INFO
 	       "%s: dmabounce: sbp: %lu, lbp: %lu, other: %lu, total: %lu\n",
 	       device_info->dev->bus_id,
-	       device_info->sbp_allocs, device_info->lbp_allocs,
-	       device_info->total_allocs - device_info->sbp_allocs -
-	       device_info->lbp_allocs,
+	       device_info->small.allocs, device_info->large.allocs,
+	       device_info->total_allocs - device_info->small.allocs -
+	       device_info->large.allocs,
 	       device_info->total_allocs);
 }
 #endif
@@ -106,18 +111,22 @@ find_dmabounce_dev(struct device *dev)
 /* allocate a 'safe' buffer and keep track of it */
 static inline struct safe_buffer *
 alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
 		  size_t size, enum dma_data_direction dir)
 {
 	struct safe_buffer *buf;
-	struct dma_pool *pool;
+	struct dmabounce_pool *pool;
 	struct device *dev = device_info->dev;
-	void *safe;
-	dma_addr_t safe_dma_addr;
 
 	dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n",
 		__func__, ptr, size, dir);
 
-	DO_STATS ( device_info->total_allocs++ );
+	if (size <= device_info->small.size) {
+		pool = &device_info->small;
+	} else if (size <= device_info->large.size) {
+		pool = &device_info->large;
+	} else {
+		pool = NULL;
+	}
 
 	buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC);
 	if (buf == NULL) {
@@ -125,41 +134,35 @@ alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
 		return NULL;
 	}
 
-	if (size <= device_info->small_buffer_size) {
-		pool = device_info->small_buffer_pool;
-		safe = dma_pool_alloc(pool, GFP_ATOMIC, &safe_dma_addr);
-
-		DO_STATS ( device_info->sbp_allocs++ );
-	} else if (size <= device_info->large_buffer_size) {
-		pool = device_info->large_buffer_pool;
-		safe = dma_pool_alloc(pool, GFP_ATOMIC, &safe_dma_addr);
+	buf->ptr = ptr;
+	buf->size = size;
+	buf->direction = dir;
+	buf->pool = pool;
 
-		DO_STATS ( device_info->lbp_allocs++ );
+	if (pool) {
+		buf->safe = dma_pool_alloc(pool->pool, GFP_ATOMIC,
+					   &buf->safe_dma_addr);
 	} else {
-		pool = NULL;
-		safe = dma_alloc_coherent(dev, size, &safe_dma_addr, GFP_ATOMIC);
+		buf->safe = dma_alloc_coherent(dev, size, &buf->safe_dma_addr,
+					       GFP_ATOMIC);
 	}
 
-	if (safe == NULL) {
-		dev_warn(device_info->dev,
+	if (buf->safe == NULL) {
+		dev_warn(dev,
 			"%s: could not alloc dma memory (size=%d)\n",
 			__func__, size);
 		kfree(buf);
 		return NULL;
 	}
 
 #ifdef STATS
+	if (pool)
+		pool->allocs++;
+	device_info->total_allocs++;
 	if (device_info->total_allocs % 1000 == 0)
 		print_alloc_stats(device_info);
 #endif
 
-	buf->ptr = ptr;
-	buf->size = size;
-	buf->direction = dir;
-	buf->pool = pool;
-	buf->safe = safe;
-	buf->safe_dma_addr = safe_dma_addr;
-
 	list_add(&buf->node, &device_info->safe_buffers);
 
 	return buf;
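
With the pool chosen up front, the statistics in alloc_safe_buffer() move after the allocation and count only successes, per the commit message; the per-pool and device-wide counters are bumped together:

	#ifdef STATS
		if (pool)
			pool->allocs++;		/* per-pool, successful allocations only */
		device_info->total_allocs++;	/* device-wide total */
	#endif
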
@@ -186,7 +189,7 @@ free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *
 	list_del(&buf->node);
 
 	if (buf->pool)
-		dma_pool_free(buf->pool, buf->safe, buf->safe_dma_addr);
+		dma_pool_free(buf->pool->pool, buf->safe, buf->safe_dma_addr);
 	else
 		dma_free_coherent(device_info->dev, buf->size, buf->safe,
 				buf->safe_dma_addr);
@@ -197,12 +200,10 @@ free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *
 /* ************************************************** */
 
 #ifdef STATS
-
 static void print_map_stats(struct dmabounce_device_info *device_info)
 {
-	printk(KERN_INFO
-	       "%s: dmabounce: map_op_count=%lu, bounce_count=%lu\n",
-	       device_info->dev->bus_id,
+	dev_info(device_info->dev,
+		"dmabounce: map_op_count=%lu, bounce_count=%lu\n",
 	       device_info->map_op_count, device_info->bounce_count);
 }
 #endif
@@ -258,13 +259,13 @@ map_single(struct device *dev, void *ptr, size_t size,
 			__func__, ptr, buf->safe, size);
 		memcpy(buf->safe, ptr, size);
 	}
-	consistent_sync(buf->safe, size, dir);
+	ptr = buf->safe;
 
 	dma_addr = buf->safe_dma_addr;
-	} else {
-		consistent_sync(ptr, size, dir);
 	}
 
+	consistent_sync(ptr, size, dir);
+
 	return dma_addr;
 }
 
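The map_single() hunk above also consolidates the cache maintenance: after any copy into the bounce buffer, ptr is redirected at the safe copy, so one consistent_sync() at the end serves both the bounced and unbounced paths. In miniature (a paraphrase of the patched flow, with the real bounce test elided):

	if (bouncing) {				/* stand-in for the real bounce test */
		memcpy(buf->safe, ptr, size);	/* DMA_TO_DEVICE copy-out */
		ptr = buf->safe;		/* redirect at the safe copy */
		dma_addr = buf->safe_dma_addr;
	}
	consistent_sync(ptr, size, dir);	/* formerly duplicated in each branch */
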
@@ -278,7 +279,7 @@ unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 	/*
 	 * Trying to unmap an invalid mapping
 	 */
-	if (dma_addr == ~0) {
+	if (dma_mapping_error(dma_addr)) {
 		dev_err(dev, "Trying to unmap invalid mapping\n");
 		return;
 	}
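
Using dma_mapping_error() makes the sentinel check self-describing rather than a magic constant. On ARM of this era the helper reduced to the same comparison the old code open-coded; roughly (paraphrased from memory of include/asm-arm/dma-mapping.h, not quoted from the tree):

	static inline int dma_mapping_error(dma_addr_t dma_addr)
	{
		return dma_addr == ~0;
	}
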
@@ -570,11 +571,25 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
 	local_irq_restore(flags);
 }
 
+static int
+dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev, const char *name,
+		    unsigned long size)
+{
+	pool->size = size;
+	DO_STATS(pool->allocs = 0);
+	pool->pool = dma_pool_create(name, dev, size,
+				     0 /* byte alignment */,
+				     0 /* no page-crossing issues */);
+
+	return pool->pool ? 0 : -ENOMEM;
+}
+
 int
 dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
 		       unsigned long large_buffer_size)
 {
 	struct dmabounce_device_info *device_info;
+	int ret;
 
 	device_info = kmalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC);
 	if (!device_info) {
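
The new dmabounce_init_pool() helper gives both pools a single creation path and lets the caller report failures uniformly. As a usage sketch, a driver registering (illustrative, made-up) 512-byte and 4 KiB bounce pools would call:

	/* sizes are illustrative only; dev is the device being registered */
	ret = dmabounce_register_dev(dev, 512, 4096);
	if (ret)
		dev_err(dev, "could not register with dmabounce\n");
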
@@ -584,45 +599,31 @@ dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
 		return -ENOMEM;
 	}
 
-	device_info->small_buffer_pool =
-		dma_pool_create("small_dmabounce_pool",
-				dev,
-				small_buffer_size,
-				0 /* byte alignment */,
-				0 /* no page-crossing issues */);
-	if (!device_info->small_buffer_pool) {
-		printk(KERN_ERR
-		       "dmabounce: could not allocate small DMA pool for %s\n",
-		       dev->bus_id);
-		kfree(device_info);
-		return -ENOMEM;
+	ret = dmabounce_init_pool(&device_info->small, dev,
+				  "small_dmabounce_pool", small_buffer_size);
+	if (ret) {
+		dev_err(dev,
+			"dmabounce: could not allocate DMA pool for %ld byte objects\n",
+			small_buffer_size);
+		goto err_free;
 	}
 
 	if (large_buffer_size) {
-		device_info->large_buffer_pool =
-			dma_pool_create("large_dmabounce_pool",
-					dev,
-					large_buffer_size,
-					0 /* byte alignment */,
-					0 /* no page-crossing issues */);
-		if (!device_info->large_buffer_pool) {
-			printk(KERN_ERR
-			       "dmabounce: could not allocate large DMA pool for %s\n",
-			       dev->bus_id);
-			dma_pool_destroy(device_info->small_buffer_pool);
-
-			return -ENOMEM;
+		ret = dmabounce_init_pool(&device_info->large, dev,
+					  "large_dmabounce_pool",
+					  large_buffer_size);
+		if (ret) {
+			dev_err(dev,
+				"dmabounce: could not allocate DMA pool for %ld byte objects\n",
+				large_buffer_size);
+			goto err_destroy;
 		}
 	}
 
 	device_info->dev = dev;
-	device_info->small_buffer_size = small_buffer_size;
-	device_info->large_buffer_size = large_buffer_size;
 	INIT_LIST_HEAD(&device_info->safe_buffers);
 
 #ifdef STATS
-	device_info->sbp_allocs = 0;
-	device_info->lbp_allocs = 0;
 	device_info->total_allocs = 0;
 	device_info->map_op_count = 0;
 	device_info->bounce_count = 0;
@@ -634,6 +635,12 @@ dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
 		dev->bus_id, dev->bus->name);
 
 	return 0;
+
+ err_destroy:
+	dma_pool_destroy(device_info->small.pool);
+ err_free:
+	kfree(device_info);
+	return ret;
 }
 
 void
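The error path above is the standard kernel goto unwind: each failure jumps to the label that releases exactly what had been set up, replacing the duplicated cleanup-and-return sequences. The skeleton, reduced from the hunks above:

	ret = dmabounce_init_pool(&device_info->small, dev, ...);
	if (ret)
		goto err_free;		/* only device_info to undo */

	ret = dmabounce_init_pool(&device_info->large, dev, ...);
	if (ret)
		goto err_destroy;	/* small pool exists; destroy it too */

	return 0;

 err_destroy:
	dma_pool_destroy(device_info->small.pool);
 err_free:
	kfree(device_info);
	return ret;
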
@@ -655,10 +662,10 @@ dmabounce_unregister_dev(struct device *dev)
 		BUG();
 	}
 
-	if (device_info->small_buffer_pool)
-		dma_pool_destroy(device_info->small_buffer_pool);
-	if (device_info->large_buffer_pool)
-		dma_pool_destroy(device_info->large_buffer_pool);
+	if (device_info->small.pool)
+		dma_pool_destroy(device_info->small.pool);
+	if (device_info->large.pool)
+		dma_pool_destroy(device_info->large.pool);
 
 #ifdef STATS
 	print_alloc_stats(device_info);