author		Takashi Iwai <tiwai@suse.de>	2006-01-31 08:44:28 -0500
committer	Jaroslav Kysela <perex@suse.cz>	2006-03-22 04:28:12 -0500
commit		2ba8c15c738b64b4d3acaace1e19750362ff2b69 (patch)
tree		14407aac8fe77a92424b6df73f0c3e39d83c7fc7
parent		ea50888d83cfb797ff7efadedc033b33bc2064bc (diff)
[ALSA] Removed unneeded page-reserve
Modules: Memalloc module

Removed unneeded page-reservation.

Signed-off-by: Takashi Iwai <tiwai@suse.de>
-rw-r--r--	sound/core/memalloc.c	32
1 file changed, 2 insertions(+), 30 deletions(-)
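
For context (not part of the commit): a minimal usage sketch of the two public helpers this patch touches, snd_malloc_pages() and snd_free_pages(), assuming the signatures visible in the hunk headers below and the declarations in <sound/memalloc.h>. After this change the helpers only do page accounting via inc_snd_pages()/dec_snd_pages(); no PG_reserved marking is performed. The caller names here are hypothetical, for illustration only.

#include <linux/gfp.h>          /* GFP_KERNEL */
#include <sound/memalloc.h>     /* snd_malloc_pages(), snd_free_pages() (assumed declaration site) */

/* Hypothetical caller: allocate a page-backed buffer of 'size' bytes. */
static void *example_buffer_alloc(size_t size)
{
	/* gfp_flags must be non-zero; __GFP_COMP is OR-ed in internally */
	return snd_malloc_pages(size, GFP_KERNEL);
}

/* Hypothetical caller: release a buffer obtained from snd_malloc_pages(). */
static void example_buffer_free(void *buf, size_t size)
{
	if (buf)
		snd_free_pages(buf, size);
}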
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index 8360418d333e..3fc6f97075ed 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -141,10 +141,6 @@ static void *snd_dma_hack_alloc_coherent(struct device *dev, size_t size,
 
 #endif /* arch */
 
-#if ! defined(__arm__)
-#define NEED_RESERVE_PAGES
-#endif
-
 /*
  *
  *  Generic memory allocators
@@ -163,20 +159,6 @@ static inline void dec_snd_pages(int order)
 	snd_allocated_pages -= 1 << order;
 }
 
-static void mark_pages(struct page *page, int order)
-{
-	struct page *last_page = page + (1 << order);
-	while (page < last_page)
-		SetPageReserved(page++);
-}
-
-static void unmark_pages(struct page *page, int order)
-{
-	struct page *last_page = page + (1 << order);
-	while (page < last_page)
-		ClearPageReserved(page++);
-}
-
 /**
  * snd_malloc_pages - allocate pages with the given size
  * @size: the size to allocate in bytes
@@ -195,10 +177,8 @@ void *snd_malloc_pages(size_t size, gfp_t gfp_flags)
 	snd_assert(gfp_flags != 0, return NULL);
 	gfp_flags |= __GFP_COMP;	/* compound page lets parts be mapped */
 	pg = get_order(size);
-	if ((res = (void *) __get_free_pages(gfp_flags, pg)) != NULL) {
-		mark_pages(virt_to_page(res), pg);
+	if ((res = (void *) __get_free_pages(gfp_flags, pg)) != NULL)
 		inc_snd_pages(pg);
-	}
 	return res;
 }
 
@@ -217,7 +197,6 @@ void snd_free_pages(void *ptr, size_t size)
 		return;
 	pg = get_order(size);
 	dec_snd_pages(pg);
-	unmark_pages(virt_to_page(ptr), pg);
 	free_pages((unsigned long) ptr, pg);
 }
 
@@ -242,12 +221,8 @@ static void *snd_malloc_dev_pages(struct device *dev, size_t size, dma_addr_t *d
 				| __GFP_NORETRY /* don't trigger OOM-killer */
 				| __GFP_NOWARN; /* no stack trace print - this call is non-critical */
 	res = dma_alloc_coherent(dev, PAGE_SIZE << pg, dma, gfp_flags);
-	if (res != NULL) {
-#ifdef NEED_RESERVE_PAGES
-		mark_pages(virt_to_page(res), pg); /* should be dma_to_page() */
-#endif
+	if (res != NULL)
 		inc_snd_pages(pg);
-	}
 
 	return res;
 }
@@ -262,9 +237,6 @@ static void snd_free_dev_pages(struct device *dev, size_t size, void *ptr,
 		return;
 	pg = get_order(size);
 	dec_snd_pages(pg);
-#ifdef NEED_RESERVE_PAGES
-	unmark_pages(virt_to_page(ptr), pg); /* should be dma_to_page() */
-#endif
 	dma_free_coherent(dev, PAGE_SIZE << pg, ptr, dma);
 }
 