author     Takashi Iwai <tiwai@suse.de>        2008-08-08 11:09:09 -0400
committer  Jaroslav Kysela <perex@perex.cz>    2008-08-13 05:46:35 -0400
commit     7eaa943c8ed8e91e05d0f5d0dc7a18e3319b45cf (patch)
tree       51d86a4cb01cf5735b18c36ca62471f8c759a041 /sound/core/memalloc.c
parent     5ef03460a6ffc1d3ee6b6f2abc6765d3e224cf89 (diff)
ALSA: Kill snd_assert() in sound/core/*
Kill snd_assert() in sound/core/*: each use is either removed or
replaced with an if () check using snd_BUG_ON().
Signed-off-by: Takashi Iwai <tiwai@suse.de>
Signed-off-by: Jaroslav Kysela <perex@perex.cz>
Diffstat (limited to 'sound/core/memalloc.c')
-rw-r--r--  sound/core/memalloc.c  41
1 file changed, 16 insertions, 25 deletions
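
Every hunk below follows the same shape: the file's private, debug-only snd_assert() macro (removed in the first hunk) bundled the sanity test, an error printk and a recovery action, and expanded to nothing without CONFIG_SND_DEBUG; the replacement spells the check out as an ordinary if () with an explicit early return, so it stays active in non-debug builds. Note that the commit message mentions snd_BUG_ON(), but in memalloc.c the replacement is the generic kernel WARN_ON(). A minimal before/after sketch of the pattern, using the dmab check from snd_dma_alloc_pages() as it appears in the diff:

        /* old: test and bail-out hidden inside the macro; a no-op without CONFIG_SND_DEBUG */
        snd_assert(dmab != NULL, return -ENXIO);

        /* new: explicit check; WARN_ON() prints a kernel warning (with a backtrace
         * on most configs) and the caller still gets a graceful -ENXIO */
        if (WARN_ON(!dmab))
                return -ENXIO;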
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index f5d6d8d12979..4a649976cc8a 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -67,18 +67,6 @@ struct snd_mem_list {
 /* id for pre-allocated buffers */
 #define SNDRV_DMA_DEVICE_UNUSED (unsigned int)-1
 
-#ifdef CONFIG_SND_DEBUG
-#define __ASTRING__(x) #x
-#define snd_assert(expr, args...) do {\
-        if (!(expr)) {\
-                printk(KERN_ERR "snd-malloc: BUG? (%s) (called from %p)\n", __ASTRING__(expr), __builtin_return_address(0));\
-                args;\
-        }\
-} while (0)
-#else
-#define snd_assert(expr, args...) /**/
-#endif
-
 /*
  *
  * Generic memory allocators
@@ -111,8 +99,10 @@ void *snd_malloc_pages(size_t size, gfp_t gfp_flags)
         int pg;
         void *res;
 
-        snd_assert(size > 0, return NULL);
-        snd_assert(gfp_flags != 0, return NULL);
+        if (WARN_ON(!size))
+                return NULL;
+        if (WARN_ON(!gfp_flags))
+                return NULL;
         gfp_flags |= __GFP_COMP;        /* compound page lets parts be mapped */
         pg = get_order(size);
         if ((res = (void *) __get_free_pages(gfp_flags, pg)) != NULL)
@@ -152,8 +142,8 @@ static void *snd_malloc_dev_pages(struct device *dev, size_t size, dma_addr_t *d
         void *res;
         gfp_t gfp_flags;
 
-        snd_assert(size > 0, return NULL);
-        snd_assert(dma != NULL, return NULL);
+        if (WARN_ON(!dma))
+                return NULL;
         pg = get_order(size);
         gfp_flags = GFP_KERNEL
                 | __GFP_COMP    /* compound page lets parts be mapped */
@@ -189,8 +179,8 @@ static void *snd_malloc_sbus_pages(struct device *dev, size_t size,
         int pg;
         void *res;
 
-        snd_assert(size > 0, return NULL);
-        snd_assert(dma_addr != NULL, return NULL);
+        if (WARN_ON(!dma_addr))
+                return NULL;
         pg = get_order(size);
         res = sbus_alloc_consistent(sdev, PAGE_SIZE * (1 << pg), dma_addr);
         if (res != NULL)
@@ -236,8 +226,10 @@ static void snd_free_sbus_pages(struct device *dev, size_t size,
 int snd_dma_alloc_pages(int type, struct device *device, size_t size,
                         struct snd_dma_buffer *dmab)
 {
-        snd_assert(size > 0, return -ENXIO);
-        snd_assert(dmab != NULL, return -ENXIO);
+        if (WARN_ON(!size))
+                return -ENXIO;
+        if (WARN_ON(!dmab))
+                return -ENXIO;
 
         dmab->dev.type = type;
         dmab->dev.dev = device;
@@ -292,9 +284,6 @@ int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
 {
         int err;
 
-        snd_assert(size > 0, return -ENXIO);
-        snd_assert(dmab != NULL, return -ENXIO);
-
         while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) {
                 if (err != -ENOMEM)
                         return err;
@@ -353,7 +342,8 @@ size_t snd_dma_get_reserved_buf(struct snd_dma_buffer *dmab, unsigned int id)
 {
         struct snd_mem_list *mem;
 
-        snd_assert(dmab, return 0);
+        if (WARN_ON(!dmab))
+                return 0;
 
         mutex_lock(&list_mutex);
         list_for_each_entry(mem, &mem_list_head, list) {
@@ -387,7 +377,8 @@ int snd_dma_reserve_buf(struct snd_dma_buffer *dmab, unsigned int id)
 {
         struct snd_mem_list *mem;
 
-        snd_assert(dmab, return -EINVAL);
+        if (WARN_ON(!dmab))
+                return -EINVAL;
         mem = kmalloc(sizeof(*mem), GFP_KERNEL);
         if (! mem)
                 return -ENOMEM;