 sound/core/memalloc.c | 62 --------------------------------------------------
 1 file changed, 0 insertions(+), 62 deletions(-)
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index 23b7bc02728b..f5d6d8d12979 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -80,68 +80,6 @@ struct snd_mem_list {
 #endif
 
 /*
- * Hacks
- */
-
-#if defined(__i386__)
-/*
- * A hack to allocate large buffers via dma_alloc_coherent()
- *
- * since dma_alloc_coherent always tries GFP_DMA when the requested
- * pci memory region is below 32bit, it happens quite often that even
- * 2 order of pages cannot be allocated.
- *
- * so in the following, we allocate at first without dma_mask, so that
- * allocation will be done without GFP_DMA. if the area doesn't match
- * with the requested region, then realloate with the original dma_mask
- * again.
- *
- * Really, we want to move this type of thing into dma_alloc_coherent()
- * so dma_mask doesn't have to be messed with.
- */
-
-static void *snd_dma_hack_alloc_coherent(struct device *dev, size_t size,
-					 dma_addr_t *dma_handle,
-					 gfp_t flags)
-{
-	void *ret;
-	u64 dma_mask, coherent_dma_mask;
-
-	if (dev == NULL || !dev->dma_mask)
-		return dma_alloc_coherent(dev, size, dma_handle, flags);
-	dma_mask = *dev->dma_mask;
-	coherent_dma_mask = dev->coherent_dma_mask;
-	*dev->dma_mask = 0xffffffff; /* do without masking */
-	dev->coherent_dma_mask = 0xffffffff; /* do without masking */
-	ret = dma_alloc_coherent(dev, size, dma_handle, flags);
-	*dev->dma_mask = dma_mask; /* restore */
-	dev->coherent_dma_mask = coherent_dma_mask; /* restore */
-	if (ret) {
-		/* obtained address is out of range? */
-		if (((unsigned long)*dma_handle + size - 1) & ~dma_mask) {
-			/* reallocate with the proper mask */
-			dma_free_coherent(dev, size, ret, *dma_handle);
-			ret = dma_alloc_coherent(dev, size, dma_handle, flags);
-		}
-	} else {
-		/* wish to success now with the proper mask... */
-		if (dma_mask != 0xffffffffUL) {
-			/* allocation with GFP_ATOMIC to avoid the long stall */
-			flags &= ~GFP_KERNEL;
-			flags |= GFP_ATOMIC;
-			ret = dma_alloc_coherent(dev, size, dma_handle, flags);
-		}
-	}
-	return ret;
-}
-
-/* redefine dma_alloc_coherent for some architectures */
-#undef dma_alloc_coherent
-#define dma_alloc_coherent snd_dma_hack_alloc_coherent
-
-#endif /* arch */
-
-/*
  *
  *  Generic memory allocators
  *
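
Note: the deleted comment describes a mask-juggling trick, and the removed function above is the authoritative form of it. As a minimal standalone sketch (the helper name below is hypothetical and not part of the kernel tree), the idea is: temporarily widen the device's DMA masks to the full 32-bit range so dma_alloc_coherent() does not fall back to GFP_DMA, restore the real masks, and redo the allocation normally if the returned bus address turns out to be unreachable by the device.

#include <linux/device.h>
#include <linux/dma-mapping.h>

/*
 * Hypothetical sketch of the technique removed above: allocate with a
 * temporarily widened mask to avoid GFP_DMA, then fall back to a normal
 * allocation if the buffer lies outside the device's real DMA mask.
 */
static void *wide_then_checked_alloc(struct device *dev, size_t size,
				     dma_addr_t *dma_handle, gfp_t flags)
{
	u64 saved_mask = *dev->dma_mask;
	u64 saved_coherent_mask = dev->coherent_dma_mask;
	void *buf;

	*dev->dma_mask = DMA_BIT_MASK(32);	/* allocate without masking */
	dev->coherent_dma_mask = DMA_BIT_MASK(32);
	buf = dma_alloc_coherent(dev, size, dma_handle, flags);
	*dev->dma_mask = saved_mask;		/* restore the real masks */
	dev->coherent_dma_mask = saved_coherent_mask;

	if (buf && (((u64)*dma_handle + size - 1) & ~saved_mask)) {
		/* buffer landed above the device's reachable range: redo */
		dma_free_coherent(dev, size, buf, *dma_handle);
		buf = dma_alloc_coherent(dev, size, dma_handle, flags);
	}
	return buf;
}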