aboutsummaryrefslogtreecommitdiffstats
path: root/lib
diff options
context:
space:
mode:
authorJoerg Roedel <joerg.roedel@amd.com>2009-01-09 06:42:46 -0500
committerJoerg Roedel <joerg.roedel@amd.com>2009-03-05 14:35:15 -0500
commit3b1e79ed734f58ac41ca0a287ff03ca355f120ad (patch)
tree58ff425e5181df9f2fc317a612f2313054cce7aa /lib
parent30dfa90cc8c4c9621d8d5aa9499f3a5df3376307 (diff)
dma-debug: add allocator code
Impact: add allocator code for struct dma_debug_entry Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Diffstat (limited to 'lib')
-rw-r--r--lib/dma-debug.c57
1 file changed, 57 insertions, 0 deletions
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 5ff7d2e2b60e..b60914669656 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -52,6 +52,16 @@ struct hash_bucket {
52 52
53/* Hash list to save the allocated dma addresses */ 53/* Hash list to save the allocated dma addresses */
54static struct hash_bucket dma_entry_hash[HASH_SIZE]; 54static struct hash_bucket dma_entry_hash[HASH_SIZE];
55/* List of pre-allocated dma_debug_entry's */
56static LIST_HEAD(free_entries);
57/* Lock for the list above */
58static DEFINE_SPINLOCK(free_entries_lock);
59
60/* Global disable flag - will be set in case of an error */
61static bool global_disable __read_mostly;
62
63static u32 num_free_entries;
64static u32 min_free_entries;
55 65
56/* 66/*
57 * Hash related functions 67 * Hash related functions
@@ -141,3 +151,50 @@ static void add_dma_entry(struct dma_debug_entry *entry)
141 put_hash_bucket(bucket, &flags); 151 put_hash_bucket(bucket, &flags);
142} 152}
143 153
154/* struct dma_entry allocator
155 *
156 * The next two functions implement the allocator for
157 * struct dma_debug_entries.
158 */
159static struct dma_debug_entry *dma_entry_alloc(void)
160{
161 struct dma_debug_entry *entry = NULL;
162 unsigned long flags;
163
164 spin_lock_irqsave(&free_entries_lock, flags);
165
166 if (list_empty(&free_entries)) {
167 printk(KERN_ERR "DMA-API: debugging out of memory "
168 "- disabling\n");
169 global_disable = true;
170 goto out;
171 }
172
173 entry = list_entry(free_entries.next, struct dma_debug_entry, list);
174 list_del(&entry->list);
175 memset(entry, 0, sizeof(*entry));
176
177 num_free_entries -= 1;
178 if (num_free_entries < min_free_entries)
179 min_free_entries = num_free_entries;
180
181out:
182 spin_unlock_irqrestore(&free_entries_lock, flags);
183
184 return entry;
185}
186
187static void dma_entry_free(struct dma_debug_entry *entry)
188{
189 unsigned long flags;
190
191 /*
192 * add to beginning of the list - this way the entries are
193 * more likely cache hot when they are reallocated.
194 */
195 spin_lock_irqsave(&free_entries_lock, flags);
196 list_add(&entry->list, &free_entries);
197 num_free_entries += 1;
198 spin_unlock_irqrestore(&free_entries_lock, flags);
199}
200