aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMike Frysinger <vapier@gentoo.org>2009-07-01 11:42:13 -0400
committerMike Frysinger <vapier@gentoo.org>2009-09-16 21:31:41 -0400
commit81c969a8bc4bdc39032f6c58e50e61a8daeeb655 (patch)
tree49e9252e316ae9cb021d595de4a07cb0cf1e89d3
parent9c954f89412b346e0aeec01c5729cb7ca63e2673 (diff)
Blackfin: push SRAM locks down into related ifdefs
Rather than defining the locks and initializing them all the time, only do so when we actually need them (i.e. the SRAM regions exist). This avoids dead data and code bloat during runtime.

Signed-off-by: Mike Frysinger <vapier@gentoo.org>
-rw-r--r--arch/blackfin/mm/sram-alloc.c30
1 file changed, 17 insertions, 13 deletions
diff --git a/arch/blackfin/mm/sram-alloc.c b/arch/blackfin/mm/sram-alloc.c
index 99e4dbb1dfd1..eb63ab353e5a 100644
--- a/arch/blackfin/mm/sram-alloc.c
+++ b/arch/blackfin/mm/sram-alloc.c
@@ -42,11 +42,6 @@
42#include <asm/mem_map.h> 42#include <asm/mem_map.h>
43#include "blackfin_sram.h" 43#include "blackfin_sram.h"
44 44
45static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1sram_lock);
46static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1_data_sram_lock);
47static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1_inst_sram_lock);
48static spinlock_t l2_sram_lock ____cacheline_aligned_in_smp;
49
50/* the data structure for L1 scratchpad and DATA SRAM */ 45/* the data structure for L1 scratchpad and DATA SRAM */
51struct sram_piece { 46struct sram_piece {
52 void *paddr; 47 void *paddr;
@@ -55,6 +50,7 @@ struct sram_piece {
55 struct sram_piece *next; 50 struct sram_piece *next;
56}; 51};
57 52
53static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1sram_lock);
58static DEFINE_PER_CPU(struct sram_piece, free_l1_ssram_head); 54static DEFINE_PER_CPU(struct sram_piece, free_l1_ssram_head);
59static DEFINE_PER_CPU(struct sram_piece, used_l1_ssram_head); 55static DEFINE_PER_CPU(struct sram_piece, used_l1_ssram_head);
60 56
@@ -68,12 +64,18 @@ static DEFINE_PER_CPU(struct sram_piece, free_l1_data_B_sram_head);
68static DEFINE_PER_CPU(struct sram_piece, used_l1_data_B_sram_head); 64static DEFINE_PER_CPU(struct sram_piece, used_l1_data_B_sram_head);
69#endif 65#endif
70 66
67#if L1_DATA_A_LENGTH || L1_DATA_B_LENGTH
68static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1_data_sram_lock);
69#endif
70
71#if L1_CODE_LENGTH != 0 71#if L1_CODE_LENGTH != 0
72static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1_inst_sram_lock);
72static DEFINE_PER_CPU(struct sram_piece, free_l1_inst_sram_head); 73static DEFINE_PER_CPU(struct sram_piece, free_l1_inst_sram_head);
73static DEFINE_PER_CPU(struct sram_piece, used_l1_inst_sram_head); 74static DEFINE_PER_CPU(struct sram_piece, used_l1_inst_sram_head);
74#endif 75#endif
75 76
76#if L2_LENGTH != 0 77#if L2_LENGTH != 0
78static spinlock_t l2_sram_lock ____cacheline_aligned_in_smp;
77static struct sram_piece free_l2_sram_head, used_l2_sram_head; 79static struct sram_piece free_l2_sram_head, used_l2_sram_head;
78#endif 80#endif
79 81
@@ -225,10 +227,10 @@ static void __init l2_sram_init(void)
225 printk(KERN_INFO "Blackfin L2 SRAM: %d KB (%d KB free)\n", 227 printk(KERN_INFO "Blackfin L2 SRAM: %d KB (%d KB free)\n",
226 L2_LENGTH >> 10, 228 L2_LENGTH >> 10,
227 free_l2_sram_head.next->size >> 10); 229 free_l2_sram_head.next->size >> 10);
228#endif
229 230
230 /* mutex initialize */ 231 /* mutex initialize */
231 spin_lock_init(&l2_sram_lock); 232 spin_lock_init(&l2_sram_lock);
233#endif
232} 234}
233 235
234static int __init bfin_sram_init(void) 236static int __init bfin_sram_init(void)
@@ -416,18 +418,17 @@ EXPORT_SYMBOL(sram_free);
416 418
417void *l1_data_A_sram_alloc(size_t size) 419void *l1_data_A_sram_alloc(size_t size)
418{ 420{
421#if L1_DATA_A_LENGTH != 0
419 unsigned long flags; 422 unsigned long flags;
420 void *addr = NULL; 423 void *addr;
421 unsigned int cpu; 424 unsigned int cpu;
422 425
423 cpu = get_cpu(); 426 cpu = get_cpu();
424 /* add mutex operation */ 427 /* add mutex operation */
425 spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags); 428 spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);
426 429
427#if L1_DATA_A_LENGTH != 0
428 addr = _sram_alloc(size, &per_cpu(free_l1_data_A_sram_head, cpu), 430 addr = _sram_alloc(size, &per_cpu(free_l1_data_A_sram_head, cpu),
429 &per_cpu(used_l1_data_A_sram_head, cpu)); 431 &per_cpu(used_l1_data_A_sram_head, cpu));
430#endif
431 432
432 /* add mutex operation */ 433 /* add mutex operation */
433 spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags); 434 spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);
@@ -437,11 +438,15 @@ void *l1_data_A_sram_alloc(size_t size)
437 (long unsigned int)addr, size); 438 (long unsigned int)addr, size);
438 439
439 return addr; 440 return addr;
441#else
442 return NULL;
443#endif
440} 444}
441EXPORT_SYMBOL(l1_data_A_sram_alloc); 445EXPORT_SYMBOL(l1_data_A_sram_alloc);
442 446
443int l1_data_A_sram_free(const void *addr) 447int l1_data_A_sram_free(const void *addr)
444{ 448{
449#if L1_DATA_A_LENGTH != 0
445 unsigned long flags; 450 unsigned long flags;
446 int ret; 451 int ret;
447 unsigned int cpu; 452 unsigned int cpu;
@@ -450,18 +455,17 @@ int l1_data_A_sram_free(const void *addr)
450 /* add mutex operation */ 455 /* add mutex operation */
451 spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags); 456 spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);
452 457
453#if L1_DATA_A_LENGTH != 0
454 ret = _sram_free(addr, &per_cpu(free_l1_data_A_sram_head, cpu), 458 ret = _sram_free(addr, &per_cpu(free_l1_data_A_sram_head, cpu),
455 &per_cpu(used_l1_data_A_sram_head, cpu)); 459 &per_cpu(used_l1_data_A_sram_head, cpu));
456#else
457 ret = -1;
458#endif
459 460
460 /* add mutex operation */ 461 /* add mutex operation */
461 spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags); 462 spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);
462 put_cpu(); 463 put_cpu();
463 464
464 return ret; 465 return ret;
466#else
467 return -1;
468#endif
465} 469}
466EXPORT_SYMBOL(l1_data_A_sram_free); 470EXPORT_SYMBOL(l1_data_A_sram_free);
467 471