author    Rob Herring <rob.herring@calxeda.com>    2013-04-08 21:23:33 -0400
committer Tony Luck <tony.luck@intel.com>          2013-06-14 18:54:21 -0400
commit    0405a5cec3406f19e69da07c8111a6bf1088ac29 (patch)
tree      e56d8e5f9d16c799a48da12947b907d0e76d7121 /fs/pstore
parent    77418921649427577143667fcf00ccb8a809762a (diff)
pstore/ram: avoid atomic accesses for ioremapped regions
For persistent RAM outside of main memory, the memory may have
limitations on supported accesses. For internal RAM on the highbank
platform, exclusive accesses are not supported and will hang the
system, so atomic_cmpxchg cannot be used. This commit uses spinlock
protection for buffer size and start updates on ioremapped regions
instead.

Signed-off-by: Rob Herring <rob.herring@calxeda.com>
Acked-by: Anton Vorontsov <anton@enomsg.org>
Signed-off-by: Tony Luck <tony.luck@intel.com>
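The change the log describes is a switch between two update strategies: the existing lock-free atomic_cmpxchg retry loop, and a raw-spinlock-protected plain read/modify/write that avoids exclusive memory accesses entirely. The following is a minimal, standalone sketch of that pattern in user-space C11 (a pthread mutex stands in for the kernel's raw spinlock, and BUFFER_SIZE plus the variable names are invented for the example; this is not the ram_core.c code, which appears in the diff below):

/*
 * Standalone sketch (user-space C11, not the kernel code itself) of the two
 * update strategies: a lock-free compare-and-swap loop for normal RAM, and a
 * lock-protected plain read/modify/write for regions where exclusive
 * accesses would hang. BUFFER_SIZE, the variable names, and main() are
 * invented for the example; a pthread mutex stands in for a raw spinlock.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define BUFFER_SIZE 1024		/* hypothetical ring-buffer capacity */

static atomic_int buffer_start;		/* analogue of a persistent start offset */
static pthread_mutex_t buffer_lock = PTHREAD_MUTEX_INITIALIZER;

/* cmpxchg-style update: retry until no other thread raced with us */
static int start_add_atomic(int a)
{
	int old, new;

	do {
		old = atomic_load(&buffer_start);
		new = old + a;
		while (new > BUFFER_SIZE)
			new -= BUFFER_SIZE;
	} while (!atomic_compare_exchange_weak(&buffer_start, &old, new));

	return old;
}

/* locked update: plain load and store, no exclusive access required */
static int start_add_locked(int a)
{
	int old, new;

	pthread_mutex_lock(&buffer_lock);
	old = atomic_load(&buffer_start);
	new = old + a;
	while (new > BUFFER_SIZE)
		new -= BUFFER_SIZE;
	atomic_store(&buffer_start, new);
	pthread_mutex_unlock(&buffer_lock);

	return old;
}

int main(void)
{
	printf("atomic path returned old start %d\n", start_add_atomic(100));
	printf("locked path returned old start %d\n", start_add_locked(2000));
	printf("start is now %d\n", atomic_load(&buffer_start));
	return 0;
}

Both paths perform the same wrap-around arithmetic; only the mechanism that serializes concurrent writers differs, which is why the patch below can select one or the other at run time through function pointers.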
Diffstat (limited to 'fs/pstore')
-rw-r--r--  fs/pstore/ram_core.c  54
1 file changed, 52 insertions(+), 2 deletions(-)
diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
index 59337326e288..de272d426763 100644
--- a/fs/pstore/ram_core.c
+++ b/fs/pstore/ram_core.c
@@ -46,7 +46,7 @@ static inline size_t buffer_start(struct persistent_ram_zone *prz)
 }
 
 /* increase and wrap the start pointer, returning the old value */
-static inline size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a)
+static size_t buffer_start_add_atomic(struct persistent_ram_zone *prz, size_t a)
 {
 	int old;
 	int new;
@@ -62,7 +62,7 @@ static inline size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a)
 }
 
 /* increase the size counter until it hits the max size */
-static inline void buffer_size_add(struct persistent_ram_zone *prz, size_t a)
+static void buffer_size_add_atomic(struct persistent_ram_zone *prz, size_t a)
 {
 	size_t old;
 	size_t new;
@@ -78,6 +78,53 @@ static inline void buffer_size_add(struct persistent_ram_zone *prz, size_t a)
 	} while (atomic_cmpxchg(&prz->buffer->size, old, new) != old);
 }
 
+static DEFINE_RAW_SPINLOCK(buffer_lock);
+
+/* increase and wrap the start pointer, returning the old value */
+static size_t buffer_start_add_locked(struct persistent_ram_zone *prz, size_t a)
+{
+	int old;
+	int new;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&buffer_lock, flags);
+
+	old = atomic_read(&prz->buffer->start);
+	new = old + a;
+	while (unlikely(new > prz->buffer_size))
+		new -= prz->buffer_size;
+	atomic_set(&prz->buffer->start, new);
+
+	raw_spin_unlock_irqrestore(&buffer_lock, flags);
+
+	return old;
+}
+
+/* increase the size counter until it hits the max size */
+static void buffer_size_add_locked(struct persistent_ram_zone *prz, size_t a)
+{
+	size_t old;
+	size_t new;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&buffer_lock, flags);
+
+	old = atomic_read(&prz->buffer->size);
+	if (old == prz->buffer_size)
+		goto exit;
+
+	new = old + a;
+	if (new > prz->buffer_size)
+		new = prz->buffer_size;
+	atomic_set(&prz->buffer->size, new);
+
+exit:
+	raw_spin_unlock_irqrestore(&buffer_lock, flags);
+}
+
+static size_t (*buffer_start_add)(struct persistent_ram_zone *, size_t) = buffer_start_add_atomic;
+static void (*buffer_size_add)(struct persistent_ram_zone *, size_t) = buffer_size_add_atomic;
+
 static void notrace persistent_ram_encode_rs8(struct persistent_ram_zone *prz,
 	uint8_t *data, size_t len, uint8_t *ecc)
 {
@@ -372,6 +419,9 @@ static void *persistent_ram_iomap(phys_addr_t start, size_t size)
 		return NULL;
 	}
 
+	buffer_start_add = buffer_start_add_locked;
+	buffer_size_add = buffer_size_add_locked;
+
 	return ioremap(start, size);
 }
 