diff options
author | Steven Rostedt (VMware) <rostedt@goodmis.org> | 2019-05-28 09:36:19 -0400 |
---|---|---|
committer | Steven Rostedt (VMware) <rostedt@goodmis.org> | 2019-05-28 09:36:19 -0400 |
commit | 86b3de60a0b634cdcef82d0a2091bc5444a00020 (patch) | |
tree | 2c9adf9f4171cc33baeb7ebc6200bc5b3cbafd73 /kernel | |
parent | a124692b698b00026a58d89831ceda2331b2e1d0 (diff) |
ring-buffer: Remove HAVE_64BIT_ALIGNED_ACCESS
Commit c19fa94a8fed ("Add HAVE_64BIT_ALIGNED_ACCESS") added the config for
architectures that required 64bit aligned access for all 64bit words. As
the ftrace ring buffer stores data on 4 byte alignment, this config option
was used to force it to store data on 8 byte alignment to make sure the data
being stored and written directly into the ring buffer was 8 byte aligned as
writing an 8 byte word to a memory location that is only 4 byte (rather than 8 byte)
aligned would cause issues.
But with the removal of the metag architecture, which was the only
architecture to use this, there is no architecture supported by Linux that
requires 8 byte aligned access for all 8 byte words (4 byte alignment is good
enough). Removing this config can simplify the code a bit.
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/trace/ring_buffer.c | 17 |
1 file changed, 4 insertions, 13 deletions
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 05b0b3139ebc..66358d66c933 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
@@ -128,16 +128,7 @@ int ring_buffer_print_entry_header(struct trace_seq *s) | |||
128 | #define RB_ALIGNMENT 4U | 128 | #define RB_ALIGNMENT 4U |
129 | #define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX) | 129 | #define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX) |
130 | #define RB_EVNT_MIN_SIZE 8U /* two 32bit words */ | 130 | #define RB_EVNT_MIN_SIZE 8U /* two 32bit words */ |
131 | 131 | #define RB_ALIGN_DATA __aligned(RB_ALIGNMENT) | |
132 | #ifndef CONFIG_HAVE_64BIT_ALIGNED_ACCESS | ||
133 | # define RB_FORCE_8BYTE_ALIGNMENT 0 | ||
134 | # define RB_ARCH_ALIGNMENT RB_ALIGNMENT | ||
135 | #else | ||
136 | # define RB_FORCE_8BYTE_ALIGNMENT 1 | ||
137 | # define RB_ARCH_ALIGNMENT 8U | ||
138 | #endif | ||
139 | |||
140 | #define RB_ALIGN_DATA __aligned(RB_ARCH_ALIGNMENT) | ||
141 | 132 | ||
142 | /* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */ | 133 | /* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */ |
143 | #define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX | 134 | #define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX |
@@ -2373,7 +2364,7 @@ rb_update_event(struct ring_buffer_per_cpu *cpu_buffer, | |||
2373 | 2364 | ||
2374 | event->time_delta = delta; | 2365 | event->time_delta = delta; |
2375 | length -= RB_EVNT_HDR_SIZE; | 2366 | length -= RB_EVNT_HDR_SIZE; |
2376 | if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) { | 2367 | if (length > RB_MAX_SMALL_DATA) { |
2377 | event->type_len = 0; | 2368 | event->type_len = 0; |
2378 | event->array[0] = length; | 2369 | event->array[0] = length; |
2379 | } else | 2370 | } else |
@@ -2388,11 +2379,11 @@ static unsigned rb_calculate_event_length(unsigned length) | |||
2388 | if (!length) | 2379 | if (!length) |
2389 | length++; | 2380 | length++; |
2390 | 2381 | ||
2391 | if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) | 2382 | if (length > RB_MAX_SMALL_DATA) |
2392 | length += sizeof(event.array[0]); | 2383 | length += sizeof(event.array[0]); |
2393 | 2384 | ||
2394 | length += RB_EVNT_HDR_SIZE; | 2385 | length += RB_EVNT_HDR_SIZE; |
2395 | length = ALIGN(length, RB_ARCH_ALIGNMENT); | 2386 | length = ALIGN(length, RB_ALIGNMENT); |
2396 | 2387 | ||
2397 | /* | 2388 | /* |
2398 | * In case the time delta is larger than the 27 bits for it | 2389 | * In case the time delta is larger than the 27 bits for it |