summaryrefslogtreecommitdiffstats
path: root/lib/stackdepot.c
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2019-05-06 16:11:48 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2019-05-06 16:11:48 -0400
commit2c6a392cddacde153865b15e8295ad0a35ed3c02 (patch)
tree558c34595f8987c87d26fc0fa0dc644fca9ef2cd /lib/stackdepot.c
parent0a499fc5c37e6db096969a83534fd98a2bf2b36c (diff)
parent3599fe12a125fa7118da2bcc5033d7741fb5f3a1 (diff)
Merge branch 'core-stacktrace-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull stack trace updates from Ingo Molnar: "So Thomas looked at the stacktrace code recently and noticed a few weirdnesses, and we all know how such stories of crummy kernel code meeting German engineering perfection end: a 45-patch series to clean it all up! :-) Here's the changes in Thomas's words: 'Struct stack_trace is a sinkhole for input and output parameters which is largely pointless for most usage sites. In fact if embedded into other data structures it creates indirections and extra storage overhead for no benefit. Looking at all usage sites makes it clear that they just require an interface which is based on a storage array. That array is either on stack, global or embedded into some other data structure. Some of the stack depot usage sites are outright wrong, but fortunately the wrongness just causes more stack being used for nothing and does not have functional impact. Another oddity is the inconsistent termination of the stack trace with ULONG_MAX. It's pointless as the number of entries is what determines the length of the stored trace. In fact quite some call sites remove the ULONG_MAX marker afterwards with or without nasty comments about it. Not all architectures do that and those which do, do it inconsistently either conditional on nr_entries == 0 or unconditionally. The following series cleans that up by: 1) Removing the ULONG_MAX termination in the architecture code 2) Removing the ULONG_MAX fixups at the call sites 3) Providing plain storage array based interfaces for stacktrace and stackdepot. 4) Cleaning up the mess at the callsites including some related cleanups. 
5) Removing the struct stack_trace based interfaces This is not changing the struct stack_trace interfaces at the architecture level, but it removes the exposure to the generic code'" * 'core-stacktrace-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (45 commits) x86/stacktrace: Use common infrastructure stacktrace: Provide common infrastructure lib/stackdepot: Remove obsolete functions stacktrace: Remove obsolete functions livepatch: Simplify stack trace retrieval tracing: Remove the last struct stack_trace usage tracing: Simplify stack trace retrieval tracing: Make ftrace_trace_userstack() static and conditional tracing: Use percpu stack trace buffer more intelligently tracing: Simplify stacktrace retrieval in histograms lockdep: Simplify stack trace handling lockdep: Remove save argument from check_prev_add() lockdep: Remove unused trace argument from print_circular_bug() drm: Simplify stacktrace handling dm persistent data: Simplify stack trace handling dm bufio: Simplify stack trace retrieval btrfs: ref-verify: Simplify stack trace retrieval dma/debug: Simplify stacktrace retrieval fault-inject: Simplify stacktrace retrieval mm/page_owner: Simplify stack trace handling ...
Diffstat (limited to 'lib/stackdepot.c')
-rw-r--r--lib/stackdepot.c54
1 files changed, 33 insertions, 21 deletions
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index e513459a5601..605c61f65d94 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -194,40 +194,52 @@ static inline struct stack_record *find_stack(struct stack_record *bucket,
194 return NULL; 194 return NULL;
195} 195}
196 196
197void depot_fetch_stack(depot_stack_handle_t handle, struct stack_trace *trace) 197/**
198 * stack_depot_fetch - Fetch stack entries from a depot
199 *
200 * @handle: Stack depot handle which was returned from
201 * stack_depot_save().
202 * @entries: Pointer to store the entries address
203 *
204 * Return: The number of trace entries for this depot.
205 */
206unsigned int stack_depot_fetch(depot_stack_handle_t handle,
207 unsigned long **entries)
198{ 208{
199 union handle_parts parts = { .handle = handle }; 209 union handle_parts parts = { .handle = handle };
200 void *slab = stack_slabs[parts.slabindex]; 210 void *slab = stack_slabs[parts.slabindex];
201 size_t offset = parts.offset << STACK_ALLOC_ALIGN; 211 size_t offset = parts.offset << STACK_ALLOC_ALIGN;
202 struct stack_record *stack = slab + offset; 212 struct stack_record *stack = slab + offset;
203 213
204 trace->nr_entries = trace->max_entries = stack->size; 214 *entries = stack->entries;
205 trace->entries = stack->entries; 215 return stack->size;
206 trace->skip = 0;
207} 216}
208EXPORT_SYMBOL_GPL(depot_fetch_stack); 217EXPORT_SYMBOL_GPL(stack_depot_fetch);
209 218
210/** 219/**
211 * depot_save_stack - save stack in a stack depot. 220 * stack_depot_save - Save a stack trace from an array
212 * @trace - the stacktrace to save. 221 *
213 * @alloc_flags - flags for allocating additional memory if required. 222 * @entries: Pointer to storage array
223 * @nr_entries: Size of the storage array
224 * @alloc_flags: Allocation gfp flags
214 * 225 *
215 * Returns the handle of the stack struct stored in depot. 226 * Return: The handle of the stack struct stored in depot
216 */ 227 */
217depot_stack_handle_t depot_save_stack(struct stack_trace *trace, 228depot_stack_handle_t stack_depot_save(unsigned long *entries,
218 gfp_t alloc_flags) 229 unsigned int nr_entries,
230 gfp_t alloc_flags)
219{ 231{
220 u32 hash;
221 depot_stack_handle_t retval = 0;
222 struct stack_record *found = NULL, **bucket; 232 struct stack_record *found = NULL, **bucket;
223 unsigned long flags; 233 depot_stack_handle_t retval = 0;
224 struct page *page = NULL; 234 struct page *page = NULL;
225 void *prealloc = NULL; 235 void *prealloc = NULL;
236 unsigned long flags;
237 u32 hash;
226 238
227 if (unlikely(trace->nr_entries == 0)) 239 if (unlikely(nr_entries == 0))
228 goto fast_exit; 240 goto fast_exit;
229 241
230 hash = hash_stack(trace->entries, trace->nr_entries); 242 hash = hash_stack(entries, nr_entries);
231 bucket = &stack_table[hash & STACK_HASH_MASK]; 243 bucket = &stack_table[hash & STACK_HASH_MASK];
232 244
233 /* 245 /*
@@ -235,8 +247,8 @@ depot_stack_handle_t depot_save_stack(struct stack_trace *trace,
235 * The smp_load_acquire() here pairs with smp_store_release() to 247 * The smp_load_acquire() here pairs with smp_store_release() to
236 * |bucket| below. 248 * |bucket| below.
237 */ 249 */
238 found = find_stack(smp_load_acquire(bucket), trace->entries, 250 found = find_stack(smp_load_acquire(bucket), entries,
239 trace->nr_entries, hash); 251 nr_entries, hash);
240 if (found) 252 if (found)
241 goto exit; 253 goto exit;
242 254
@@ -264,10 +276,10 @@ depot_stack_handle_t depot_save_stack(struct stack_trace *trace,
264 276
265 spin_lock_irqsave(&depot_lock, flags); 277 spin_lock_irqsave(&depot_lock, flags);
266 278
267 found = find_stack(*bucket, trace->entries, trace->nr_entries, hash); 279 found = find_stack(*bucket, entries, nr_entries, hash);
268 if (!found) { 280 if (!found) {
269 struct stack_record *new = 281 struct stack_record *new =
270 depot_alloc_stack(trace->entries, trace->nr_entries, 282 depot_alloc_stack(entries, nr_entries,
271 hash, &prealloc, alloc_flags); 283 hash, &prealloc, alloc_flags);
272 if (new) { 284 if (new) {
273 new->next = *bucket; 285 new->next = *bucket;
@@ -297,4 +309,4 @@ exit:
297fast_exit: 309fast_exit:
298 return retval; 310 return retval;
299} 311}
300EXPORT_SYMBOL_GPL(depot_save_stack); 312EXPORT_SYMBOL_GPL(stack_depot_save);