aboutsummaryrefslogblamecommitdiffstats
path: root/arch/Kconfig
blob: 215e46073c453d63a691b4fc02bde826a9d4ad50 (plain) (tree)
1
2
3
4
5
6
7
8
9


                                        

               
                                            

                                
                          
                                     






                                                                      











                                                                      
                    
            











                                                                          
                                      
            
















                                                                     


                            



                                             






                                                                       
                        
            
 
                   
            

                      
            
 





                                                                        







                                                                           
            
 
                     
            

                              
            
 

                                     




                                                                            
 
               
            



                                                                   

                         


                                   


                         
                              
 

                                
 
                            
#
# General architecture dependent options
#

config OPROFILE
	tristate "OProfile system profiling"
	depends on PROFILING
	depends on HAVE_OPROFILE
	select RING_BUFFER
	select RING_BUFFER_ALLOW_SWAP
	help
	  OProfile is a profiling system capable of profiling the
	  whole system, including the kernel, kernel modules, libraries,
	  and applications.

	  If unsure, say N.

config OPROFILE_EVENT_MULTIPLEX
	bool "OProfile multiplexing support (EXPERIMENTAL)"
	default n
	depends on OPROFILE && X86
	help
	  The number of hardware counters is limited. The multiplexing
	  feature enables OProfile to gather more events than counters
	  are provided by the hardware. This is realized by switching
	  between events at a user-specified time interval.

	  If unsure, say N.

config HAVE_OPROFILE
	bool

config KPROBES
	bool "Kprobes"
	depends on KALLSYMS && MODULES
	depends on HAVE_KPROBES
	help
	  Kprobes allows you to trap at almost any kernel address and
	  execute a callback function.  register_kprobe() establishes
	  a probepoint and specifies the callback.  Kprobes is useful
	  for kernel debugging, non-intrusive instrumentation and testing.
	  If in doubt, say "N".

config HAVE_EFFICIENT_UNALIGNED_ACCESS
	bool
	help
	  Some architectures are unable to perform unaligned accesses
	  without the use of get_unaligned/put_unaligned. Others are
	  unable to perform such accesses efficiently (e.g. trap on
	  unaligned access and require fixing it up in the exception
	  handler.)

	  This symbol should be selected by an architecture if it can
	  perform unaligned accesses efficiently to allow different
	  code paths to be selected for these cases. Some network
	  drivers, for example, could opt to not fix up alignment
	  problems with received packets if doing so would not help
	  much.

	  See Documentation/unaligned-memory-access.txt for more
	  information on the topic of unaligned memory accesses.

config HAVE_SYSCALL_WRAPPERS
	bool

config KRETPROBES
	def_bool y
	depends on KPROBES && HAVE_KRETPROBES

config USER_RETURN_NOTIFIER
	bool
	depends on HAVE_USER_RETURN_NOTIFIER
	help
	  Provide a kernel-internal notification when a cpu is about to
	  switch to user mode.

config HAVE_IOREMAP_PROT
	bool

config HAVE_KPROBES
	bool

config HAVE_KRETPROBES
	bool

#
# An arch should select this if it provides all these things:
#
#	task_pt_regs()		in asm/processor.h or asm/ptrace.h
#	arch_has_single_step()	if there is hardware single-step support
#	arch_has_block_step()	if there is hardware block-step support
#	asm/syscall.h		supplying asm-generic/syscall.h interface
#	linux/regset.h		user_regset interfaces
#	CORE_DUMP_USE_REGSET	#define'd in linux/elf.h
#	TIF_SYSCALL_TRACE	calls tracehook_report_syscall_{entry,exit}
#	TIF_NOTIFY_RESUME	calls tracehook_notify_resume()
#	signal delivery		calls tracehook_signal_handler()
#
config HAVE_ARCH_TRACEHOOK
	bool

config HAVE_DMA_ATTRS
	bool

config USE_GENERIC_SMP_HELPERS
	bool

config HAVE_REGS_AND_STACK_ACCESS_API
	bool
	help
	  This symbol should be selected by an architecture if it supports
	  the API needed to access registers and stack entries from pt_regs,
	  declared in asm/ptrace.h
	  For example the kprobes-based event tracer needs this API.

config HAVE_CLK
	bool
	help
	  The <linux/clk.h> calls support software clock gating and
	  thus are a key power management tool on many systems.

config HAVE_DMA_API_DEBUG
	bool

config HAVE_DEFAULT_NO_SPIN_MUTEXES
	bool

config HAVE_HW_BREAKPOINT
	bool
	depends on PERF_EVENTS

config HAVE_USER_RETURN_NOTIFIER
	bool

source "kernel/gcov/Kconfig"
/*
 * Insert a prio_tree_node @node into a radix priority search tree @root. The
 * algorithm typically takes O(log n) time where 'log n' is the number of bits
 * required to represent the maximum heap_index. In the worst case, the algo
 * can take O((log n)^2) - check prio_tree_expand.
 *
 * If a prior node with same radix_index and heap_index is already found in
 * the tree, then returns the address of the prior node. Otherwise, inserts
 * @node into the tree and returns @node.
 */
struct prio_tree_node *prio_tree_insert(struct prio_tree_root *root,
		struct prio_tree_node *node)
{
	struct prio_tree_node *cur, *res = node;
	unsigned long radix_index, heap_index;
	unsigned long r_index, h_index, index, mask;
	int size_flag = 0;

	get_index(root, node, &radix_index, &heap_index);

	/*
	 * Grow the tree first if @node's heap_index does not fit within the
	 * current number of index bits.
	 */
	if (prio_tree_empty(root) ||
			heap_index > prio_tree_maxindex(root->index_bits))
		return prio_tree_expand(root, node, heap_index);

	cur = root->prio_tree_node;
	mask = 1UL << (root->index_bits - 1);

	while (mask) {
		get_index(root, cur, &r_index, &h_index);

		/* Exact duplicate: return the node already in the tree. */
		if (r_index == radix_index && h_index == heap_index)
			return cur;

		/*
		 * Heap condition: a node with larger heap_index (ties broken
		 * by smaller radix_index) must sit higher up.  If the node
		 * being inserted wins, swap it into @cur's place and continue
		 * descending with the displaced node.
		 */
		if (h_index < heap_index ||
				(h_index == heap_index && r_index > radix_index)) {
			struct prio_tree_node *tmp = node;
			node = prio_tree_replace(root, cur, node);
			cur = tmp;
			/* swap indices */
			index = r_index;
			r_index = radix_index;
			radix_index = index;
			index = h_index;
			h_index = heap_index;
			heap_index = index;
		}

		/*
		 * Branch selection key: the radix bits while they last; once
		 * exhausted (size_flag set below), the interval size
		 * heap_index - radix_index.
		 */
		if (size_flag)
			index = heap_index - radix_index;
		else
			index = radix_index;

		if (index & mask) {
			if (prio_tree_right_empty(cur)) {
				INIT_PRIO_TREE_NODE(node);
				cur->right = node;
				node->parent = cur;
				return res;
			} else
				cur = cur->right;
		} else {
			if (prio_tree_left_empty(cur)) {
				INIT_PRIO_TREE_NODE(node);
				cur->left = node;
				node->parent = cur;
				return res;
			} else
				cur = cur->left;
		}

		mask >>= 1;

		if (!mask) {
			/* Radix bits used up: switch to size-keyed levels. */
			mask = 1UL << (BITS_PER_LONG - 1);
			size_flag = 1;
		}
	}
	/* Should not reach here */
	BUG();
	return NULL;
}

/*
 * Remove a prio_tree_node @node from a radix priority search tree @root. The
 * algorithm takes O(log n) time where 'log n' is the number of bits required
 * to represent the maximum heap_index.
 */
void prio_tree_remove(struct prio_tree_root *root, struct prio_tree_node *node)
{
	struct prio_tree_node *cur;
	unsigned long r_index, h_index_right, h_index_left;

	/*
	 * Walk down from @node to a leaf, always following the child with
	 * the larger heap_index so the heap condition survives the removal.
	 */
	cur = node;
	while (!prio_tree_left_empty(cur) || !prio_tree_right_empty(cur)) {
		if (!prio_tree_left_empty(cur))
			get_index(root, cur->left, &r_index, &h_index_left);
		else {
			cur = cur->right;
			continue;
		}

		if (!prio_tree_right_empty(cur))
			get_index(root, cur->right, &r_index, &h_index_right);
		else {
			cur = cur->left;
			continue;
		}

		/* both h_index_left and h_index_right cannot be 0 */
		if (h_index_left >= h_index_right)
			cur = cur->left;
		else
			cur = cur->right;
	}

	/* Removing the only node: reset the tree to its empty state. */
	if (prio_tree_root(cur)) {
		BUG_ON(root->prio_tree_node != cur);
		__INIT_PRIO_TREE_ROOT(root, root->raw);
		return;
	}

	/*
	 * Detach the leaf from its parent.  NOTE(review): an empty child
	 * link appears to be encoded as a pointer back to the parent,
	 * matching the prio_tree_{left,right}_empty() checks used above.
	 */
	if (cur->parent->right == cur)
		cur->parent->right = cur->parent;
	else
		cur->parent->left = cur->parent;

	/* Bubble the detached leaf back up into @node's position. */
	while (cur != node)
		cur = prio_tree_replace(root, cur->parent, cur);
}

/*
 * Following functions help to enumerate all prio_tree_nodes in the tree that
 * overlap with the input interval X [radix_index, heap_index]. The enumeration
 * takes O(log n + m) time where 'log n' is the height of the tree (which is
 * proportional to # of bits required to represent the maximum heap_index) and
 * 'm' is the number of prio_tree_nodes that overlap the interval X.
 */

/*
 * Descend into the left subtree iff it may still contain an overlapping
 * node, i.e. the left child's heap_index reaches the query's radix_index.
 * Updates the iterator's mask/size_level bookkeeping; returns the new
 * current node, or NULL when the left branch is empty or pruned.
 */
static struct prio_tree_node *prio_tree_left(struct prio_tree_iter *iter,
		unsigned long *r_index, unsigned long *h_index)
{
	if (prio_tree_left_empty(iter->cur))
		return NULL;

	get_index(iter->root, iter->cur->left, r_index, h_index);

	if (iter->r_index <= *h_index) {
		iter->cur = iter->cur->left;
		iter->mask >>= 1;
		if (iter->mask) {
			if (iter->size_level)
				iter->size_level++;
		} else {
			if (iter->size_level) {
				/* A node this deep must be a leaf. */
				BUG_ON(!prio_tree_left_empty(iter->cur));
				BUG_ON(!prio_tree_right_empty(iter->cur));
				iter->size_level++;
				iter->mask = ULONG_MAX;
			} else {
				/* Radix bits exhausted: enter size levels. */
				iter->size_level = 1;
				iter->mask = 1UL << (BITS_PER_LONG - 1);
			}
		}
		return iter->cur;
	}

	return NULL;
}

/*
 * Descend into the right subtree iff it may contain an overlapping node:
 * the subtree's smallest radix index (@value, the path bits with the
 * current bit set) must not exceed the query's heap_index, and the right
 * child's heap_index must reach the query's radix_index.  Returns the new
 * current node, or NULL when the right branch is empty or pruned.
 */
static struct prio_tree_node *prio_tree_right(struct prio_tree_iter *iter,
		unsigned long *r_index, unsigned long *h_index)
{
	unsigned long value;

	if (prio_tree_right_empty(iter->cur))
		return NULL;

	if (iter->size_level)
		value = iter->value;
	else
		value = iter->value | iter->mask;

	/* The query interval ends before this subtree begins: prune. */
	if (iter->h_index < value)
		return NULL;

	get_index(iter->root, iter->cur->right, r_index, h_index);

	if (iter->r_index <= *h_index) {
		iter->cur = iter->cur->right;
		iter->mask >>= 1;
		iter->value = value;
		if (iter->mask) {
			if (iter->size_level)
				iter->size_level++;
		} else {
			if (iter->size_level) {
				/* A node this deep must be a leaf. */
				BUG_ON(!prio_tree_left_empty(iter->cur));
				BUG_ON(!prio_tree_right_empty(iter->cur));
				iter->size_level++;
				iter->mask = ULONG_MAX;
			} else {
				/* Radix bits exhausted: enter size levels. */
				iter->size_level = 1;
				iter->mask = 1UL << (BITS_PER_LONG - 1);
			}
		}
		return iter->cur;
	}

	return NULL;
}

/*
 * Step the iterator back up to the parent node, undoing the mask,
 * size_level and value bookkeeping performed on the way down.
 */
static struct prio_tree_node *prio_tree_parent(struct prio_tree_iter *iter)
{
	iter->cur = iter->cur->parent;
	if (iter->mask == ULONG_MAX)
		iter->mask = 1UL;
	else if (iter->size_level == 1)
		iter->mask = 1UL;
	else
		iter->mask <<= 1;
	if (iter->size_level)
		iter->size_level--;
	if (!iter->size_level && (iter->value & iter->mask))
		iter->value ^= iter->mask;
	return iter->cur;
}

/* True iff the query interval in @iter intersects [r_index, h_index]. */
static inline int overlap(struct prio_tree_iter *iter,
		unsigned long r_index, unsigned long h_index)
{
	return iter->h_index >= r_index && iter->r_index <= h_index;
}
/*
 * prio_tree_first:
 *
 * Get the first prio_tree_node that overlaps with the interval [radix_index,
 * heap_index]. Note that always radix_index <= heap_index. We do a pre-order
 * traversal of the tree.
 */
static struct prio_tree_node *prio_tree_first(struct prio_tree_iter *iter)
{
	struct prio_tree_root *root;
	unsigned long r_index, h_index;

	INIT_PRIO_TREE_ITER(iter);

	root = iter->root;
	if (prio_tree_empty(root))
		return NULL;

	get_index(root, root->prio_tree_node, &r_index, &h_index);

	/*
	 * The root carries the largest heap_index in the tree (heap
	 * condition): if it ends before the query starts, no node can
	 * overlap the query interval.
	 */
	if (iter->r_index > h_index)
		return NULL;

	iter->mask = 1UL << (root->index_bits - 1);
	iter->cur = root->prio_tree_node;

	/* Pre-order: report the first matching node, preferring left. */
	while (1) {
		if (overlap(iter, r_index, h_index))
			return iter->cur;

		if (prio_tree_left(iter, &r_index, &h_index))
			continue;

		if (prio_tree_right(iter, &r_index, &h_index))
			continue;

		break;
	}

	return NULL;
}

/*
 * prio_tree_next:
 *
 * Get the next prio_tree_node that overlaps with the input interval in iter
 */
struct prio_tree_node *prio_tree_next(struct prio_tree_iter *iter)
{
	unsigned long r_index, h_index;

	/* First call on this iterator: start the traversal from the root. */
	if (iter->cur == NULL)
		return prio_tree_first(iter);

repeat:
	/* Keep descending left while overlapping nodes may lie there. */
	while (prio_tree_left(iter, &r_index, &h_index))
		if (overlap(iter, r_index, h_index))
			return iter->cur;

	/*
	 * Left descent exhausted: climb until an untried right subtree is
	 * available, returning NULL once the root is reached with none left.
	 */
	while (!prio_tree_right(iter, &r_index, &h_index)) {
		while (!prio_tree_root(iter->cur) &&
				iter->cur->parent->right == iter->cur)
			prio_tree_parent(iter);

		if (prio_tree_root(iter->cur))
			return NULL;

		prio_tree_parent(iter);
	}

	if (overlap(iter, r_index, h_index))
		return iter->cur;

	goto repeat;
}