Diffstat (limited to 'kernel/trace/trace.c')
 kernel/trace/trace.c | 99 ++++++++++++++++++++++++++++++++-------------
 1 file changed, 70 insertions(+), 29 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 60f3b6289731..756d7283318b 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -117,9 +117,12 @@ static cpumask_var_t __read_mostly tracing_buffer_mask;
  *
  * It is default off, but you can enable it with either specifying
  * "ftrace_dump_on_oops" in the kernel command line, or setting
- * /proc/sys/kernel/ftrace_dump_on_oops to true.
+ * /proc/sys/kernel/ftrace_dump_on_oops
+ * Set 1 if you want to dump buffers of all CPUs
+ * Set 2 if you want to dump the buffer of the CPU that triggered oops
  */
-int ftrace_dump_on_oops;
+
+enum ftrace_dump_mode ftrace_dump_on_oops;
 
 static int tracing_set_tracer(const char *buf);
 
@@ -139,8 +142,17 @@ __setup("ftrace=", set_cmdline_ftrace);
 
 static int __init set_ftrace_dump_on_oops(char *str)
 {
-	ftrace_dump_on_oops = 1;
-	return 1;
+	if (*str++ != '=' || !*str) {
+		ftrace_dump_on_oops = DUMP_ALL;
+		return 1;
+	}
+
+	if (!strcmp("orig_cpu", str)) {
+		ftrace_dump_on_oops = DUMP_ORIG;
+		return 1;
+	}
+
+	return 0;
 }
 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
 
@@ -1571,7 +1583,7 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
 {
 	struct ring_buffer *buffer = iter->tr->buffer;
 	struct trace_entry *ent, *next = NULL;
-	unsigned long lost_events, next_lost = 0;
+	unsigned long lost_events = 0, next_lost = 0;
 	int cpu_file = iter->cpu_file;
 	u64 next_ts = 0, ts;
 	int next_cpu = -1;
@@ -1796,7 +1808,7 @@ static void print_func_help_header(struct seq_file *m)
 }
 
 
-static void
+void
 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
 {
 	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
@@ -2005,7 +2017,7 @@ static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
 	return event ? event->binary(iter, 0) : TRACE_TYPE_HANDLED;
 }
 
-static int trace_empty(struct trace_iterator *iter)
+int trace_empty(struct trace_iterator *iter)
 {
 	int cpu;
 
@@ -2072,6 +2084,23 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter)
 	return print_trace_fmt(iter);
 }
 
+void trace_default_header(struct seq_file *m)
+{
+	struct trace_iterator *iter = m->private;
+
+	if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
+		/* print nothing if the buffers are empty */
+		if (trace_empty(iter))
+			return;
+		print_trace_header(m, iter);
+		if (!(trace_flags & TRACE_ITER_VERBOSE))
+			print_lat_help_header(m);
+	} else {
+		if (!(trace_flags & TRACE_ITER_VERBOSE))
+			print_func_help_header(m);
+	}
+}
+
 static int s_show(struct seq_file *m, void *v)
 {
 	struct trace_iterator *iter = v;
@@ -2084,17 +2113,9 @@ static int s_show(struct seq_file *m, void *v)
 		}
 		if (iter->trace && iter->trace->print_header)
 			iter->trace->print_header(m);
-		else if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
-			/* print nothing if the buffers are empty */
-			if (trace_empty(iter))
-				return 0;
-			print_trace_header(m, iter);
-			if (!(trace_flags & TRACE_ITER_VERBOSE))
-				print_lat_help_header(m);
-		} else {
-			if (!(trace_flags & TRACE_ITER_VERBOSE))
-				print_func_help_header(m);
-		}
+		else
+			trace_default_header(m);
+
 	} else if (iter->leftover) {
 		/*
 		 * If we filled the seq_file buffer earlier, we
@@ -2180,15 +2201,20 @@ __tracing_open(struct inode *inode, struct file *file)
 
 	if (iter->cpu_file == TRACE_PIPE_ALL_CPU) {
 		for_each_tracing_cpu(cpu) {
-
 			iter->buffer_iter[cpu] =
-				ring_buffer_read_start(iter->tr->buffer, cpu);
+				ring_buffer_read_prepare(iter->tr->buffer, cpu);
+		}
+		ring_buffer_read_prepare_sync();
+		for_each_tracing_cpu(cpu) {
+			ring_buffer_read_start(iter->buffer_iter[cpu]);
 			tracing_iter_reset(iter, cpu);
 		}
 	} else {
 		cpu = iter->cpu_file;
 		iter->buffer_iter[cpu] =
-			ring_buffer_read_start(iter->tr->buffer, cpu);
+			ring_buffer_read_prepare(iter->tr->buffer, cpu);
+		ring_buffer_read_prepare_sync();
+		ring_buffer_read_start(iter->buffer_iter[cpu]);
 		tracing_iter_reset(iter, cpu);
 	}
 
@@ -4338,7 +4364,7 @@ static int trace_panic_handler(struct notifier_block *this,
 			       unsigned long event, void *unused)
 {
 	if (ftrace_dump_on_oops)
-		ftrace_dump();
+		ftrace_dump(ftrace_dump_on_oops);
 	return NOTIFY_OK;
 }
 
@@ -4355,7 +4381,7 @@ static int trace_die_handler(struct notifier_block *self,
 	switch (val) {
 	case DIE_OOPS:
 		if (ftrace_dump_on_oops)
-			ftrace_dump(ftrace_dump_on_oops);
+			ftrace_dump(ftrace_dump_on_oops);
 		break;
 	default:
 		break;
@@ -4396,7 +4422,8 @@ trace_printk_seq(struct trace_seq *s)
 	trace_seq_init(s);
 }
 
-static void __ftrace_dump(bool disable_tracing)
+static void
+__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
 {
 	static arch_spinlock_t ftrace_dump_lock =
 		(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
@@ -4429,12 +4456,25 @@ static void __ftrace_dump(bool disable_tracing)
 	/* don't look at user memory in panic mode */
 	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
 
-	printk(KERN_TRACE "Dumping ftrace buffer:\n");
-
 	/* Simulate the iterator */
 	iter.tr = &global_trace;
 	iter.trace = current_trace;
-	iter.cpu_file = TRACE_PIPE_ALL_CPU;
+
+	switch (oops_dump_mode) {
+	case DUMP_ALL:
+		iter.cpu_file = TRACE_PIPE_ALL_CPU;
+		break;
+	case DUMP_ORIG:
+		iter.cpu_file = raw_smp_processor_id();
+		break;
+	case DUMP_NONE:
+		goto out_enable;
+	default:
+		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
+		iter.cpu_file = TRACE_PIPE_ALL_CPU;
+	}
+
+	printk(KERN_TRACE "Dumping ftrace buffer:\n");
 
 	/*
 	 * We need to stop all tracing on all CPUS to read the
@@ -4473,6 +4513,7 @@ static void __ftrace_dump(bool disable_tracing)
 	else
 		printk(KERN_TRACE "---------------------------------\n");
 
+ out_enable:
	/* Re-enable tracing if requested */
 	if (!disable_tracing) {
 		trace_flags |= old_userobj;
@@ -4489,9 +4530,9 @@ static void __ftrace_dump(bool disable_tracing)
 }
 
 /* By default: disable tracing after the dump */
-void ftrace_dump(void)
+void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
 {
-	__ftrace_dump(true);
+	__ftrace_dump(true, oops_dump_mode);
 }
 
 __init static int tracer_alloc_buffers(void)
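
A note on the parser in the hunk above: with __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops), the handler receives whatever follows the matched token on the kernel command line, so a bare "ftrace_dump_on_oops" hands it an empty string (yielding DUMP_ALL) while "ftrace_dump_on_oops=orig_cpu" hands it "=orig_cpu" (yielding DUMP_ORIG). The sketch below is a minimal standalone (userspace) illustration of that mapping, not kernel code: the enum values mirror ftrace_dump_mode from the patch, and parse_dump_on_oops()/main() are hypothetical harness names.

/*
 * Standalone sketch of the set_ftrace_dump_on_oops() string-to-mode
 * mapping.  Compiles with any C compiler; nothing here touches the
 * kernel, the names merely mirror the patch.
 */
#include <stdio.h>
#include <string.h>

enum ftrace_dump_mode { DUMP_NONE, DUMP_ALL, DUMP_ORIG };

static enum ftrace_dump_mode dump_mode;

/* Same logic as the patched parser: a bare flag selects DUMP_ALL,
 * "=orig_cpu" selects DUMP_ORIG, anything else is rejected. */
static int parse_dump_on_oops(const char *str)
{
	if (*str++ != '=' || !*str) {
		dump_mode = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		dump_mode = DUMP_ORIG;
		return 1;
	}

	return 0;
}

int main(void)
{
	parse_dump_on_oops("");			/* "ftrace_dump_on_oops" alone */
	printf("bare flag -> mode %d (DUMP_ALL)\n", dump_mode);

	parse_dump_on_oops("=orig_cpu");	/* "ftrace_dump_on_oops=orig_cpu" */
	printf("orig_cpu  -> mode %d (DUMP_ORIG)\n", dump_mode);

	printf("bogus     -> ret %d (rejected)\n", parse_dump_on_oops("=bogus"));
	return 0;
}

The point of DUMP_ORIG is readability on large machines: dumping only the buffer of the CPU that triggered the oops keeps the post-mortem trace short, while DUMP_ALL preserves the old dump-everything behaviour.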
/* CacheFiles path walking and related routines
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>
#include <linux/quotaops.h>
#include <linux/xattr.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/slab.h>
#include "internal.h"

#define CACHEFILES_KEYBUF_SIZE 512

/*
 * dump debugging info about an object
 */
static noinline
void __cachefiles_printk_object(struct cachefiles_object *object,
				const char *prefix,
				u8 *keybuf)
{
	struct fscache_cookie *cookie;
	unsigned keylen, loop;

	printk(KERN_ERR "%sobject: OBJ%x\n",
	       prefix, object->fscache.debug_id);
	printk(KERN_ERR "%sobjstate=%s fl=%lx wbusy=%x ev=%lx[%lx]\n",
	       prefix, object->fscache.state->name,
	       object->fscache.flags, work_busy(&object->fscache.work),
	       object->fscache.events, object->fscache.event_mask);
	printk(KERN_ERR "%sops=%u inp=%u exc=%u\n",
	       prefix, object->fscache.n_ops, object->fscache.n_in_progress,
	       object->fscache.n_exclusive);
	printk(KERN_ERR "%sparent=%p\n",
	       prefix, object->fscache.parent);

	spin_lock(&object->fscache.lock);
	cookie = object->fscache.cookie;
	if (cookie) {
		printk(KERN_ERR "%scookie=%p [pr=%p nd=%p fl=%lx]\n",
		       prefix,
		       object->fscache.cookie,
		       object->fscache.cookie->parent,
		       object->fscache.cookie->netfs_data,
		       object->fscache.cookie->flags);
		if (keybuf && cookie->def)
			keylen = cookie->def->get_key(cookie->netfs_data, keybuf,
						      CACHEFILES_KEYBUF_SIZE);
		else
			keylen = 0;
	} else {
		printk(KERN_ERR "%scookie=NULL\n", prefix);
		keylen = 0;
	}
	spin_unlock(&object->fscache.lock);

	if (keylen) {
		printk(KERN_ERR "%skey=[%u] '", prefix, keylen);
		for (loop = 0; loop < keylen; loop++)
			printk("%02x", keybuf[loop]);
		printk("'\n");
	}
}

/*
 * dump debugging info about a pair of objects
 */
static noinline void cachefiles_printk_object(struct cachefiles_object *object,
					      struct cachefiles_object *xobject)
{
	u8 *keybuf;

	keybuf = kmalloc(CACHEFILES_KEYBUF_SIZE, GFP_NOIO);
	if (object)
		__cachefiles_printk_object(object, "", keybuf);
	if (xobject)
		__cachefiles_printk_object(xobject, "x", keybuf);
	kfree(keybuf);
}

/*
 * mark the owner of a dentry, if there is one, to indicate that that dentry
 * has been preemptively deleted
 * - the caller must hold the i_mutex on the dentry's parent as required to
 *   call vfs_unlink(), vfs_rmdir() or vfs_rename()
 */
static void cachefiles_mark_object_buried(struct cachefiles_cache *cache,
					  struct dentry *dentry)
{
	struct cachefiles_object *object;
	struct rb_node *p;

	_enter(",'%*.*s'",
	       dentry->d_name.len, dentry->d_name.len, dentry->d_name.name);

	write_lock(&cache->active_lock);

	p = cache->active_nodes.rb_node;
	while (p) {
		object = rb_entry(p, struct cachefiles_object, active_node);
		if (object->dentry > dentry)
			p = p->rb_left;
		else if (object->dentry < dentry)
			p = p->rb_right;
		else
			goto found_dentry;
	}

	write_unlock(&cache->active_lock);
	_leave(" [no owner]");
	return;

	/* found the dentry for the object */
found_dentry:
	kdebug("preemptive burial: OBJ%x [%s] %p",
	       object->fscache.debug_id,
	       object->fscache.state->name,
	       dentry);

	if (fscache_object_is_live(&object->fscache)) {
		printk(KERN_ERR "\n");
		printk(KERN_ERR "CacheFiles: Error:"
		       " Can't preemptively bury live object\n");
		cachefiles_printk_object(object, NULL);
	} else if (test_and_set_bit(CACHEFILES_OBJECT_BURIED, &object->flags)) {
		printk(KERN_ERR "CacheFiles: Error:"
		       " Object already preemptively buried\n");
	}

	write_unlock(&cache->active_lock);
	_leave(" [owner marked]");
}

/*
 * record the fact that an object is now active
 */
static int cachefiles_mark_object_active(struct cachefiles_cache *cache,
					 struct cachefiles_object *object)
{
	struct cachefiles_object *xobject;
	struct rb_node **_p, *_parent = NULL;
	struct dentry *dentry;

	_enter(",%p", object);

try_again:
	write_lock(&cache->active_lock);

	if (test_and_set_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags)) {
		printk(KERN_ERR "CacheFiles: Error: Object already active\n");
		cachefiles_printk_object(object, NULL);
		BUG();
	}

	dentry = object->dentry;
	_p = &cache->active_nodes.rb_node;
	while (*_p) {
		_parent = *_p;
		xobject = rb_entry(_parent,
				   struct cachefiles_object, active_node);

		ASSERT(xobject != object);

		if (xobject->dentry > dentry)
			_p = &(*_p)->rb_left;
		else if (xobject->dentry < dentry)
			_p = &(*_p)->rb_right;
		else
			goto wait_for_old_object;
	}

	rb_link_node(&object->active_node, _parent, _p);
	rb_insert_color(&object->active_node, &cache->active_nodes);

	write_unlock(&cache->active_lock);
	_leave(" = 0");
	return 0;

	/* an old object from a previous incarnation is hogging the slot - we
	 * need to wait for it to be destroyed */
wait_for_old_object:
	if (fscache_object_is_live(&object->fscache)) {
		printk(KERN_ERR "\n");
		printk(KERN_ERR "CacheFiles: Error:"
		       " Unexpected object collision\n");
		cachefiles_printk_object(object, xobject);
		BUG();
	}
	atomic_inc(&xobject->usage);
	write_unlock(&cache->active_lock);

	if (test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)) {
		wait_queue_head_t *wq;

		signed long timeout = 60 * HZ;
		wait_queue_t wait;
		bool requeue;

		/* if the object we're waiting for is queued for processing,
		 * then just put ourselves on the queue behind it */
		if (work_pending(&xobject->fscache.work)) {
			_debug("queue OBJ%x behind OBJ%x immediately",
			       object->fscache.debug_id,
			       xobject->fscache.debug_id);
			goto requeue;
		}

		/* otherwise we sleep until either the object we're waiting for
		 * is done, or the fscache_object is congested */
		wq = bit_waitqueue(&xobject->flags, CACHEFILES_OBJECT_ACTIVE);
		init_wait(&wait);
		requeue = false;
		do {
			prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
			if (!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags))
				break;

			requeue = fscache_object_sleep_till_congested(&timeout);
		} while (timeout > 0 && !requeue);
		finish_wait(wq, &wait);

		if (requeue &&
		    test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)) {
			_debug("queue OBJ%x behind OBJ%x after wait",
			       object->fscache.debug_id,
			       xobject->fscache.debug_id);
			goto requeue;
		}

		if (timeout <= 0) {
			printk(KERN_ERR "\n");
			printk(KERN_ERR "CacheFiles: Error: Overlong"
			       " wait for old active object to go away\n");
			cachefiles_printk_object(object, xobject);
			goto requeue;
		}
	}

	ASSERT(!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags));

	cache->cache.ops->put_object(&xobject->fscache);
	goto try_again;

requeue:
	clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
	cache->cache.ops->put_object(&xobject->fscache);
	_leave(" = -ETIMEDOUT");
	return -ETIMEDOUT;
}

/*
 * delete an object representation from the cache
 * - file backed objects are unlinked
 * - directory backed objects are stuffed into the graveyard for userspace to
 *   delete
 * - unlocks the directory mutex
 */
static int cachefiles_bury_object(struct cachefiles_cache *cache,
				  struct dentry *dir,
				  struct dentry *rep,
				  bool preemptive)
{
	struct dentry *grave, *trap;
	struct path path, path_to_graveyard;
	char nbuffer[8 + 8 + 1];
	int ret;

	_enter(",'%*.*s','%*.*s'",
	       dir->d_name.len, dir->d_name.len, dir->d_name.name,
	       rep->d_name.len, rep->d_name.len, rep->d_name.name);