about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorTan, Li <li.tan@intel.com>2008-05-23 02:54:09 -0400
committerAvi Kivity <avi@qumranet.com>2008-07-20 05:42:32 -0400
commit9ef621d3be56e1188300476a8102ff54f7b6793f (patch)
treef5576d1365e726823c3eb4f772481cf5e4d1d0c7
parent25be46080f1a446cb2bda3daadbd22a5682b955e (diff)
KVM: Support mixed endian machines
Currently kvmtrace is not portable. This prevents copying a trace file from a big-endian target to a little-endian workstation for analysis. With this patch, the kernel outputs metadata containing a magic number to the trace log, and 64-bit words are changed to be u64 instead of a pair of u32s. Signed-off-by: Tan Li <li.tan@intel.com> Acked-by: Jerone Young <jyoung5@us.ibm.com> Acked-by: Hollis Blanchard <hollisb@us.ibm.com> Signed-off-by: Avi Kivity <avi@qumranet.com>
-rw-r--r--include/linux/kvm.h4
-rw-r--r--virt/kvm/kvm_trace.c18
2 files changed, 14 insertions, 8 deletions
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index 1c908ac29c6c..0ea064cbfbc8 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -318,14 +318,14 @@ struct kvm_trace_rec {
318 __u32 vcpu_id; 318 __u32 vcpu_id;
319 union { 319 union {
320 struct { 320 struct {
321 __u32 cycle_lo, cycle_hi; 321 __u64 cycle_u64;
322 __u32 extra_u32[KVM_TRC_EXTRA_MAX]; 322 __u32 extra_u32[KVM_TRC_EXTRA_MAX];
323 } cycle; 323 } cycle;
324 struct { 324 struct {
325 __u32 extra_u32[KVM_TRC_EXTRA_MAX]; 325 __u32 extra_u32[KVM_TRC_EXTRA_MAX];
326 } nocycle; 326 } nocycle;
327 } u; 327 } u;
328}; 328} __attribute__((packed));
329 329
330#define KVMIO 0xAE 330#define KVMIO 0xAE
331 331
diff --git a/virt/kvm/kvm_trace.c b/virt/kvm/kvm_trace.c
index 0e495470788d..58141f31ea8f 100644
--- a/virt/kvm/kvm_trace.c
+++ b/virt/kvm/kvm_trace.c
@@ -72,11 +72,7 @@ static void kvm_add_trace(void *probe_private, void *call_data,
72 rec.cycle_in = p->cycle_in; 72 rec.cycle_in = p->cycle_in;
73 73
74 if (rec.cycle_in) { 74 if (rec.cycle_in) {
75 u64 cycle = 0; 75 rec.u.cycle.cycle_u64 = get_cycles();
76
77 cycle = get_cycles();
78 rec.u.cycle.cycle_lo = (u32)cycle;
79 rec.u.cycle.cycle_hi = (u32)(cycle >> 32);
80 76
81 for (i = 0; i < rec.extra_u32; i++) 77 for (i = 0; i < rec.extra_u32; i++)
82 rec.u.cycle.extra_u32[i] = va_arg(*args, u32); 78 rec.u.cycle.extra_u32[i] = va_arg(*args, u32);
@@ -114,8 +110,18 @@ static int kvm_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
114{ 110{
115 struct kvm_trace *kt; 111 struct kvm_trace *kt;
116 112
117 if (!relay_buf_full(buf)) 113 if (!relay_buf_full(buf)) {
114 if (!prev_subbuf) {
115 /*
116 * executed only once when the channel is opened
117 * save metadata as first record
118 */
119 subbuf_start_reserve(buf, sizeof(u32));
120 *(u32 *)subbuf = 0x12345678;
121 }
122
118 return 1; 123 return 1;
124 }
119 125
120 kt = buf->chan->private_data; 126 kt = buf->chan->private_data;
121 atomic_inc(&kt->lost_records); 127 atomic_inc(&kt->lost_records);