Diffstat (limited to 'arch/powerpc/platforms/pseries/dtl.c')
-rw-r--r--  arch/powerpc/platforms/pseries/dtl.c | 224
1 file changed, 169 insertions(+), 55 deletions(-)
diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c
index a00addb55945..c371bc06434b 100644
--- a/arch/powerpc/platforms/pseries/dtl.c
+++ b/arch/powerpc/platforms/pseries/dtl.c
@@ -23,37 +23,22 @@
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/debugfs.h>
+#include <linux/spinlock.h>
 #include <asm/smp.h>
 #include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/firmware.h>
+#include <asm/lppaca.h>
 
 #include "plpar_wrappers.h"
 
-/*
- * Layout of entries in the hypervisor's DTL buffer. Although we don't
- * actually access the internals of an entry (we only need to know the size),
- * we might as well define it here for reference.
- */
-struct dtl_entry {
-	u8	dispatch_reason;
-	u8	preempt_reason;
-	u16	processor_id;
-	u32	enqueue_to_dispatch_time;
-	u32	ready_to_enqueue_time;
-	u32	waiting_to_ready_time;
-	u64	timebase;
-	u64	fault_addr;
-	u64	srr0;
-	u64	srr1;
-};
-
 struct dtl {
 	struct dtl_entry	*buf;
 	struct dentry		*file;
 	int			cpu;
 	int			buf_entries;
 	u64			last_idx;
+	spinlock_t		lock;
 };
 static DEFINE_PER_CPU(struct dtl, cpu_dtl);
 
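The struct dtl_entry definition deleted above is not gone from the kernel's point of view: the new #include <asm/lppaca.h> is presumably what now supplies it, along with the N_DISPATCH_LOG constant used by consume_dtle() below. For reference, the layout this file continues to depend on, as given by the removed definition, is:

	struct dtl_entry {
		u8	dispatch_reason;
		u8	preempt_reason;
		u16	processor_id;
		u32	enqueue_to_dispatch_time;
		u32	ready_to_enqueue_time;
		u32	waiting_to_ready_time;
		u64	timebase;
		u64	fault_addr;
		u64	srr0;
		u64	srr1;
	};

Each entry is 48 bytes, so 85 entries fit in a 4KB page; the default dtl_buf_entries of (16 * 85) seen below suggests a sixteen-page buffer, though that rationale is an inference, not stated in the patch.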
@@ -72,25 +57,97 @@ static u8 dtl_event_mask = 0x7;
 static int dtl_buf_entries = (16 * 85);
 
 
-static int dtl_enable(struct dtl *dtl)
-{
-	unsigned long addr;
-	int ret, hwcpu;
-
-	/* only allow one reader */
-	if (dtl->buf)
-		return -EBUSY;
-
-	/* we need to store the original allocation size for use during read */
-	dtl->buf_entries = dtl_buf_entries;
-
-	dtl->buf = kmalloc_node(dtl->buf_entries * sizeof(struct dtl_entry),
-			GFP_KERNEL, cpu_to_node(dtl->cpu));
-	if (!dtl->buf) {
-		printk(KERN_WARNING "%s: buffer alloc failed for cpu %d\n",
-				__func__, dtl->cpu);
-		return -ENOMEM;
-	}
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+struct dtl_ring {
+	u64	write_index;
+	struct dtl_entry *write_ptr;
+	struct dtl_entry *buf;
+	struct dtl_entry *buf_end;
+	u8	saved_dtl_mask;
+};
+
+static DEFINE_PER_CPU(struct dtl_ring, dtl_rings);
+
+static atomic_t dtl_count;
+
+/*
+ * The cpu accounting code controls the DTL ring buffer, and we get
+ * given entries as they are processed.
+ */
+static void consume_dtle(struct dtl_entry *dtle, u64 index)
+{
+	struct dtl_ring *dtlr = &__get_cpu_var(dtl_rings);
+	struct dtl_entry *wp = dtlr->write_ptr;
+	struct lppaca *vpa = local_paca->lppaca_ptr;
+
+	if (!wp)
+		return;
+
+	*wp = *dtle;
+	barrier();
+
+	/* check for hypervisor ring buffer overflow, ignore this entry if so */
+	if (index + N_DISPATCH_LOG < vpa->dtl_idx)
+		return;
+
+	++wp;
+	if (wp == dtlr->buf_end)
+		wp = dtlr->buf;
+	dtlr->write_ptr = wp;
+
+	/* incrementing write_index makes the new entry visible */
+	smp_wmb();
+	++dtlr->write_index;
+}
+
+static int dtl_start(struct dtl *dtl)
+{
+	struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu);
+
+	dtlr->buf = dtl->buf;
+	dtlr->buf_end = dtl->buf + dtl->buf_entries;
+	dtlr->write_index = 0;
+
+	/* setting write_ptr enables logging into our buffer */
+	smp_wmb();
+	dtlr->write_ptr = dtl->buf;
+
+	/* enable event logging */
+	dtlr->saved_dtl_mask = lppaca_of(dtl->cpu).dtl_enable_mask;
+	lppaca_of(dtl->cpu).dtl_enable_mask |= dtl_event_mask;
+
+	dtl_consumer = consume_dtle;
+	atomic_inc(&dtl_count);
+	return 0;
+}
+
+static void dtl_stop(struct dtl *dtl)
+{
+	struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu);
+
+	dtlr->write_ptr = NULL;
+	smp_wmb();
+
+	dtlr->buf = NULL;
+
+	/* restore dtl_enable_mask */
+	lppaca_of(dtl->cpu).dtl_enable_mask = dtlr->saved_dtl_mask;
+
+	if (atomic_dec_and_test(&dtl_count))
+		dtl_consumer = NULL;
+}
+
+static u64 dtl_current_index(struct dtl *dtl)
+{
+	return per_cpu(dtl_rings, dtl->cpu).write_index;
+}
+
+#else /* CONFIG_VIRT_CPU_ACCOUNTING */
+
+static int dtl_start(struct dtl *dtl)
+{
+	unsigned long addr;
+	int ret, hwcpu;
 
 	/* Register our dtl buffer with the hypervisor. The HV expects the
 	 * buffer size to be passed in the second word of the buffer */
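The smp_wmb() in consume_dtle() orders the entry copy before the write_index increment, so a lock-free reader that observes a given write_index can safely read every slot older than it. A minimal sketch of the matching reader side of that protocol (read_new_entries() is a hypothetical helper, not part of this patch; it assumes <asm/barrier.h> for smp_rmb()):

	/* Hedged sketch: pairs with the smp_wmb() in consume_dtle().
	 * Snapshot write_index first, then smp_rmb() before touching
	 * the slots, so we never read an entry the writer has not
	 * finished copying in. */
	static u64 read_new_entries(struct dtl_ring *dtlr,
				    struct dtl_entry *out, u64 last_seen)
	{
		u64 n_entries = dtlr->buf_end - dtlr->buf;
		u64 idx = dtlr->write_index;	/* published count */

		smp_rmb();	/* pairs with the writer's smp_wmb() */

		while (last_seen < idx) {
			*out++ = dtlr->buf[last_seen % n_entries];
			++last_seen;
		}
		return last_seen;
	}

The next hunk continues inside the non-accounting dtl_start() variant.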
@@ -102,34 +159,82 @@ static int dtl_enable(struct dtl *dtl)
 	if (ret) {
 		printk(KERN_WARNING "%s: DTL registration for cpu %d (hw %d) "
 		       "failed with %d\n", __func__, dtl->cpu, hwcpu, ret);
-		kfree(dtl->buf);
 		return -EIO;
 	}
 
 	/* set our initial buffer indices */
-	dtl->last_idx = lppaca[dtl->cpu].dtl_idx = 0;
+	lppaca_of(dtl->cpu).dtl_idx = 0;
 
 	/* ensure that our updates to the lppaca fields have occurred before
 	 * we actually enable the logging */
 	smp_wmb();
 
 	/* enable event logging */
-	lppaca[dtl->cpu].dtl_enable_mask = dtl_event_mask;
+	lppaca_of(dtl->cpu).dtl_enable_mask = dtl_event_mask;
 
 	return 0;
 }
 
-static void dtl_disable(struct dtl *dtl)
+static void dtl_stop(struct dtl *dtl)
 {
 	int hwcpu = get_hard_smp_processor_id(dtl->cpu);
 
-	lppaca[dtl->cpu].dtl_enable_mask = 0x0;
+	lppaca_of(dtl->cpu).dtl_enable_mask = 0x0;
 
 	unregister_dtl(hwcpu, __pa(dtl->buf));
+}
+
+static u64 dtl_current_index(struct dtl *dtl)
+{
+	return lppaca_of(dtl->cpu).dtl_idx;
+}
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
+
+static int dtl_enable(struct dtl *dtl)
+{
+	long int n_entries;
+	long int rc;
+	struct dtl_entry *buf = NULL;
 
+	/* only allow one reader */
+	if (dtl->buf)
+		return -EBUSY;
+
+	n_entries = dtl_buf_entries;
+	buf = kmalloc_node(n_entries * sizeof(struct dtl_entry),
+			GFP_KERNEL, cpu_to_node(dtl->cpu));
+	if (!buf) {
+		printk(KERN_WARNING "%s: buffer alloc failed for cpu %d\n",
+				__func__, dtl->cpu);
+		return -ENOMEM;
+	}
+
+	spin_lock(&dtl->lock);
+	rc = -EBUSY;
+	if (!dtl->buf) {
+		/* store the original allocation size for use during read */
+		dtl->buf_entries = n_entries;
+		dtl->buf = buf;
+		dtl->last_idx = 0;
+		rc = dtl_start(dtl);
+		if (rc)
+			dtl->buf = NULL;
+	}
+	spin_unlock(&dtl->lock);
+
+	if (rc)
+		kfree(buf);
+	return rc;
+}
+
+static void dtl_disable(struct dtl *dtl)
+{
+	spin_lock(&dtl->lock);
+	dtl_stop(dtl);
 	kfree(dtl->buf);
 	dtl->buf = NULL;
 	dtl->buf_entries = 0;
+	spin_unlock(&dtl->lock);
 }
 
 /* file interface */
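The rewritten dtl_enable() follows a standard kernel idiom: kmalloc_node() with GFP_KERNEL may sleep, so the allocation happens before the spinlock is taken, the "only one reader" condition is re-checked under the lock, and the speculative buffer is freed if the race was lost. Stripped of the DTL specifics, the shape is the following sketch (struct some_obj is a placeholder, not a real kernel type; a real module would need <linux/slab.h> and <linux/spinlock.h>):

	struct some_obj {
		spinlock_t lock;
		void *buf;
	};

	static int enable_once(struct some_obj *obj, size_t size)
	{
		void *buf = kmalloc(size, GFP_KERNEL); /* may sleep: allocate unlocked */
		int rc;

		if (!buf)
			return -ENOMEM;

		spin_lock(&obj->lock);
		rc = -EBUSY;
		if (!obj->buf) {	/* re-check now that we hold the lock */
			obj->buf = buf;
			buf = NULL;	/* ownership transferred */
			rc = 0;
		}
		spin_unlock(&obj->lock);

		kfree(buf);	/* kfree(NULL) is a no-op: only frees on a lost race */
		return rc;
	}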
@@ -157,8 +262,9 @@ static int dtl_file_release(struct inode *inode, struct file *filp)
 static ssize_t dtl_file_read(struct file *filp, char __user *buf, size_t len,
 		loff_t *pos)
 {
-	int rc, cur_idx, last_idx, n_read, n_req, read_size;
+	long int rc, n_read, n_req, read_size;
 	struct dtl *dtl;
+	u64 cur_idx, last_idx, i;
 
 	if ((len % sizeof(struct dtl_entry)) != 0)
 		return -EINVAL;
@@ -171,41 +277,48 @@ static ssize_t dtl_file_read(struct file *filp, char __user *buf, size_t len,
 	/* actual number of entries read */
 	n_read = 0;
 
-	cur_idx = lppaca[dtl->cpu].dtl_idx;
+	spin_lock(&dtl->lock);
+
+	cur_idx = dtl_current_index(dtl);
 	last_idx = dtl->last_idx;
 
-	if (cur_idx - last_idx > dtl->buf_entries) {
-		pr_debug("%s: hv buffer overflow for cpu %d, samples lost\n",
-			__func__, dtl->cpu);
-	}
+	if (last_idx + dtl->buf_entries <= cur_idx)
+		last_idx = cur_idx - dtl->buf_entries + 1;
+
+	if (last_idx + n_req > cur_idx)
+		n_req = cur_idx - last_idx;
+
+	if (n_req > 0)
+		dtl->last_idx = last_idx + n_req;
+
+	spin_unlock(&dtl->lock);
+
+	if (n_req <= 0)
+		return 0;
 
-	cur_idx %= dtl->buf_entries;
-	last_idx %= dtl->buf_entries;
+	i = last_idx % dtl->buf_entries;
 
 	/* read the tail of the buffer if we've wrapped */
-	if (last_idx > cur_idx) {
-		read_size = min(n_req, dtl->buf_entries - last_idx);
+	if (i + n_req > dtl->buf_entries) {
+		read_size = dtl->buf_entries - i;
 
-		rc = copy_to_user(buf, &dtl->buf[last_idx],
+		rc = copy_to_user(buf, &dtl->buf[i],
 				read_size * sizeof(struct dtl_entry));
 		if (rc)
 			return -EFAULT;
 
-		last_idx = 0;
+		i = 0;
 		n_req -= read_size;
 		n_read += read_size;
 		buf += read_size * sizeof(struct dtl_entry);
 	}
 
 	/* .. and now the head */
-	read_size = min(n_req, cur_idx - last_idx);
-	rc = copy_to_user(buf, &dtl->buf[last_idx],
-			read_size * sizeof(struct dtl_entry));
+	rc = copy_to_user(buf, &dtl->buf[i], n_req * sizeof(struct dtl_entry));
 	if (rc)
 		return -EFAULT;
 
-	n_read += read_size;
-	dtl->last_idx += n_read;
+	n_read += n_req;
 
 	return n_read * sizeof(struct dtl_entry);
 }
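The new read path keeps cur_idx and last_idx as monotonically increasing 64-bit entry counts and only reduces modulo buf_entries when indexing into the buffer, which avoids the old code's ambiguity when the counters were pre-reduced. A standalone userspace illustration of the wrap arithmetic with small, made-up numbers:

	/* Compiles as plain C; mirrors the index math in dtl_file_read(). */
	#include <stdio.h>

	int main(void)
	{
		unsigned long long buf_entries = 8;
		unsigned long long last_idx = 6, cur_idx = 11; /* 5 unread entries */
		unsigned long long n_req = cur_idx - last_idx; /* already clamped */
		unsigned long long i = last_idx % buf_entries; /* physical slot 6 */

		if (i + n_req > buf_entries) {	/* request crosses the end */
			unsigned long long tail = buf_entries - i;
			printf("copy %llu entries from slot %llu (tail)\n", tail, i);
			n_req -= tail;
			i = 0;
		}
		printf("copy %llu entries from slot %llu (head)\n", n_req, i);
		return 0;
	}

This prints "copy 2 entries from slot 6 (tail)" then "copy 3 entries from slot 0 (head)": the five logical entries 6..10 map to physical slots 6, 7, 0, 1, 2.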
@@ -263,6 +376,7 @@ static int dtl_init(void)
 	/* set up the per-cpu log structures */
 	for_each_possible_cpu(i) {
 		struct dtl *dtl = &per_cpu(cpu_dtl, i);
+		spin_lock_init(&dtl->lock);
 		dtl->cpu = i;
 
 		rc = dtl_setup_file(dtl);
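For context, a hedged userspace sketch of consuming this interface. The debugfs path and the per-cpu file naming are assumptions (dtl_setup_file() is outside this diff), and the entry layout is taken from the definition removed above; reads must be a multiple of sizeof(struct dtl_entry) or dtl_file_read() returns -EINVAL. Fields arrive in the hypervisor's (big-endian) byte order.

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	struct dtl_entry {	/* layout per the removed in-file definition */
		uint8_t  dispatch_reason;
		uint8_t  preempt_reason;
		uint16_t processor_id;
		uint32_t enqueue_to_dispatch_time;
		uint32_t ready_to_enqueue_time;
		uint32_t waiting_to_ready_time;
		uint64_t timebase;
		uint64_t fault_addr;
		uint64_t srr0;
		uint64_t srr1;
	};

	int main(void)
	{
		struct dtl_entry e[16];		/* whole entries only */
		ssize_t n;
		/* assumed path: /sys/kernel/debug/powerpc/dtl/cpu-<N> */
		int fd = open("/sys/kernel/debug/powerpc/dtl/cpu-0", O_RDONLY);

		if (fd < 0)
			return 1;
		while ((n = read(fd, e, sizeof(e))) > 0)
			for (ssize_t j = 0; j < n / (ssize_t)sizeof(e[0]); j++)
				printf("tb=%llu disp=%u\n",
				       (unsigned long long)e[j].timebase,
				       e[j].dispatch_reason);
		close(fd);
		return 0;
	}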