author	Pete Zaitcev <zaitcev@redhat.com>	2006-12-31 01:43:10 -0500
committer	Greg Kroah-Hartman <gregkh@suse.de>	2007-02-07 18:44:34 -0500
commit	6f23ee1fefdc1f80bd8a3ab04a1c41ab2dec14c9
tree	36a5241c29333580de3e3c75e2c62edc1cdf583c
parent	a8ef36bc0a5fe973bddaa54a5a07cda29e04a602

USB: add binary API to usbmon
This patch adds a new, "binary" API in addition to the old, text API usbmon
had before. The new API uses less CPU, and it allows capturing all data from
a packet, where the old API captured at most 32 bytes. There are some
limitations and conditions to this; e.g. if someone constructs a URB with 1GB
of data, it is not likely to be captured, because even the large buffers of
the new reader are finite. Nonetheless, I expect this new capability to
capture all data in all real-life scenarios.

The downside is that a special user-mode application is required where cat(1)
worked before. I have sample code at http://people.redhat.com/zaitcev/linux/
and Paolo Abeni is working on patching libpcap.

This patch was initially written by Paolo and later I tweaked it, and we had
a little back-and-forth. So this is a jointly authored patch, but since I am
submitting it, I am responsible for the bugs.

Signed-off-by: Paolo Abeni <paolo.abeni@email.it>
Signed-off-by: Pete Zaitcev <zaitcev@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
 Documentation/usb/usbmon.txt |  152
 drivers/usb/mon/Makefile     |    2
 drivers/usb/mon/mon_bin.c    | 1172
 drivers/usb/mon/mon_dma.c    |   39
 drivers/usb/mon/mon_main.c   |   97
 drivers/usb/mon/mon_text.c   |   67
 drivers/usb/mon/usb_mon.h    |   30
 7 files changed, 1502 insertions(+), 57 deletions(-)
diff --git a/Documentation/usb/usbmon.txt b/Documentation/usb/usbmon.txt
index e65ec828d7aa..0f6808abd612 100644
--- a/Documentation/usb/usbmon.txt
+++ b/Documentation/usb/usbmon.txt
@@ -77,7 +77,7 @@ that the file size is not excessive for your favourite editor.
 
 The '1t' type data consists of a stream of events, such as URB submission,
 URB callback, submission error. Every event is a text line, which consists
-of whitespace separated words. The number of position of words may depend
+of whitespace separated words. The number or position of words may depend
 on the event type, but there is a set of words, common for all types.
 
 Here is the list of words, from left to right:
@@ -170,4 +170,152 @@ dd65f0e8 4128379808 C Bo:005:02 0 31 >
 
 * Raw binary format and API
 
-TBD
+The overall architecture of the API is about the same as the one above,
+only the events are delivered in binary format. Each event is sent in
+the following structure (its name is made up, so that we can refer to it):
+
+struct usbmon_packet {
+	u64 id;			/*  0: URB ID - from submission to callback */
+	unsigned char type;	/*  8: Same as text; extensible. */
+	unsigned char xfer_type; /*    ISO (0), Intr, Control, Bulk (3) */
+	unsigned char epnum;	/*     Endpoint number and transfer direction */
+	unsigned char devnum;	/*     Device address */
+	u16 busnum;		/* 12: Bus number */
+	char flag_setup;	/* 14: Same as text */
+	char flag_data;		/* 15: Same as text; Binary zero is OK. */
+	s64 ts_sec;		/* 16: gettimeofday */
+	s32 ts_usec;		/* 24: gettimeofday */
+	int status;		/* 28: */
+	unsigned int length;	/* 32: Length of data (submitted or actual) */
+	unsigned int len_cap;	/* 36: Delivered length */
+	unsigned char setup[8];	/* 40: Only for Control 'S' */
+};				/* 48 bytes total */
+
+These events can be received from a character device by reading with read(2),
+with an ioctl(2), or by accessing the buffer with mmap.
+
+The character device is usually called /dev/usbmonN, where N is the USB bus
+number. Number zero (/dev/usbmon0) is special and means "all buses".
+However, this feature is not implemented yet. Note that specific naming
+policy is set by your Linux distribution.
+
+If you create /dev/usbmon0 by hand, make sure that it is owned by root
+and has mode 0600. Otherwise, unprivileged users will be able to snoop
+keyboard traffic.
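
For illustration, a minimal user-space sketch of reading one event with
read(2) (not part of this patch; it assumes struct usbmon_packet is declared
in the application exactly as above with u64/s64/s32/u16 taken from
<stdint.h>, plus the usual <fcntl.h> and <unistd.h> includes; error handling
is omitted):

	struct usbmon_packet hdr;
	unsigned char data[4096];
	size_t len;
	int fd;

	fd = open("/dev/usbmon2", O_RDONLY);	/* bus 2 is only an example */
	read(fd, &hdr, sizeof(hdr));		/* the 48-byte header comes first */
	len = hdr.len_cap < sizeof(data) ? hdr.len_cap : sizeof(data);
	if (len != 0)
		read(fd, data, len);		/* captured data follows the header */
	/* If data[] is smaller than len_cap, further read(2) calls return the
	   rest of this event's data before the next header is delivered. */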
+
+The following ioctl calls are available, with MON_IOC_MAGIC 0x92:
+
+ MON_IOCQ_URB_LEN, defined as _IO(MON_IOC_MAGIC, 1)
+
+This call returns the length of data in the next event. Note that the
+majority of events contain no data, so if this call returns zero, it does
+not mean that no events are available.
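
For example (a sketch; fd is the open /dev/usbmonN descriptor, and the ioctl
code is defined in the application as shown above):

	int data_len;

	data_len = ioctl(fd, MON_IOCQ_URB_LEN, 0);  /* data length of the next event */
	if (data_len < 0)
		perror("MON_IOCQ_URB_LEN");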
+
+ MON_IOCG_STATS, defined as _IOR(MON_IOC_MAGIC, 3, struct mon_bin_stats)
+
+The argument is a pointer to the following structure:
+
+struct mon_bin_stats {
+	u32 queued;
+	u32 dropped;
+};
+
+The member "queued" refers to the number of events currently queued in the
+buffer (and not to the number of events processed since the last reset).
+
+The member "dropped" is the number of events lost since the last call
+to MON_IOCG_STATS.
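
For example, a sketch that prints the counters (same assumptions as above;
<stdio.h> and <sys/ioctl.h> are needed):

	struct mon_bin_stats st;

	if (ioctl(fd, MON_IOCG_STATS, &st) == 0)
		printf("queued %u, dropped %u\n", st.queued, st.dropped);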
+
+ MON_IOCT_RING_SIZE, defined as _IO(MON_IOC_MAGIC, 4)
+
+This call sets the buffer size. The argument is the size in bytes.
+The size may be rounded down to the next chunk (or page). If the requested
+size is out of [unspecified] bounds for this kernel, the call fails with
+-EINVAL.
+
+ MON_IOCQ_RING_SIZE, defined as _IO(MON_IOC_MAGIC, 5)
+
+This call returns the current size of the buffer in bytes.
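
For example, a sketch that grows the buffer before a heavy capture and reads
back the effective size (note that, per the driver, resizing flushes any
events already collected):

	int ring_size;

	if (ioctl(fd, MON_IOCT_RING_SIZE, 1024*1024) != 0)
		perror("MON_IOCT_RING_SIZE");	/* e.g. EINVAL if out of bounds */
	ring_size = ioctl(fd, MON_IOCQ_RING_SIZE, 0);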
+
+ MON_IOCX_GET, defined as _IOW(MON_IOC_MAGIC, 6, struct mon_get_arg)
+
+This call waits for events to arrive if none were in the kernel buffer,
+then returns the first event. Its argument is a pointer to the following
+structure:
+
+struct mon_get_arg {
+	struct usbmon_packet *hdr;
+	void *data;
+	size_t alloc;		/* Length of data (can be zero) */
+};
+
+Before the call, hdr, data, and alloc should be filled. Upon return, the area
+pointed to by hdr contains the next event structure, and the data buffer
+contains the data, if any. The event is removed from the kernel buffer.
+
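For example, a sketch of a single fetch (structure and ioctl names as above;
process_packet() is a hypothetical handler supplied by the application):

	struct usbmon_packet hdr;
	unsigned char data[1024];
	struct mon_get_arg arg;

	arg.hdr = &hdr;
	arg.data = data;
	arg.alloc = sizeof(data);	/* no more than this much data is copied out */
	if (ioctl(fd, MON_IOCX_GET, &arg) == 0)
		process_packet(&hdr, data);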
+ MON_IOCX_MFETCH, defined as _IOWR(MON_IOC_MAGIC, 7, struct mon_mfetch_arg)
+
+This ioctl is primarily used when the application accesses the buffer
+with mmap(2). Its argument is a pointer to the following structure:
+
+struct mon_mfetch_arg {
+	uint32_t *offvec;	/* Vector of events fetched */
+	uint32_t nfetch;	/* Number of events to fetch (out: fetched) */
+	uint32_t nflush;	/* Number of events to flush */
+};
+
+The ioctl operates in 3 stages.
+
+First, it removes and discards up to nflush events from the kernel buffer.
+The actual number of events discarded is returned in nflush.
+
+Second, it waits for an event to be present in the buffer, unless the pseudo-
+device is open with O_NONBLOCK.
+
+Third, it extracts up to nfetch offsets into the mmap buffer, and stores
+them into the offvec. The actual number of event offsets is stored into
+the nfetch.
+
+ MON_IOCH_MFLUSH, defined as _IO(MON_IOC_MAGIC, 8)
+
+This call removes a number of events from the kernel buffer. Its argument
+is the number of events to remove. If the buffer contains fewer events
+than requested, all events present are removed, and no error is reported.
+This works even when no events are available.
+
+ FIONBIO
+
+The ioctl FIONBIO may be implemented in the future, if there's a need.
+
+In addition to ioctl(2) and read(2), the special file of the binary API can
+be polled with select(2) and poll(2). But lseek(2) does not work.
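
For example, a sketch of waiting for data with poll(2) (requires <poll.h>;
fd is the same descriptor as above):

	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
		/* at least one event is queued; collect it with read(2) or an ioctl */;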
+
+* Memory-mapped access of the kernel buffer for the binary API
+
+The basic idea is simple:
+
+To prepare, map the buffer by getting the current size, then using mmap(2).
+Then, execute a loop similar to the one written in pseudo-code below:
+
+   struct mon_mfetch_arg fetch;
+   struct usbmon_packet *hdr;
+   int nflush = 0;
+   for (;;) {
+      fetch.offvec = vec;			// Has N 32-bit words
+      fetch.nfetch = N;				// Or less than N
+      fetch.nflush = nflush;
+      ioctl(fd, MON_IOCX_MFETCH, &fetch);	// Process errors, too
+      nflush = fetch.nfetch;			// This many packets to flush when done
+      for (i = 0; i < nflush; i++) {
+         hdr = (struct usbmon_packet *) &mmap_area[vec[i]];
+         if (hdr->type == '@')			// Filler packet
+            continue;
+         caddr_t data = &mmap_area[vec[i]] + 64;
+         process_packet(hdr, data);
+      }
+   }
+
+Thus, the main idea is to execute only one ioctl per N events.
+
+Although the buffer is circular, the returned headers and data do not cross
+the end of the buffer, so the above pseudo-code does not need any gathering.
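
For example, the preparation step mentioned above could look like this
(a sketch, not part of this patch; error handling omitted; <sys/mman.h> is
needed):

	unsigned char *mmap_area;
	int ring_size;

	ring_size = ioctl(fd, MON_IOCQ_RING_SIZE, 0);
	mmap_area = mmap(NULL, ring_size, PROT_READ, MAP_SHARED, fd, 0);
	if (mmap_area == MAP_FAILED)
		/* fall back to MON_IOCX_GET or plain read(2) */;
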
diff --git a/drivers/usb/mon/Makefile b/drivers/usb/mon/Makefile
index 3cf3ea3a88ed..90c59535778d 100644
--- a/drivers/usb/mon/Makefile
+++ b/drivers/usb/mon/Makefile
@@ -2,7 +2,7 @@
 # Makefile for USB Core files and filesystem
 #
 
-usbmon-objs := mon_main.o mon_stat.o mon_text.o mon_dma.o
+usbmon-objs := mon_main.o mon_stat.o mon_text.o mon_bin.o mon_dma.o
 
 # This does not use CONFIG_USB_MON because we want this to use a tristate.
 obj-$(CONFIG_USB) += usbmon.o
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
new file mode 100644
index 000000000000..c01dfe603672
--- /dev/null
+++ b/drivers/usb/mon/mon_bin.c
@@ -0,0 +1,1172 @@
1/*
2 * The USB Monitor, inspired by Dave Harding's USBMon.
3 *
4 * This is a binary format reader.
5 *
6 * Copyright (C) 2006 Paolo Abeni (paolo.abeni@email.it)
7 * Copyright (C) 2006 Pete Zaitcev (zaitcev@redhat.com)
8 */
9
10#include <linux/kernel.h>
11#include <linux/types.h>
12#include <linux/fs.h>
13#include <linux/cdev.h>
14#include <linux/usb.h>
15#include <linux/poll.h>
16#include <linux/compat.h>
17#include <linux/mm.h>
18
19#include <asm/uaccess.h>
20
21#include "usb_mon.h"
22
23/*
24 * Defined by USB 2.0 clause 9.3, table 9.2.
25 */
26#define SETUP_LEN 8
27
28/* ioctl macros */
29#define MON_IOC_MAGIC 0x92
30
31#define MON_IOCQ_URB_LEN _IO(MON_IOC_MAGIC, 1)
32/* #2 used to be MON_IOCX_URB, removed before it got into Linus tree */
33#define MON_IOCG_STATS _IOR(MON_IOC_MAGIC, 3, struct mon_bin_stats)
34#define MON_IOCT_RING_SIZE _IO(MON_IOC_MAGIC, 4)
35#define MON_IOCQ_RING_SIZE _IO(MON_IOC_MAGIC, 5)
36#define MON_IOCX_GET _IOW(MON_IOC_MAGIC, 6, struct mon_bin_get)
37#define MON_IOCX_MFETCH _IOWR(MON_IOC_MAGIC, 7, struct mon_bin_mfetch)
38#define MON_IOCH_MFLUSH _IO(MON_IOC_MAGIC, 8)
39#ifdef CONFIG_COMPAT
40#define MON_IOCX_GET32 _IOW(MON_IOC_MAGIC, 6, struct mon_bin_get32)
41#define MON_IOCX_MFETCH32 _IOWR(MON_IOC_MAGIC, 7, struct mon_bin_mfetch32)
42#endif
43
44/*
45 * Some architectures have enormous basic pages (16KB for ia64, 64KB for ppc).
46 * But it's all right. Just use a simple way to make sure the chunk is never
47 * smaller than a page.
48 *
49 * N.B. An application does not know our chunk size.
50 *
51 * Woops, get_zeroed_page() returns a single page. I guess we're stuck with
52 * page-sized chunks for the time being.
53 */
54#define CHUNK_SIZE PAGE_SIZE
55#define CHUNK_ALIGN(x) (((x)+CHUNK_SIZE-1) & ~(CHUNK_SIZE-1))
56
57/*
58 * The magic limit was calculated so that it allows the monitoring
59 * application to pick data once in two ticks. This way, another application,
60 * which presumably drives the bus, gets to hog CPU, yet we collect our data.
61 * If HZ is 100, a 480 mbit/s bus drives 614 KB every jiffy. USB has an
62 * enormous overhead built into the bus protocol, so we need about 1000 KB.
63 *
64 * This is still too much for most cases, where we just snoop a few
65 * descriptor fetches for enumeration. So, the default is a "reasonable"
66 * amount for systems with HZ=250 and incomplete bus saturation.
67 *
68 * XXX What about multi-megabyte URBs which take minutes to transfer?
69 */
70#define BUFF_MAX CHUNK_ALIGN(1200*1024)
71#define BUFF_DFL CHUNK_ALIGN(300*1024)
72#define BUFF_MIN CHUNK_ALIGN(8*1024)
73
74/*
75 * The per-event API header (2 per URB).
76 *
77 * This structure is seen in userland as defined by the documentation.
78 */
79struct mon_bin_hdr {
80 u64 id; /* URB ID - from submission to callback */
81 unsigned char type; /* Same as in text API; extensible. */
82 unsigned char xfer_type; /* ISO, Intr, Control, Bulk */
83 unsigned char epnum; /* Endpoint number and transfer direction */
84 unsigned char devnum; /* Device address */
85 unsigned short busnum; /* Bus number */
86 char flag_setup;
87 char flag_data;
88 s64 ts_sec; /* gettimeofday */
89 s32 ts_usec; /* gettimeofday */
90 int status;
91 unsigned int len_urb; /* Length of data (submitted or actual) */
92 unsigned int len_cap; /* Delivered length */
93 unsigned char setup[SETUP_LEN]; /* Only for Control S-type */
94};
95
96/* per file statistic */
97struct mon_bin_stats {
98 u32 queued;
99 u32 dropped;
100};
101
102struct mon_bin_get {
103 struct mon_bin_hdr __user *hdr; /* Only 48 bytes, not 64. */
104 void __user *data;
105 size_t alloc; /* Length of data (can be zero) */
106};
107
108struct mon_bin_mfetch {
109 u32 __user *offvec; /* Vector of events fetched */
110 u32 nfetch; /* Number of events to fetch (out: fetched) */
111 u32 nflush; /* Number of events to flush */
112};
113
114#ifdef CONFIG_COMPAT
115struct mon_bin_get32 {
116 u32 hdr32;
117 u32 data32;
118 u32 alloc32;
119};
120
121struct mon_bin_mfetch32 {
122 u32 offvec32;
123 u32 nfetch32;
124 u32 nflush32;
125};
126#endif
127
128/* Having these two values same prevents wrapping of the mon_bin_hdr */
129#define PKT_ALIGN 64
130#define PKT_SIZE 64
131
132/* max number of USB bus supported */
133#define MON_BIN_MAX_MINOR 128
134
135/*
136 * The buffer: map of used pages.
137 */
138struct mon_pgmap {
139 struct page *pg;
140 unsigned char *ptr; /* XXX just use page_to_virt everywhere? */
141};
142
143/*
144 * This gets associated with an open file struct.
145 */
146struct mon_reader_bin {
147 /* The buffer: one per open. */
148 spinlock_t b_lock; /* Protect b_cnt, b_in */
149 unsigned int b_size; /* Current size of the buffer - bytes */
150 unsigned int b_cnt; /* Bytes used */
151 unsigned int b_in, b_out; /* Offsets into buffer - bytes */
152 unsigned int b_read; /* Amount of read data in curr. pkt. */
153 struct mon_pgmap *b_vec; /* The map array */
154 wait_queue_head_t b_wait; /* Wait for data here */
155
156 struct mutex fetch_lock; /* Protect b_read, b_out */
157 int mmap_active;
158
159 /* A list of these is needed for "bus 0". Some time later. */
160 struct mon_reader r;
161
162 /* Stats */
163 unsigned int cnt_lost;
164};
165
166static inline struct mon_bin_hdr *MON_OFF2HDR(const struct mon_reader_bin *rp,
167 unsigned int offset)
168{
169 return (struct mon_bin_hdr *)
170 (rp->b_vec[offset / CHUNK_SIZE].ptr + offset % CHUNK_SIZE);
171}
172
173#define MON_RING_EMPTY(rp) ((rp)->b_cnt == 0)
174
175static dev_t mon_bin_dev0;
176static struct cdev mon_bin_cdev;
177
178static void mon_buff_area_fill(const struct mon_reader_bin *rp,
179 unsigned int offset, unsigned int size);
180static int mon_bin_wait_event(struct file *file, struct mon_reader_bin *rp);
181static int mon_alloc_buff(struct mon_pgmap *map, int npages);
182static void mon_free_buff(struct mon_pgmap *map, int npages);
183
184/*
185 * This is a "chunked memcpy". It does not manipulate any counters.
186 * But it returns the new offset for repeated application.
187 */
188unsigned int mon_copy_to_buff(const struct mon_reader_bin *this,
189 unsigned int off, const unsigned char *from, unsigned int length)
190{
191 unsigned int step_len;
192 unsigned char *buf;
193 unsigned int in_page;
194
195 while (length) {
196 /*
197 * Determine step_len.
198 */
199 step_len = length;
200 in_page = CHUNK_SIZE - (off & (CHUNK_SIZE-1));
201 if (in_page < step_len)
202 step_len = in_page;
203
204 /*
205 * Copy data and advance pointers.
206 */
207 buf = this->b_vec[off / CHUNK_SIZE].ptr + off % CHUNK_SIZE;
208 memcpy(buf, from, step_len);
209 if ((off += step_len) >= this->b_size) off = 0;
210 from += step_len;
211 length -= step_len;
212 }
213 return off;
214}
215
216/*
217 * This is a little worse than the above because it's "chunked copy_to_user".
218 * The return value is an error code, not an offset.
219 */
220static int copy_from_buf(const struct mon_reader_bin *this, unsigned int off,
221 char __user *to, int length)
222{
223 unsigned int step_len;
224 unsigned char *buf;
225 unsigned int in_page;
226
227 while (length) {
228 /*
229 * Determine step_len.
230 */
231 step_len = length;
232 in_page = CHUNK_SIZE - (off & (CHUNK_SIZE-1));
233 if (in_page < step_len)
234 step_len = in_page;
235
236 /*
237 * Copy data and advance pointers.
238 */
239 buf = this->b_vec[off / CHUNK_SIZE].ptr + off % CHUNK_SIZE;
240 if (copy_to_user(to, buf, step_len))
241 return -EINVAL;
242 if ((off += step_len) >= this->b_size) off = 0;
243 to += step_len;
244 length -= step_len;
245 }
246 return 0;
247}
248
249/*
250 * Allocate an (aligned) area in the buffer.
251 * This is called under b_lock.
252 * Returns ~0 on failure.
253 */
254static unsigned int mon_buff_area_alloc(struct mon_reader_bin *rp,
255 unsigned int size)
256{
257 unsigned int offset;
258
259 size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
260 if (rp->b_cnt + size > rp->b_size)
261 return ~0;
262 offset = rp->b_in;
263 rp->b_cnt += size;
264 if ((rp->b_in += size) >= rp->b_size)
265 rp->b_in -= rp->b_size;
266 return offset;
267}
268
269/*
270 * This is the same thing as mon_buff_area_alloc, only it does not allow
271 * buffers to wrap. This is needed by applications which pass references
272 * into mmap-ed buffers up their stacks (libpcap can do that).
273 *
274 * Currently, we always have the header stuck with the data, although
275 * it is not strictly speaking necessary.
276 *
277 * When a buffer would wrap, we place a filler packet to mark the space.
278 */
279static unsigned int mon_buff_area_alloc_contiguous(struct mon_reader_bin *rp,
280 unsigned int size)
281{
282 unsigned int offset;
283 unsigned int fill_size;
284
285 size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
286 if (rp->b_cnt + size > rp->b_size)
287 return ~0;
288 if (rp->b_in + size > rp->b_size) {
289 /*
290 * This would wrap. Find if we still have space after
291 * skipping to the end of the buffer. If we do, place
292 * a filler packet and allocate a new packet.
293 */
294 fill_size = rp->b_size - rp->b_in;
295 if (rp->b_cnt + size + fill_size > rp->b_size)
296 return ~0;
297 mon_buff_area_fill(rp, rp->b_in, fill_size);
298
299 offset = 0;
300 rp->b_in = size;
301 rp->b_cnt += size + fill_size;
302 } else if (rp->b_in + size == rp->b_size) {
303 offset = rp->b_in;
304 rp->b_in = 0;
305 rp->b_cnt += size;
306 } else {
307 offset = rp->b_in;
308 rp->b_in += size;
309 rp->b_cnt += size;
310 }
311 return offset;
312}
313
314/*
315 * Return a few (kilo-)bytes to the head of the buffer.
316 * This is used if a DMA fetch fails.
317 */
318static void mon_buff_area_shrink(struct mon_reader_bin *rp, unsigned int size)
319{
320
321 size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
322 rp->b_cnt -= size;
323 if (rp->b_in < size)
324 rp->b_in += rp->b_size;
325 rp->b_in -= size;
326}
327
328/*
329 * This has to be called under both b_lock and fetch_lock, because
330 * it accesses both b_cnt and b_out.
331 */
332static void mon_buff_area_free(struct mon_reader_bin *rp, unsigned int size)
333{
334
335 size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
336 rp->b_cnt -= size;
337 if ((rp->b_out += size) >= rp->b_size)
338 rp->b_out -= rp->b_size;
339}
340
341static void mon_buff_area_fill(const struct mon_reader_bin *rp,
342 unsigned int offset, unsigned int size)
343{
344 struct mon_bin_hdr *ep;
345
346 ep = MON_OFF2HDR(rp, offset);
347 memset(ep, 0, PKT_SIZE);
348 ep->type = '@';
349 ep->len_cap = size - PKT_SIZE;
350}
351
352static inline char mon_bin_get_setup(unsigned char *setupb,
353 const struct urb *urb, char ev_type)
354{
355
356 if (!usb_pipecontrol(urb->pipe) || ev_type != 'S')
357 return '-';
358
359 if (urb->transfer_flags & URB_NO_SETUP_DMA_MAP)
360 return mon_dmapeek(setupb, urb->setup_dma, SETUP_LEN);
361 if (urb->setup_packet == NULL)
362 return 'Z';
363
364 memcpy(setupb, urb->setup_packet, SETUP_LEN);
365 return 0;
366}
367
368static char mon_bin_get_data(const struct mon_reader_bin *rp,
369 unsigned int offset, struct urb *urb, unsigned int length)
370{
371
372 if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP) {
373 mon_dmapeek_vec(rp, offset, urb->transfer_dma, length);
374 return 0;
375 }
376
377 if (urb->transfer_buffer == NULL)
378 return 'Z';
379
380 mon_copy_to_buff(rp, offset, urb->transfer_buffer, length);
381 return 0;
382}
383
384static void mon_bin_event(struct mon_reader_bin *rp, struct urb *urb,
385 char ev_type)
386{
387 unsigned long flags;
388 struct timeval ts;
389 unsigned int urb_length;
390 unsigned int offset;
391 unsigned int length;
392 struct mon_bin_hdr *ep;
393 char data_tag = 0;
394
395 do_gettimeofday(&ts);
396
397 spin_lock_irqsave(&rp->b_lock, flags);
398
399 /*
400 * Find the maximum allowable length, then allocate space.
401 */
402 urb_length = (ev_type == 'S') ?
403 urb->transfer_buffer_length : urb->actual_length;
404 length = urb_length;
405
406 if (length >= rp->b_size/5)
407 length = rp->b_size/5;
408
409 if (usb_pipein(urb->pipe)) {
410 if (ev_type == 'S') {
411 length = 0;
412 data_tag = '<';
413 }
414 } else {
415 if (ev_type == 'C') {
416 length = 0;
417 data_tag = '>';
418 }
419 }
420
421 if (rp->mmap_active)
422 offset = mon_buff_area_alloc_contiguous(rp, length + PKT_SIZE);
423 else
424 offset = mon_buff_area_alloc(rp, length + PKT_SIZE);
425 if (offset == ~0) {
426 rp->cnt_lost++;
427 spin_unlock_irqrestore(&rp->b_lock, flags);
428 return;
429 }
430
431 ep = MON_OFF2HDR(rp, offset);
432 if ((offset += PKT_SIZE) >= rp->b_size) offset = 0;
433
434 /*
435 * Fill the allocated area.
436 */
437 memset(ep, 0, PKT_SIZE);
438 ep->type = ev_type;
439 ep->xfer_type = usb_pipetype(urb->pipe);
440 /* We use the fact that usb_pipein() returns 0x80 */
441 ep->epnum = usb_pipeendpoint(urb->pipe) | usb_pipein(urb->pipe);
442 ep->devnum = usb_pipedevice(urb->pipe);
443 ep->busnum = rp->r.m_bus->u_bus->busnum;
444 ep->id = (unsigned long) urb;
445 ep->ts_sec = ts.tv_sec;
446 ep->ts_usec = ts.tv_usec;
447 ep->status = urb->status;
448 ep->len_urb = urb_length;
449 ep->len_cap = length;
450
451 ep->flag_setup = mon_bin_get_setup(ep->setup, urb, ev_type);
452 if (length != 0) {
453 ep->flag_data = mon_bin_get_data(rp, offset, urb, length);
454 if (ep->flag_data != 0) { /* Yes, it's 0x00, not '0' */
455 ep->len_cap = 0;
456 mon_buff_area_shrink(rp, length);
457 }
458 } else {
459 ep->flag_data = data_tag;
460 }
461
462 spin_unlock_irqrestore(&rp->b_lock, flags);
463
464 wake_up(&rp->b_wait);
465}
466
467static void mon_bin_submit(void *data, struct urb *urb)
468{
469 struct mon_reader_bin *rp = data;
470 mon_bin_event(rp, urb, 'S');
471}
472
473static void mon_bin_complete(void *data, struct urb *urb)
474{
475 struct mon_reader_bin *rp = data;
476 mon_bin_event(rp, urb, 'C');
477}
478
479static void mon_bin_error(void *data, struct urb *urb, int error)
480{
481 struct mon_reader_bin *rp = data;
482 unsigned long flags;
483 unsigned int offset;
484 struct mon_bin_hdr *ep;
485
486 spin_lock_irqsave(&rp->b_lock, flags);
487
488 offset = mon_buff_area_alloc(rp, PKT_SIZE);
489 if (offset == ~0) {
490 /* Not incrementing cnt_lost. Just because. */
491 spin_unlock_irqrestore(&rp->b_lock, flags);
492 return;
493 }
494
495 ep = MON_OFF2HDR(rp, offset);
496
497 memset(ep, 0, PKT_SIZE);
498 ep->type = 'E';
499 ep->xfer_type = usb_pipetype(urb->pipe);
500 /* We use the fact that usb_pipein() returns 0x80 */
501 ep->epnum = usb_pipeendpoint(urb->pipe) | usb_pipein(urb->pipe);
502 ep->devnum = usb_pipedevice(urb->pipe);
503 ep->busnum = rp->r.m_bus->u_bus->busnum;
504 ep->id = (unsigned long) urb;
505 ep->status = error;
506
507 ep->flag_setup = '-';
508 ep->flag_data = 'E';
509
510 spin_unlock_irqrestore(&rp->b_lock, flags);
511
512 wake_up(&rp->b_wait);
513}
514
515static int mon_bin_open(struct inode *inode, struct file *file)
516{
517 struct mon_bus *mbus;
518 struct usb_bus *ubus;
519 struct mon_reader_bin *rp;
520 size_t size;
521 int rc;
522
523 mutex_lock(&mon_lock);
524 if ((mbus = mon_bus_lookup(iminor(inode))) == NULL) {
525 mutex_unlock(&mon_lock);
526 return -ENODEV;
527 }
528 if ((ubus = mbus->u_bus) == NULL) {
529 printk(KERN_ERR TAG ": consistency error on open\n");
530 mutex_unlock(&mon_lock);
531 return -ENODEV;
532 }
533
534 rp = kzalloc(sizeof(struct mon_reader_bin), GFP_KERNEL);
535 if (rp == NULL) {
536 rc = -ENOMEM;
537 goto err_alloc;
538 }
539 spin_lock_init(&rp->b_lock);
540 init_waitqueue_head(&rp->b_wait);
541 mutex_init(&rp->fetch_lock);
542
543 rp->b_size = BUFF_DFL;
544
545 size = sizeof(struct mon_pgmap) * (rp->b_size/CHUNK_SIZE);
546 if ((rp->b_vec = kzalloc(size, GFP_KERNEL)) == NULL) {
547 rc = -ENOMEM;
548 goto err_allocvec;
549 }
550
551 if ((rc = mon_alloc_buff(rp->b_vec, rp->b_size/CHUNK_SIZE)) < 0)
552 goto err_allocbuff;
553
554 rp->r.m_bus = mbus;
555 rp->r.r_data = rp;
556 rp->r.rnf_submit = mon_bin_submit;
557 rp->r.rnf_error = mon_bin_error;
558 rp->r.rnf_complete = mon_bin_complete;
559
560 mon_reader_add(mbus, &rp->r);
561
562 file->private_data = rp;
563 mutex_unlock(&mon_lock);
564 return 0;
565
566err_allocbuff:
567 kfree(rp->b_vec);
568err_allocvec:
569 kfree(rp);
570err_alloc:
571 mutex_unlock(&mon_lock);
572 return rc;
573}
574
575/*
576 * Extract an event from buffer and copy it to user space.
577 * Wait if there is no event ready.
578 * Returns zero or error.
579 */
580static int mon_bin_get_event(struct file *file, struct mon_reader_bin *rp,
581 struct mon_bin_hdr __user *hdr, void __user *data, unsigned int nbytes)
582{
583 unsigned long flags;
584 struct mon_bin_hdr *ep;
585 size_t step_len;
586 unsigned int offset;
587 int rc;
588
589 mutex_lock(&rp->fetch_lock);
590
591 if ((rc = mon_bin_wait_event(file, rp)) < 0) {
592 mutex_unlock(&rp->fetch_lock);
593 return rc;
594 }
595
596 ep = MON_OFF2HDR(rp, rp->b_out);
597
598 if (copy_to_user(hdr, ep, sizeof(struct mon_bin_hdr))) {
599 mutex_unlock(&rp->fetch_lock);
600 return -EFAULT;
601 }
602
603 step_len = min(ep->len_cap, nbytes);
604 if ((offset = rp->b_out + PKT_SIZE) >= rp->b_size) offset = 0;
605
606 if (copy_from_buf(rp, offset, data, step_len)) {
607 mutex_unlock(&rp->fetch_lock);
608 return -EFAULT;
609 }
610
611 spin_lock_irqsave(&rp->b_lock, flags);
612 mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);
613 spin_unlock_irqrestore(&rp->b_lock, flags);
614 rp->b_read = 0;
615
616 mutex_unlock(&rp->fetch_lock);
617 return 0;
618}
619
620static int mon_bin_release(struct inode *inode, struct file *file)
621{
622 struct mon_reader_bin *rp = file->private_data;
623 struct mon_bus* mbus = rp->r.m_bus;
624
625 mutex_lock(&mon_lock);
626
627 if (mbus->nreaders <= 0) {
628 printk(KERN_ERR TAG ": consistency error on close\n");
629 mutex_unlock(&mon_lock);
630 return 0;
631 }
632 mon_reader_del(mbus, &rp->r);
633
634 mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE);
635 kfree(rp->b_vec);
636 kfree(rp);
637
638 mutex_unlock(&mon_lock);
639 return 0;
640}
641
642static ssize_t mon_bin_read(struct file *file, char __user *buf,
643 size_t nbytes, loff_t *ppos)
644{
645 struct mon_reader_bin *rp = file->private_data;
646 unsigned long flags;
647 struct mon_bin_hdr *ep;
648 unsigned int offset;
649 size_t step_len;
650 char *ptr;
651 ssize_t done = 0;
652 int rc;
653
654 mutex_lock(&rp->fetch_lock);
655
656 if ((rc = mon_bin_wait_event(file, rp)) < 0) {
657 mutex_unlock(&rp->fetch_lock);
658 return rc;
659 }
660
661 ep = MON_OFF2HDR(rp, rp->b_out);
662
663 if (rp->b_read < sizeof(struct mon_bin_hdr)) {
664 step_len = min(nbytes, sizeof(struct mon_bin_hdr) - rp->b_read);
665 ptr = ((char *)ep) + rp->b_read;
666 if (step_len && copy_to_user(buf, ptr, step_len)) {
667 mutex_unlock(&rp->fetch_lock);
668 return -EFAULT;
669 }
670 nbytes -= step_len;
671 buf += step_len;
672 rp->b_read += step_len;
673 done += step_len;
674 }
675
676 if (rp->b_read >= sizeof(struct mon_bin_hdr)) {
677 step_len = min(nbytes, (size_t)ep->len_cap);
678 offset = rp->b_out + PKT_SIZE;
679 offset += rp->b_read - sizeof(struct mon_bin_hdr);
680 if (offset >= rp->b_size)
681 offset -= rp->b_size;
682 if (copy_from_buf(rp, offset, buf, step_len)) {
683 mutex_unlock(&rp->fetch_lock);
684 return -EFAULT;
685 }
686 nbytes -= step_len;
687 buf += step_len;
688 rp->b_read += step_len;
689 done += step_len;
690 }
691
692 /*
693 * Check if whole packet was read, and if so, jump to the next one.
694 */
695 if (rp->b_read >= sizeof(struct mon_bin_hdr) + ep->len_cap) {
696 spin_lock_irqsave(&rp->b_lock, flags);
697 mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);
698 spin_unlock_irqrestore(&rp->b_lock, flags);
699 rp->b_read = 0;
700 }
701
702 mutex_unlock(&rp->fetch_lock);
703 return done;
704}
705
706/*
707 * Remove at most nevents from chunked buffer.
708 * Returns the number of removed events.
709 */
710static int mon_bin_flush(struct mon_reader_bin *rp, unsigned nevents)
711{
712 unsigned long flags;
713 struct mon_bin_hdr *ep;
714 int i;
715
716 mutex_lock(&rp->fetch_lock);
717 spin_lock_irqsave(&rp->b_lock, flags);
718 for (i = 0; i < nevents; ++i) {
719 if (MON_RING_EMPTY(rp))
720 break;
721
722 ep = MON_OFF2HDR(rp, rp->b_out);
723 mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);
724 }
725 spin_unlock_irqrestore(&rp->b_lock, flags);
726 rp->b_read = 0;
727 mutex_unlock(&rp->fetch_lock);
728 return i;
729}
730
731/*
732 * Fetch at most max event offsets into the buffer and put them into vec.
733 * The events are usually freed later with mon_bin_flush.
734 * Return the effective number of events fetched.
735 */
736static int mon_bin_fetch(struct file *file, struct mon_reader_bin *rp,
737 u32 __user *vec, unsigned int max)
738{
739 unsigned int cur_out;
740 unsigned int bytes, avail;
741 unsigned int size;
742 unsigned int nevents;
743 struct mon_bin_hdr *ep;
744 unsigned long flags;
745 int rc;
746
747 mutex_lock(&rp->fetch_lock);
748
749 if ((rc = mon_bin_wait_event(file, rp)) < 0) {
750 mutex_unlock(&rp->fetch_lock);
751 return rc;
752 }
753
754 spin_lock_irqsave(&rp->b_lock, flags);
755 avail = rp->b_cnt;
756 spin_unlock_irqrestore(&rp->b_lock, flags);
757
758 cur_out = rp->b_out;
759 nevents = 0;
760 bytes = 0;
761 while (bytes < avail) {
762 if (nevents >= max)
763 break;
764
765 ep = MON_OFF2HDR(rp, cur_out);
766 if (put_user(cur_out, &vec[nevents])) {
767 mutex_unlock(&rp->fetch_lock);
768 return -EFAULT;
769 }
770
771 nevents++;
772 size = ep->len_cap + PKT_SIZE;
773 size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
774 if ((cur_out += size) >= rp->b_size)
775 cur_out -= rp->b_size;
776 bytes += size;
777 }
778
779 mutex_unlock(&rp->fetch_lock);
780 return nevents;
781}
782
783/*
784 * Count events. This is almost the same as the above mon_bin_fetch,
785 * only we do not store offsets into user vector, and we have no limit.
786 */
787static int mon_bin_queued(struct mon_reader_bin *rp)
788{
789 unsigned int cur_out;
790 unsigned int bytes, avail;
791 unsigned int size;
792 unsigned int nevents;
793 struct mon_bin_hdr *ep;
794 unsigned long flags;
795
796 mutex_lock(&rp->fetch_lock);
797
798 spin_lock_irqsave(&rp->b_lock, flags);
799 avail = rp->b_cnt;
800 spin_unlock_irqrestore(&rp->b_lock, flags);
801
802 cur_out = rp->b_out;
803 nevents = 0;
804 bytes = 0;
805 while (bytes < avail) {
806 ep = MON_OFF2HDR(rp, cur_out);
807
808 nevents++;
809 size = ep->len_cap + PKT_SIZE;
810 size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
811 if ((cur_out += size) >= rp->b_size)
812 cur_out -= rp->b_size;
813 bytes += size;
814 }
815
816 mutex_unlock(&rp->fetch_lock);
817 return nevents;
818}
819
820/*
821 */
822static int mon_bin_ioctl(struct inode *inode, struct file *file,
823 unsigned int cmd, unsigned long arg)
824{
825 struct mon_reader_bin *rp = file->private_data;
826 // struct mon_bus* mbus = rp->r.m_bus;
827 int ret = 0;
828 struct mon_bin_hdr *ep;
829 unsigned long flags;
830
831 switch (cmd) {
832
833 case MON_IOCQ_URB_LEN:
834 /*
835 * N.B. This only returns the size of data, without the header.
836 */
837 spin_lock_irqsave(&rp->b_lock, flags);
838 if (!MON_RING_EMPTY(rp)) {
839 ep = MON_OFF2HDR(rp, rp->b_out);
840 ret = ep->len_cap;
841 }
842 spin_unlock_irqrestore(&rp->b_lock, flags);
843 break;
844
845 case MON_IOCQ_RING_SIZE:
846 ret = rp->b_size;
847 break;
848
849 case MON_IOCT_RING_SIZE:
850 /*
851 * Changing the buffer size will flush it's contents; the new
852 * buffer is allocated before releasing the old one to be sure
853 * the device will stay functional also in case of memory
854 * pressure.
855 */
856 {
857 int size;
858 struct mon_pgmap *vec;
859
860 if (arg < BUFF_MIN || arg > BUFF_MAX)
861 return -EINVAL;
862
863 size = CHUNK_ALIGN(arg);
864 if ((vec = kzalloc(sizeof(struct mon_pgmap) * (size/CHUNK_SIZE),
865 GFP_KERNEL)) == NULL) {
866 ret = -ENOMEM;
867 break;
868 }
869
870 ret = mon_alloc_buff(vec, size/CHUNK_SIZE);
871 if (ret < 0) {
872 kfree(vec);
873 break;
874 }
875
876 mutex_lock(&rp->fetch_lock);
877 spin_lock_irqsave(&rp->b_lock, flags);
878 mon_free_buff(rp->b_vec, size/CHUNK_SIZE);
879 kfree(rp->b_vec);
880 rp->b_vec = vec;
881 rp->b_size = size;
882 rp->b_read = rp->b_in = rp->b_out = rp->b_cnt = 0;
883 rp->cnt_lost = 0;
884 spin_unlock_irqrestore(&rp->b_lock, flags);
885 mutex_unlock(&rp->fetch_lock);
886 }
887 break;
888
889 case MON_IOCH_MFLUSH:
890 ret = mon_bin_flush(rp, arg);
891 break;
892
893 case MON_IOCX_GET:
894 {
895 struct mon_bin_get getb;
896
897 if (copy_from_user(&getb, (void __user *)arg,
898 sizeof(struct mon_bin_get)))
899 return -EFAULT;
900
901 if (getb.alloc > 0x10000000) /* Want to cast to u32 */
902 return -EINVAL;
903 ret = mon_bin_get_event(file, rp,
904 getb.hdr, getb.data, (unsigned int)getb.alloc);
905 }
906 break;
907
908#ifdef CONFIG_COMPAT
909 case MON_IOCX_GET32: {
910 struct mon_bin_get32 getb;
911
912 if (copy_from_user(&getb, (void __user *)arg,
913 sizeof(struct mon_bin_get32)))
914 return -EFAULT;
915
916 ret = mon_bin_get_event(file, rp,
917 compat_ptr(getb.hdr32), compat_ptr(getb.data32),
918 getb.alloc32);
919 }
920 break;
921#endif
922
923 case MON_IOCX_MFETCH:
924 {
925 struct mon_bin_mfetch mfetch;
926 struct mon_bin_mfetch __user *uptr;
927
928 uptr = (struct mon_bin_mfetch __user *)arg;
929
930 if (copy_from_user(&mfetch, uptr, sizeof(mfetch)))
931 return -EFAULT;
932
933 if (mfetch.nflush) {
934 ret = mon_bin_flush(rp, mfetch.nflush);
935 if (ret < 0)
936 return ret;
937 if (put_user(ret, &uptr->nflush))
938 return -EFAULT;
939 }
940 ret = mon_bin_fetch(file, rp, mfetch.offvec, mfetch.nfetch);
941 if (ret < 0)
942 return ret;
943 if (put_user(ret, &uptr->nfetch))
944 return -EFAULT;
945 ret = 0;
946 }
947 break;
948
949#ifdef CONFIG_COMPAT
950 case MON_IOCX_MFETCH32:
951 {
952 struct mon_bin_mfetch32 mfetch;
953 struct mon_bin_mfetch32 __user *uptr;
954
955 uptr = (struct mon_bin_mfetch32 __user *) compat_ptr(arg);
956
957 if (copy_from_user(&mfetch, uptr, sizeof(mfetch)))
958 return -EFAULT;
959
960 if (mfetch.nflush32) {
961 ret = mon_bin_flush(rp, mfetch.nflush32);
962 if (ret < 0)
963 return ret;
964 if (put_user(ret, &uptr->nflush32))
965 return -EFAULT;
966 }
967 ret = mon_bin_fetch(file, rp, compat_ptr(mfetch.offvec32),
968 mfetch.nfetch32);
969 if (ret < 0)
970 return ret;
971 if (put_user(ret, &uptr->nfetch32))
972 return -EFAULT;
973 ret = 0;
974 }
975 break;
976#endif
977
978 case MON_IOCG_STATS: {
979 struct mon_bin_stats __user *sp;
980 unsigned int nevents;
981 unsigned int ndropped;
982
983 spin_lock_irqsave(&rp->b_lock, flags);
984 ndropped = rp->cnt_lost;
985 rp->cnt_lost = 0;
986 spin_unlock_irqrestore(&rp->b_lock, flags);
987 nevents = mon_bin_queued(rp);
988
989 sp = (struct mon_bin_stats __user *)arg;
990 if (put_user(rp->cnt_lost, &sp->dropped))
991 return -EFAULT;
992 if (put_user(nevents, &sp->queued))
993 return -EFAULT;
994
995 }
996 break;
997
998 default:
999 return -ENOTTY;
1000 }
1001
1002 return ret;
1003}
1004
1005static unsigned int
1006mon_bin_poll(struct file *file, struct poll_table_struct *wait)
1007{
1008 struct mon_reader_bin *rp = file->private_data;
1009 unsigned int mask = 0;
1010 unsigned long flags;
1011
1012 if (file->f_mode & FMODE_READ)
1013 poll_wait(file, &rp->b_wait, wait);
1014
1015 spin_lock_irqsave(&rp->b_lock, flags);
1016 if (!MON_RING_EMPTY(rp))
1017 mask |= POLLIN | POLLRDNORM; /* readable */
1018 spin_unlock_irqrestore(&rp->b_lock, flags);
1019 return mask;
1020}
1021
1022/*
1023 * open and close: just keep track of how many times the device is
1024 * mapped, to use the proper memory allocation function.
1025 */
1026static void mon_bin_vma_open(struct vm_area_struct *vma)
1027{
1028 struct mon_reader_bin *rp = vma->vm_private_data;
1029 rp->mmap_active++;
1030}
1031
1032static void mon_bin_vma_close(struct vm_area_struct *vma)
1033{
1034 struct mon_reader_bin *rp = vma->vm_private_data;
1035 rp->mmap_active--;
1036}
1037
1038/*
1039 * Map ring pages to user space.
1040 */
1041struct page *mon_bin_vma_nopage(struct vm_area_struct *vma,
1042 unsigned long address, int *type)
1043{
1044 struct mon_reader_bin *rp = vma->vm_private_data;
1045 unsigned long offset, chunk_idx;
1046 struct page *pageptr;
1047
1048 offset = (address - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT);
1049 if (offset >= rp->b_size)
1050 return NOPAGE_SIGBUS;
1051 chunk_idx = offset / CHUNK_SIZE;
1052 pageptr = rp->b_vec[chunk_idx].pg;
1053 get_page(pageptr);
1054 if (type)
1055 *type = VM_FAULT_MINOR;
1056 return pageptr;
1057}
1058
1059struct vm_operations_struct mon_bin_vm_ops = {
1060 .open = mon_bin_vma_open,
1061 .close = mon_bin_vma_close,
1062 .nopage = mon_bin_vma_nopage,
1063};
1064
1065int mon_bin_mmap(struct file *filp, struct vm_area_struct *vma)
1066{
1067 /* don't do anything here: "nopage" will set up page table entries */
1068 vma->vm_ops = &mon_bin_vm_ops;
1069 vma->vm_flags |= VM_RESERVED;
1070 vma->vm_private_data = filp->private_data;
1071 mon_bin_vma_open(vma);
1072 return 0;
1073}
1074
1075struct file_operations mon_fops_binary = {
1076 .owner = THIS_MODULE,
1077 .open = mon_bin_open,
1078 .llseek = no_llseek,
1079 .read = mon_bin_read,
1080 /* .write = mon_text_write, */
1081 .poll = mon_bin_poll,
1082 .ioctl = mon_bin_ioctl,
1083 .release = mon_bin_release,
1084};
1085
1086static int mon_bin_wait_event(struct file *file, struct mon_reader_bin *rp)
1087{
1088 DECLARE_WAITQUEUE(waita, current);
1089 unsigned long flags;
1090
1091 add_wait_queue(&rp->b_wait, &waita);
1092 set_current_state(TASK_INTERRUPTIBLE);
1093
1094 spin_lock_irqsave(&rp->b_lock, flags);
1095 while (MON_RING_EMPTY(rp)) {
1096 spin_unlock_irqrestore(&rp->b_lock, flags);
1097
1098 if (file->f_flags & O_NONBLOCK) {
1099 set_current_state(TASK_RUNNING);
1100 remove_wait_queue(&rp->b_wait, &waita);
1101 return -EWOULDBLOCK; /* Same as EAGAIN in Linux */
1102 }
1103 schedule();
1104 if (signal_pending(current)) {
1105 remove_wait_queue(&rp->b_wait, &waita);
1106 return -EINTR;
1107 }
1108 set_current_state(TASK_INTERRUPTIBLE);
1109
1110 spin_lock_irqsave(&rp->b_lock, flags);
1111 }
1112 spin_unlock_irqrestore(&rp->b_lock, flags);
1113
1114 set_current_state(TASK_RUNNING);
1115 remove_wait_queue(&rp->b_wait, &waita);
1116 return 0;
1117}
1118
1119static int mon_alloc_buff(struct mon_pgmap *map, int npages)
1120{
1121 int n;
1122 unsigned long vaddr;
1123
1124 for (n = 0; n < npages; n++) {
1125 vaddr = get_zeroed_page(GFP_KERNEL);
1126 if (vaddr == 0) {
1127 while (n-- != 0)
1128 free_page((unsigned long) map[n].ptr);
1129 return -ENOMEM;
1130 }
1131 map[n].ptr = (unsigned char *) vaddr;
1132 map[n].pg = virt_to_page(vaddr);
1133 }
1134 return 0;
1135}
1136
1137static void mon_free_buff(struct mon_pgmap *map, int npages)
1138{
1139 int n;
1140
1141 for (n = 0; n < npages; n++)
1142 free_page((unsigned long) map[n].ptr);
1143}
1144
1145int __init mon_bin_init(void)
1146{
1147 int rc;
1148
1149 rc = alloc_chrdev_region(&mon_bin_dev0, 0, MON_BIN_MAX_MINOR, "usbmon");
1150 if (rc < 0)
1151 goto err_dev;
1152
1153 cdev_init(&mon_bin_cdev, &mon_fops_binary);
1154 mon_bin_cdev.owner = THIS_MODULE;
1155
1156 rc = cdev_add(&mon_bin_cdev, mon_bin_dev0, MON_BIN_MAX_MINOR);
1157 if (rc < 0)
1158 goto err_add;
1159
1160 return 0;
1161
1162err_add:
1163 unregister_chrdev_region(mon_bin_dev0, MON_BIN_MAX_MINOR);
1164err_dev:
1165 return rc;
1166}
1167
1168void __exit mon_bin_exit(void)
1169{
1170 cdev_del(&mon_bin_cdev);
1171 unregister_chrdev_region(mon_bin_dev0, MON_BIN_MAX_MINOR);
1172}
diff --git a/drivers/usb/mon/mon_dma.c b/drivers/usb/mon/mon_dma.c
index ddcfc01e77a0..140cc80bd2b1 100644
--- a/drivers/usb/mon/mon_dma.c
+++ b/drivers/usb/mon/mon_dma.c
@@ -48,6 +48,36 @@ char mon_dmapeek(unsigned char *dst, dma_addr_t dma_addr, int len)
 	local_irq_restore(flags);
 	return 0;
 }
+
+void mon_dmapeek_vec(const struct mon_reader_bin *rp,
+    unsigned int offset, dma_addr_t dma_addr, unsigned int length)
+{
+	unsigned long flags;
+	unsigned int step_len;
+	struct page *pg;
+	unsigned char *map;
+	unsigned long page_off, page_len;
+
+	local_irq_save(flags);
+	while (length) {
+		/* compute number of bytes we are going to copy in this page */
+		step_len = length;
+		page_off = dma_addr & (PAGE_SIZE-1);
+		page_len = PAGE_SIZE - page_off;
+		if (page_len < step_len)
+			step_len = page_len;
+
+		/* copy data and advance pointers */
+		pg = phys_to_page(dma_addr);
+		map = kmap_atomic(pg, KM_IRQ0);
+		offset = mon_copy_to_buff(rp, offset, map + page_off, step_len);
+		kunmap_atomic(map, KM_IRQ0);
+		dma_addr += step_len;
+		length -= step_len;
+	}
+	local_irq_restore(flags);
+}
+
 #endif /* __i386__ */
 
 #ifndef MON_HAS_UNMAP
@@ -55,4 +85,11 @@ char mon_dmapeek(unsigned char *dst, dma_addr_t dma_addr, int len)
 {
 	return 'D';
 }
-#endif
+
+void mon_dmapeek_vec(const struct mon_reader_bin *rp,
+    unsigned int offset, dma_addr_t dma_addr, unsigned int length)
+{
+	;
+}
+
+#endif /* MON_HAS_UNMAP */
diff --git a/drivers/usb/mon/mon_main.c b/drivers/usb/mon/mon_main.c
index 394bbf2f68d4..c9739e7b35e5 100644
--- a/drivers/usb/mon/mon_main.c
+++ b/drivers/usb/mon/mon_main.c
@@ -9,7 +9,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/usb.h>
-#include <linux/debugfs.h>
 #include <linux/smp_lock.h>
 #include <linux/notifier.h>
 #include <linux/mutex.h>
@@ -22,11 +21,10 @@ static void mon_complete(struct usb_bus *ubus, struct urb *urb);
 static void mon_stop(struct mon_bus *mbus);
 static void mon_dissolve(struct mon_bus *mbus, struct usb_bus *ubus);
 static void mon_bus_drop(struct kref *r);
-static void mon_bus_init(struct dentry *mondir, struct usb_bus *ubus);
+static void mon_bus_init(struct usb_bus *ubus);
 
 DEFINE_MUTEX(mon_lock);
 
-static struct dentry *mon_dir;		/* /dbg/usbmon */
 static LIST_HEAD(mon_buses);		/* All buses we know: struct mon_bus */
 
 /*
@@ -200,7 +198,7 @@ static void mon_stop(struct mon_bus *mbus)
  */
 static void mon_bus_add(struct usb_bus *ubus)
 {
-	mon_bus_init(mon_dir, ubus);
+	mon_bus_init(ubus);
 }
 
 /*
@@ -212,8 +210,8 @@ static void mon_bus_remove(struct usb_bus *ubus)
 
 	mutex_lock(&mon_lock);
 	list_del(&mbus->bus_link);
-	debugfs_remove(mbus->dent_t);
-	debugfs_remove(mbus->dent_s);
+	if (mbus->text_inited)
+		mon_text_del(mbus);
 
 	mon_dissolve(mbus, ubus);
 	kref_put(&mbus->ref, mon_bus_drop);
@@ -281,13 +279,9 @@
  * - refcount USB bus struct
  * - link
  */
-static void mon_bus_init(struct dentry *mondir, struct usb_bus *ubus)
+static void mon_bus_init(struct usb_bus *ubus)
 {
-	struct dentry *d;
 	struct mon_bus *mbus;
-	enum { NAMESZ = 10 };
-	char name[NAMESZ];
-	int rc;
 
 	if ((mbus = kzalloc(sizeof(struct mon_bus), GFP_KERNEL)) == NULL)
 		goto err_alloc;
@@ -303,57 +297,54 @@ static void mon_bus_init(struct dentry *mondir, struct usb_bus *ubus)
 	ubus->mon_bus = mbus;
 	mbus->uses_dma = ubus->uses_dma;
 
-	rc = snprintf(name, NAMESZ, "%dt", ubus->busnum);
-	if (rc <= 0 || rc >= NAMESZ)
-		goto err_print_t;
-	d = debugfs_create_file(name, 0600, mondir, mbus, &mon_fops_text);
-	if (d == NULL)
-		goto err_create_t;
-	mbus->dent_t = d;
-
-	rc = snprintf(name, NAMESZ, "%ds", ubus->busnum);
-	if (rc <= 0 || rc >= NAMESZ)
-		goto err_print_s;
-	d = debugfs_create_file(name, 0600, mondir, mbus, &mon_fops_stat);
-	if (d == NULL)
-		goto err_create_s;
-	mbus->dent_s = d;
+	mbus->text_inited = mon_text_add(mbus, ubus);
+	// mon_bin_add(...)
 
 	mutex_lock(&mon_lock);
 	list_add_tail(&mbus->bus_link, &mon_buses);
 	mutex_unlock(&mon_lock);
 	return;
 
-err_create_s:
-err_print_s:
-	debugfs_remove(mbus->dent_t);
-err_create_t:
-err_print_t:
-	kfree(mbus);
 err_alloc:
 	return;
 }
 
+/*
+ * Search a USB bus by number. Notice that USB bus numbers start from one,
+ * which we may later use to identify "all" with zero.
+ *
+ * This function must be called with mon_lock held.
+ *
+ * This is obviously inefficient and may be revised in the future.
+ */
+struct mon_bus *mon_bus_lookup(unsigned int num)
+{
+	struct list_head *p;
+	struct mon_bus *mbus;
+
+	list_for_each (p, &mon_buses) {
+		mbus = list_entry(p, struct mon_bus, bus_link);
+		if (mbus->u_bus->busnum == num) {
+			return mbus;
+		}
+	}
+	return NULL;
+}
+
 static int __init mon_init(void)
 {
 	struct usb_bus *ubus;
-	struct dentry *mondir;
+	int rc;
 
-	mondir = debugfs_create_dir("usbmon", NULL);
-	if (IS_ERR(mondir)) {
-		printk(KERN_NOTICE TAG ": debugfs is not available\n");
-		return -ENODEV;
-	}
-	if (mondir == NULL) {
-		printk(KERN_NOTICE TAG ": unable to create usbmon directory\n");
-		return -ENODEV;
-	}
-	mon_dir = mondir;
+	if ((rc = mon_text_init()) != 0)
+		goto err_text;
+	if ((rc = mon_bin_init()) != 0)
+		goto err_bin;
 
 	if (usb_mon_register(&mon_ops_0) != 0) {
 		printk(KERN_NOTICE TAG ": unable to register with the core\n");
-		debugfs_remove(mondir);
-		return -ENODEV;
+		rc = -ENODEV;
+		goto err_reg;
 	}
 	// MOD_INC_USE_COUNT(which_module?);
 
@@ -361,10 +352,17 @@ static int __init mon_init(void)
 
 	mutex_lock(&usb_bus_list_lock);
 	list_for_each_entry (ubus, &usb_bus_list, bus_list) {
-		mon_bus_init(mondir, ubus);
+		mon_bus_init(ubus);
 	}
 	mutex_unlock(&usb_bus_list_lock);
 	return 0;
+
+err_reg:
+	mon_bin_exit();
+err_bin:
+	mon_text_exit();
+err_text:
+	return rc;
 }
 
 static void __exit mon_exit(void)
@@ -381,8 +379,8 @@ static void __exit mon_exit(void)
 		mbus = list_entry(p, struct mon_bus, bus_link);
 		list_del(p);
 
-		debugfs_remove(mbus->dent_t);
-		debugfs_remove(mbus->dent_s);
+		if (mbus->text_inited)
+			mon_text_del(mbus);
 
 		/*
 		 * This never happens, because the open/close paths in
@@ -401,7 +399,8 @@ static void __exit mon_exit(void)
 	}
 	mutex_unlock(&mon_lock);
 
-	debugfs_remove(mon_dir);
+	mon_text_exit();
+	mon_bin_exit();
 }
 
 module_init(mon_init);
diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c
index 05cf2c9a8f84..d38a1279d9d9 100644
--- a/drivers/usb/mon/mon_text.c
+++ b/drivers/usb/mon/mon_text.c
@@ -9,6 +9,7 @@
 #include <linux/usb.h>
 #include <linux/time.h>
 #include <linux/mutex.h>
+#include <linux/debugfs.h>
 #include <asm/uaccess.h>
 
 #include "usb_mon.h"
@@ -63,6 +64,8 @@ struct mon_reader_text {
 	char slab_name[SLAB_NAME_SZ];
 };
 
+static struct dentry *mon_dir;		/* Usually /sys/kernel/debug/usbmon */
+
 static void mon_text_ctor(void *, struct kmem_cache *, unsigned long);
 
 /*
@@ -436,7 +439,7 @@ static int mon_text_release(struct inode *inode, struct file *file)
 	return 0;
 }
 
-const struct file_operations mon_fops_text = {
+static const struct file_operations mon_fops_text = {
 	.owner =	THIS_MODULE,
 	.open =		mon_text_open,
 	.llseek =	no_llseek,
@@ -447,6 +450,47 @@ const struct file_operations mon_fops_text = {
 	.release =	mon_text_release,
 };
 
+int mon_text_add(struct mon_bus *mbus, const struct usb_bus *ubus)
+{
+	struct dentry *d;
+	enum { NAMESZ = 10 };
+	char name[NAMESZ];
+	int rc;
+
+	rc = snprintf(name, NAMESZ, "%dt", ubus->busnum);
+	if (rc <= 0 || rc >= NAMESZ)
+		goto err_print_t;
+	d = debugfs_create_file(name, 0600, mon_dir, mbus, &mon_fops_text);
+	if (d == NULL)
+		goto err_create_t;
+	mbus->dent_t = d;
+
+	/* XXX The stats do not belong to here (text API), but oh well... */
+	rc = snprintf(name, NAMESZ, "%ds", ubus->busnum);
+	if (rc <= 0 || rc >= NAMESZ)
+		goto err_print_s;
+	d = debugfs_create_file(name, 0600, mon_dir, mbus, &mon_fops_stat);
+	if (d == NULL)
+		goto err_create_s;
+	mbus->dent_s = d;
+
+	return 1;
+
+err_create_s:
+err_print_s:
+	debugfs_remove(mbus->dent_t);
+	mbus->dent_t = NULL;
+err_create_t:
+err_print_t:
+	return 0;
+}
+
+void mon_text_del(struct mon_bus *mbus)
+{
+	debugfs_remove(mbus->dent_t);
+	debugfs_remove(mbus->dent_s);
+}
+
 /*
  * Slab interface: constructor.
  */
@@ -459,3 +503,24 @@ static void mon_text_ctor(void *mem, struct kmem_cache *slab, unsigned long sfla
 	memset(mem, 0xe5, sizeof(struct mon_event_text));
 }
 
+int __init mon_text_init(void)
+{
+	struct dentry *mondir;
+
+	mondir = debugfs_create_dir("usbmon", NULL);
+	if (IS_ERR(mondir)) {
+		printk(KERN_NOTICE TAG ": debugfs is not available\n");
+		return -ENODEV;
+	}
+	if (mondir == NULL) {
+		printk(KERN_NOTICE TAG ": unable to create usbmon directory\n");
+		return -ENODEV;
+	}
+	mon_dir = mondir;
+	return 0;
+}
+
+void __exit mon_text_exit(void)
+{
+	debugfs_remove(mon_dir);
+}
diff --git a/drivers/usb/mon/usb_mon.h b/drivers/usb/mon/usb_mon.h
index ab9d02d5df77..4f949ce8a7f3 100644
--- a/drivers/usb/mon/usb_mon.h
+++ b/drivers/usb/mon/usb_mon.h
@@ -17,9 +17,11 @@
 struct mon_bus {
 	struct list_head bus_link;
 	spinlock_t lock;
+	struct usb_bus *u_bus;
+
+	int text_inited;
 	struct dentry *dent_s;		/* Debugging file */
 	struct dentry *dent_t;		/* Text interface file */
-	struct usb_bus *u_bus;
 	int uses_dma;
 
 	/* Ref */
@@ -48,13 +50,35 @@ struct mon_reader {
 void mon_reader_add(struct mon_bus *mbus, struct mon_reader *r);
 void mon_reader_del(struct mon_bus *mbus, struct mon_reader *r);
 
+struct mon_bus *mon_bus_lookup(unsigned int num);
+
+int /*bool*/ mon_text_add(struct mon_bus *mbus, const struct usb_bus *ubus);
+void mon_text_del(struct mon_bus *mbus);
+// void mon_bin_add(struct mon_bus *);
+
+int __init mon_text_init(void);
+void __exit mon_text_exit(void);
+int __init mon_bin_init(void);
+void __exit mon_bin_exit(void);
+
 /*
- */
+ * DMA interface.
+ *
+ * XXX The vectored side needs a serious re-thinking. Abstracting vectors,
+ * like in Paolo's original patch, produces a double pkmap. We need an idea.
+*/
 extern char mon_dmapeek(unsigned char *dst, dma_addr_t dma_addr, int len);
 
+struct mon_reader_bin;
+extern void mon_dmapeek_vec(const struct mon_reader_bin *rp,
+    unsigned int offset, dma_addr_t dma_addr, unsigned int len);
+extern unsigned int mon_copy_to_buff(const struct mon_reader_bin *rp,
+    unsigned int offset, const unsigned char *from, unsigned int len);
+
+/*
+ */
 extern struct mutex mon_lock;
 
-extern const struct file_operations mon_fops_text;
 extern const struct file_operations mon_fops_stat;
 
 #endif /* __USB_MON_H */