path: root/tools/perf/util/scripting-engines/trace-event-python.c

/*
 * trace-event-python.  Feed trace events to an embedded Python interpreter.
 *
 * Copyright (C) 2010 Tom Zanussi <tzanussi@gmail.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

#include <Python.h>

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

#include "../../perf.h"
#include "../evsel.h"
#include "../util.h"
#include "../event.h"
#include "../thread.h"
#include "../trace-event.h"

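/*
 * Init entry point for the builtin perf_trace_context extension module,
 * which scripts pull in via "from perf_trace_context import *".  It is
 * called from python_start_script() right after Py_Initialize().
 */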
PyMODINIT_FUNC initperf_trace_context(void);

#define FTRACE_MAX_EVENT				\
	((1 << (sizeof(unsigned short) * 8)) - 1)

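/*
 * Per-tracepoint-id cache of event format descriptions, filled lazily by
 * find_cache_event().
 */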
struct event_format *events[FTRACE_MAX_EVENT];

#define MAX_FIELDS	64
#define N_COMMON_FIELDS	7

extern struct scripting_context *scripting_context;

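/*
 * State carried across the recursive print-format walk in
 * define_event_symbols().
 */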
static char *cur_field_name;
static int zero_flag_atom;

static PyObject *main_module, *main_dict;

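/* Print the pending Python exception and abort perf. */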
static void handler_call_die(const char *handler_name)
{
	PyErr_Print();
	Py_FatalError("problem in Python trace event handler");
}

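/*
 * Export a single value->string mapping to the script by calling its
 * define_flag_value() or define_symbolic_value() function with
 * (event name, field name, numeric value, string).
 */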
static void define_value(enum print_arg_type field_type,
			 const char *ev_name,
			 const char *field_name,
			 const char *field_value,
			 const char *field_str)
{
	const char *handler_name = "define_flag_value";
	PyObject *handler, *t, *retval;
	unsigned long long value;
	unsigned n = 0;

	if (field_type == PRINT_SYMBOL)
		handler_name = "define_symbolic_value";

	t = PyTuple_New(4);
	if (!t)
		Py_FatalError("couldn't create Python tuple");

	value = eval_flag(field_value);

	PyTuple_SetItem(t, n++, PyString_FromString(ev_name));
	PyTuple_SetItem(t, n++, PyString_FromString(field_name));
	PyTuple_SetItem(t, n++, PyInt_FromLong(value));
	PyTuple_SetItem(t, n++, PyString_FromString(field_str));

	handler = PyDict_GetItemString(main_dict, handler_name);
	if (handler && PyCallable_Check(handler)) {
		retval = PyObject_CallObject(handler, t);
		if (retval == NULL)
			handler_call_die(handler_name);
	}

	Py_DECREF(t);
}

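/* Walk the flag/symbol list and export every entry via define_value(). */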
static void define_values(enum print_arg_type field_type,
			  struct print_flag_sym *field,
			  const char *ev_name,
			  const char *field_name)
{
	define_value(field_type, ev_name, field_name, field->value,
		     field->str);

	if (field->next)
		define_values(field_type, field->next, ev_name, field_name);
}

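/*
 * Tell the script that a field is a flags or symbolic field, via its
 * define_flag_field() or define_symbolic_field() function.  Flags fields
 * additionally get the delimiter used in the print format.
 */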
static void define_field(enum print_arg_type field_type,
			 const char *ev_name,
			 const char *field_name,
			 const char *delim)
{
	const char *handler_name = "define_flag_field";
	PyObject *handler, *t, *retval;
	unsigned n = 0;

	if (field_type == PRINT_SYMBOL)
		handler_name = "define_symbolic_field";

	if (field_type == PRINT_FLAGS)
		t = PyTuple_New(3);
	else
		t = PyTuple_New(2);
	if (!t)
		Py_FatalError("couldn't create Python tuple");

	PyTuple_SetItem(t, n++, PyString_FromString(ev_name));
	PyTuple_SetItem(t, n++, PyString_FromString(field_name));
	if (field_type == PRINT_FLAGS)
		PyTuple_SetItem(t, n++, PyString_FromString(delim));

	handler = PyDict_GetItemString(main_dict, handler_name);
	if (handler && PyCallable_Check(handler)) {
		retval = PyObject_CallObject(handler, t);
		if (retval == NULL)
			handler_call_die(handler_name);
	}

	Py_DECREF(t);
}

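/*
 * Recursively walk the event's print format arguments, remembering the
 * current field name and exporting flag/symbol tables so that scripts can
 * translate raw field values back to strings (flag_str()/symbol_str()).
 */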
static void define_event_symbols(struct event_format *event,
				 const char *ev_name,
				 struct print_arg *args)
{
	switch (args->type) {
	case PRINT_NULL:
		break;
	case PRINT_ATOM:
		define_value(PRINT_FLAGS, ev_name, cur_field_name, "0",
			     args->atom.atom);
		zero_flag_atom = 0;
		break;
	case PRINT_FIELD:
		if (cur_field_name)
			free(cur_field_name);
		cur_field_name = strdup(args->field.name);
		break;
	case PRINT_FLAGS:
		define_event_symbols(event, ev_name, args->flags.field);
		define_field(PRINT_FLAGS, ev_name, cur_field_name,
			     args->flags.delim);
		define_values(PRINT_FLAGS, args->flags.flags, ev_name,
			      cur_field_name);
		break;
	case PRINT_SYMBOL:
		define_event_symbols(event, ev_name, args->symbol.field);
		define_field(PRINT_SYMBOL, ev_name, cur_field_name, NULL);
		define_values(PRINT_SYMBOL, args->symbol.symbols, ev_name,
			      cur_field_name);
		break;
	case PRINT_HEX:
		define_event_symbols(event, ev_name, args->hex.field);
		define_event_symbols(event, ev_name, args->hex.size);
		break;
	case PRINT_STRING:
		break;
	case PRINT_TYPE:
		define_event_symbols(event, ev_name, args->typecast.item);
		break;
	case PRINT_OP:
		if (strcmp(args->op.op, ":") == 0)
			zero_flag_atom = 1;
		define_event_symbols(event, ev_name, args->op.left);
		define_event_symbols(event, ev_name, args->op.right);
		break;
	default:
		/* gcc warns for these? */
	case PRINT_BSTRING:
	case PRINT_DYNAMIC_ARRAY:
	case PRINT_FUNC:
		/* we should warn... */
		return;
	}

	if (args->next)
		define_event_symbols(event, ev_name, args->next);
}

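/*
 * Look up (or create) the cached event format for a tracepoint evsel and
 * define its symbols to the script on first use.
 */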
static inline struct event_format *find_cache_event(struct perf_evsel *evsel)
{
	static char ev_name[256];
	struct event_format *event;
	int type = evsel->attr.config;

	/*
	 * XXX: Do we really need to cache this since now we have evsel->tp_format
	 * cached already? Need to re-read this "cache" routine that as well calls
	 * define_event_symbols() :-\
	 */
	if (events[type])
		return events[type];

	events[type] = event = evsel->tp_format;
	if (!event)
		return NULL;

	sprintf(ev_name, "%s__%s", event->system, event->name);

	define_event_symbols(event, ev_name, event->print_fmt.args);

	return event;
}

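/*
 * Hand one tracepoint sample to the script.  If the script defines a
 * handler named "<system>__<event>" it is called with positional
 * arguments, matching the stubs emitted by python_generate_script():
 *
 *   def <system>__<event>(event_name, context, common_cpu, common_secs,
 *                         common_nsecs, common_pid, common_comm,
 *                         <event fields...>):
 *
 * Otherwise the fields are packed into a dict and the optional
 * trace_unhandled(event_name, context, event_fields_dict) handler is
 * called instead.
 */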
static void python_process_tracepoint(union perf_event *perf_event
				      __maybe_unused,
				 struct perf_sample *sample,
				 struct perf_evsel *evsel,
				 struct machine *machine __maybe_unused,
				 struct addr_location *al)
{
	PyObject *handler, *retval, *context, *t, *obj, *dict = NULL;
	static char handler_name[256];
	struct format_field *field;
	unsigned long long val;
	unsigned long s, ns;
	struct event_format *event;
	unsigned n = 0;
	int pid;
	int cpu = sample->cpu;
	void *data = sample->raw_data;
	unsigned long long nsecs = sample->time;
	struct thread *thread = al->thread;
	char *comm = thread->comm;

	t = PyTuple_New(MAX_FIELDS);
	if (!t)
		Py_FatalError("couldn't create Python tuple");

	event = find_cache_event(evsel);
	if (!event)
		die("ug! no event found for type %d", (int)evsel->attr.config);

	pid = raw_field_value(event, "common_pid", data);

	sprintf(handler_name, "%s__%s", event->system, event->name);

	handler = PyDict_GetItemString(main_dict, handler_name);
	if (handler && !PyCallable_Check(handler))
		handler = NULL;
	if (!handler) {
		dict = PyDict_New();
		if (!dict)
			Py_FatalError("couldn't create Python dict");
	}
	s = nsecs / NSECS_PER_SEC;
	ns = nsecs - s * NSECS_PER_SEC;

	scripting_context->event_data = data;
	scripting_context->pevent = evsel->tp_format->pevent;

	context = PyCObject_FromVoidPtr(scripting_context, NULL);

	PyTuple_SetItem(t, n++, PyString_FromString(handler_name));
	PyTuple_SetItem(t, n++, context);

	if (handler) {
		PyTuple_SetItem(t, n++, PyInt_FromLong(cpu));
		PyTuple_SetItem(t, n++, PyInt_FromLong(s));
		PyTuple_SetItem(t, n++, PyInt_FromLong(ns));
		PyTuple_SetItem(t, n++, PyInt_FromLong(pid));
		PyTuple_SetItem(t, n++, PyString_FromString(comm));
	} else {
		PyDict_SetItemString(dict, "common_cpu", PyInt_FromLong(cpu));
		PyDict_SetItemString(dict, "common_s", PyInt_FromLong(s));
		PyDict_SetItemString(dict, "common_ns", PyInt_FromLong(ns));
		PyDict_SetItemString(dict, "common_pid", PyInt_FromLong(pid));
		PyDict_SetItemString(dict, "common_comm", PyString_FromString(comm));
	}
	for (field = event->format.fields; field; field = field->next) {
		if (field->flags & FIELD_IS_STRING) {
			int offset;
			if (field->flags & FIELD_IS_DYNAMIC) {
				offset = *(int *)(data + field->offset);
				offset &= 0xffff;
			} else
				offset = field->offset;
			obj = PyString_FromString((char *)data + offset);
		} else { /* FIELD_IS_NUMERIC */
			val = read_size(event, data + field->offset,
					field->size);
			if (field->flags & FIELD_IS_SIGNED) {
				if ((long long)val >= LONG_MIN &&
				    (long long)val <= LONG_MAX)
					obj = PyInt_FromLong(val);
				else
					obj = PyLong_FromLongLong(val);
			} else {
				if (val <= LONG_MAX)
					obj = PyInt_FromLong(val);
				else
					obj = PyLong_FromUnsignedLongLong(val);
			}
		}
		if (handler)
			PyTuple_SetItem(t, n++, obj);
		else
			PyDict_SetItemString(dict, field->name, obj);

	}
	if (!handler)
		PyTuple_SetItem(t, n++, dict);

	if (_PyTuple_Resize(&t, n) == -1)
		Py_FatalError("error resizing Python tuple");

	if (handler) {
		retval = PyObject_CallObject(handler, t);
		if (retval == NULL)
			handler_call_die(handler_name);
	} else {
		handler = PyDict_GetItemString(main_dict, "trace_unhandled");
		if (handler && PyCallable_Check(handler)) {

			retval = PyObject_CallObject(handler, t);
			if (retval == NULL)
				handler_call_die("trace_unhandled");
		}
		Py_DECREF(dict);
	}

	Py_DECREF(t);
}

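/*
 * Non-tracepoint samples: pack the evsel name, the raw perf_event_attr and
 * perf_sample structs, the raw sample data and the resolved comm/dso/symbol
 * into a dict and pass it to the script's process_event() handler, if any.
 */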
static void python_process_general_event(union perf_event *perf_event
					 __maybe_unused,
					 struct perf_sample *sample,
					 struct perf_evsel *evsel,
					 struct machine *machine __maybe_unused,
					 struct addr_location *al)
{
	PyObject *handler, *retval, *t, *dict;
	static char handler_name[64];
	unsigned n = 0;
	struct thread *thread = al->thread;

	/*
	 * Size the tuple with MAX_FIELDS so the function can be extended,
	 * though currently it only carries a single item.
	 */
	t = PyTuple_New(MAX_FIELDS);
	if (!t)
		Py_FatalError("couldn't create Python tuple");

	dict = PyDict_New();
	if (!dict)
		Py_FatalError("couldn't create Python dictionary");

	snprintf(handler_name, sizeof(handler_name), "%s", "process_event");

	handler = PyDict_GetItemString(main_dict, handler_name);
	if (!handler || !PyCallable_Check(handler))
		goto exit;

	PyDict_SetItemString(dict, "ev_name", PyString_FromString(perf_evsel__name(evsel)));
	PyDict_SetItemString(dict, "attr", PyString_FromStringAndSize(
			(const char *)&evsel->attr, sizeof(evsel->attr)));
	PyDict_SetItemString(dict, "sample", PyString_FromStringAndSize(
			(const char *)sample, sizeof(*sample)));
	PyDict_SetItemString(dict, "raw_buf", PyString_FromStringAndSize(
			(const char *)sample->raw_data, sample->raw_size));
	PyDict_SetItemString(dict, "comm",
			PyString_FromString(thread->comm));
	if (al->map) {
		PyDict_SetItemString(dict, "dso",
			PyString_FromString(al->map->dso->name));
	}
	if (al->sym) {
		PyDict_SetItemString(dict, "symbol",
			PyString_FromString(al->sym->name));
	}

	PyTuple_SetItem(t, n++, dict);
	if (_PyTuple_Resize(&t, n) == -1)
		Py_FatalError("error resizing Python tuple");

	retval = PyObject_CallObject(handler, t);
	if (retval == NULL)
		handler_call_die(handler_name);
exit:
	Py_DECREF(dict);
	Py_DECREF(t);
}

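/*
 * Top-level dispatcher: tracepoint samples go through the per-event
 * handler path, everything else through the generic process_event() path.
 */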
static void python_process_event(union perf_event *perf_event,
				 struct perf_sample *sample,
				 struct perf_evsel *evsel,
				 struct machine *machine,
				 struct addr_location *al)
{
	switch (evsel->attr.type) {
	case PERF_TYPE_TRACEPOINT:
		python_process_tracepoint(perf_event, sample, evsel,
					  machine, al);
		break;
	/* Reserve for future process_hw/sw/raw APIs */
	default:
		python_process_general_event(perf_event, sample, evsel,
					     machine, al);
	}
}

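/*
 * Grab references to __main__ and its dict (used later to look up the
 * script's handlers) and call the optional trace_begin() handler.
 */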
static int run_start_sub(void)
{
	PyObject *handler, *retval;
	int err = 0;

	main_module = PyImport_AddModule("__main__");
	if (main_module == NULL)
		return -1;
	Py_INCREF(main_module);

	main_dict = PyModule_GetDict(main_module);
	if (main_dict == NULL) {
		err = -1;
		goto error;
	}
	Py_INCREF(main_dict);

	handler = PyDict_GetItemString(main_dict, "trace_begin");
	if (handler == NULL || !PyCallable_Check(handler))
		goto out;

	retval = PyObject_CallObject(handler, NULL);
	if (retval == NULL)
		handler_call_die("trace_begin");

	Py_DECREF(retval);
	return err;
error:
	Py_XDECREF(main_dict);
	Py_XDECREF(main_module);
out:
	return err;
}

/*
 * Start trace script
 */
static int python_start_script(const char *script, int argc, const char **argv)
{
	const char **command_line;
	char buf[PATH_MAX];
	int i, err = 0;
	FILE *fp;

	command_line = malloc((argc + 1) * sizeof(const char *));
	command_line[0] = script;
	for (i = 1; i < argc + 1; i++)
		command_line[i] = argv[i - 1];

	Py_Initialize();

	initperf_trace_context();

	PySys_SetArgv(argc + 1, (char **)command_line);

	fp = fopen(script, "r");
	if (!fp) {
		sprintf(buf, "Can't open python script \"%s\"", script);
		perror(buf);
		err = -1;
		goto error;
	}

	err = PyRun_SimpleFile(fp, script);
	if (err) {
		fprintf(stderr, "Error running python script %s\n", script);
		goto error;
	}

	err = run_start_sub();
	if (err) {
		fprintf(stderr, "Error starting python script %s\n", script);
		goto error;
	}

	free(command_line);

	return err;
error:
	Py_Finalize();
	free(command_line);

	return err;
}

/*
 * Stop trace script
 */
static int python_stop_script(void)
{
	PyObject *handler, *retval;
	int err = 0;

	handler = PyDict_GetItemString(main_dict, "trace_end");
	if (handler == NULL || !PyCallable_Check(handler))
		goto out;

	retval = PyObject_CallObject(handler, NULL);
	if (retval == NULL)
		handler_call_die("trace_end");
	else
		Py_DECREF(retval);
out:
	Py_XDECREF(main_dict);
	Py_XDECREF(main_module);
	Py_Finalize();

	return err;
}

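/*
 * "perf script -g python": generate a starter script containing
 * trace_begin()/trace_end(), one stub handler per event found in the
 * trace data, a trace_unhandled() fallback and a print_header() helper.
 * Each stub prints the common header followed by every event field.
 */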
static int python_generate_script(struct pevent *pevent, const char *outfile)
{
	struct event_format *event = NULL;
	struct format_field *f;
	char fname[PATH_MAX];
	int not_first, count;
	FILE *ofp;

	sprintf(fname, "%s.py", outfile);
	ofp = fopen(fname, "w");
	if (ofp == NULL) {
		fprintf(stderr, "couldn't open %s\n", fname);
		return -1;
	}
	fprintf(ofp, "# perf script event handlers, "
		"generated by perf script -g python\n");

	fprintf(ofp, "# Licensed under the terms of the GNU GPL"
		" License version 2\n\n");

	fprintf(ofp, "# The common_* event handler fields are the most useful "
		"fields common to\n");

	fprintf(ofp, "# all events.  They don't necessarily correspond to "
		"the 'common_*' fields\n");

	fprintf(ofp, "# in the format files.  Those fields not available as "
		"handler params can\n");

	fprintf(ofp, "# be retrieved using Python functions of the form "
		"common_*(context).\n");

	fprintf(ofp, "# See the perf-trace-python Documentation for the list "
		"of available functions.\n\n");

	fprintf(ofp, "import os\n");
	fprintf(ofp, "import sys\n\n");

	fprintf(ofp, "sys.path.append(os.environ['PERF_EXEC_PATH'] + \\\n");
	fprintf(ofp, "\t'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')\n");
	fprintf(ofp, "\nfrom perf_trace_context import *\n");
	fprintf(ofp, "from Core import *\n\n\n");

	fprintf(ofp, "def trace_begin():\n");
	fprintf(ofp, "\tprint \"in trace_begin\"\n\n");

	fprintf(ofp, "def trace_end():\n");
	fprintf(ofp, "\tprint \"in trace_end\"\n\n");

	while ((event = trace_find_next_event(pevent, event))) {
		fprintf(ofp, "def %s__%s(", event->system, event->name);
		fprintf(ofp, "event_name, ");
		fprintf(ofp, "context, ");
		fprintf(ofp, "common_cpu,\n");
		fprintf(ofp, "\tcommon_secs, ");
		fprintf(ofp, "common_nsecs, ");
		fprintf(ofp, "common_pid, ");
		fprintf(ofp, "common_comm,\n\t");

		not_first = 0;
		count = 0;

		for (f = event->format.fields; f; f = f->next) {
			if (not_first++)
				fprintf(ofp, ", ");
			if (++count % 5 == 0)
				fprintf(ofp, "\n\t");

			fprintf(ofp, "%s", f->name);
		}
		fprintf(ofp, "):\n");

		fprintf(ofp, "\t\tprint_header(event_name, common_cpu, "
			"common_secs, common_nsecs,\n\t\t\t"
			"common_pid, common_comm)\n\n");

		fprintf(ofp, "\t\tprint \"");

		not_first = 0;
		count = 0;

		for (f = event->format.fields; f; f = f->next) {
			if (not_first++)
				fprintf(ofp, ", ");
			if (count && count % 3 == 0) {
				fprintf(ofp, "\" \\\n\t\t\"");
			}
			count++;

			fprintf(ofp, "%s=", f->name);
			if (f->flags & FIELD_IS_STRING ||
			    f->flags & FIELD_IS_FLAG ||
			    f->flags & FIELD_IS_SYMBOLIC)
				fprintf(ofp, "%%s");
			else if (f->flags & FIELD_IS_SIGNED)
				fprintf(ofp, "%%d");
			else
				fprintf(ofp, "%%u");
		}

		fprintf(ofp, "\\n\" %% \\\n\t\t(");

		not_first = 0;
		count = 0;

		for (f = event->format.fields; f; f = f->next) {
			if (not_first++)
				fprintf(ofp, ", ");

			if (++count % 5 == 0)
				fprintf(ofp, "\n\t\t");

			if (f->flags & FIELD_IS_FLAG) {
				if ((count - 1) % 5 != 0) {
					fprintf(ofp, "\n\t\t");
					count = 4;
				}
				fprintf(ofp, "flag_str(\"");
				fprintf(ofp, "%s__%s\", ", event->system,
					event->name);
				fprintf(ofp, "\"%s\", %s)", f->name,
					f->name);
			} else if (f->flags & FIELD_IS_SYMBOLIC) {
				if ((count - 1) % 5 != 0) {
					fprintf(ofp, "\n\t\t");
					count = 4;
				}
				fprintf(ofp, "symbol_str(\"");
				fprintf(ofp, "%s__%s\", ", event->system,
					event->name);
				fprintf(ofp, "\"%s\", %s)", f->name,
					f->name);
			} else
				fprintf(ofp, "%s", f->name);
		}

		fprintf(ofp, "),\n\n");
	}

	fprintf(ofp, "def trace_unhandled(event_name, context, "
		"event_fields_dict):\n");

	fprintf(ofp, "\t\tprint ' '.join(['%%s=%%s'%%(k,str(v))"
		"for k,v in sorted(event_fields_dict.items())])\n\n");

	fprintf(ofp, "def print_header("
		"event_name, cpu, secs, nsecs, pid, comm):\n"
		"\tprint \"%%-20s %%5u %%05u.%%09u %%8u %%-20s \" %% \\\n\t"
		"(event_name, cpu, secs, nsecs, pid, comm),\n");

	fclose(ofp);

	fprintf(stderr, "generated Python script: %s\n", fname);

	return 0;
}

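/*
 * Scripting ops registered with the perf scripting layer and driven by
 * "perf script".  A minimal user script (illustrative sketch only, run
 * with something like "perf script -s mytest.py") could be:
 *
 *   def trace_begin():
 *           print "start"
 *
 *   def trace_unhandled(event_name, context, event_fields_dict):
 *           print event_name
 *
 *   def trace_end():
 *           print "end"
 */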
struct scripting_ops python_scripting_ops = {
	.name = "Python",
	.start_script = python_start_script,
	.stop_script = python_stop_script,
	.process_event = python_process_event,
	.generate_script = python_generate_script,
};