/*
 * This is <linux/capability.h>
 *
 * Andrew G. Morgan <morgan@kernel.org>
 * Alexander Kjeldaas <astor@guardian.no>
 * with help from Aleph1, Roland Buresund and Andrew Main.
 *
 * See here for the libcap library ("POSIX draft" compliance):
 *
 * ftp://linux.kernel.org/pub/linux/libs/security/linux-privs/kernel-2.6/
 */

#ifndef _LINUX_CAPABILITY_H
#define _LINUX_CAPABILITY_H

#include <linux/types.h>

struct task_struct;

/* User-level programs do most of the mapping between kernel and user
   capabilities based on the version tag given by the kernel. The
   kernel might be somewhat backwards compatible, but don't bet on
   it. */

/* Note: cap_t is defined by POSIX (draft) to be an "opaque" pointer to
   a set of three capability sets.  Transposing three of the following
   structures into such a composite is better handled in a user
   library, since the draft standard requires the use of malloc/free
   etc. */

#define _LINUX_CAPABILITY_VERSION_1  0x19980330
#define _LINUX_CAPABILITY_U32S_1     1

#define _LINUX_CAPABILITY_VERSION_2  0x20071026  /* deprecated - use v3 */
#define _LINUX_CAPABILITY_U32S_2     2

#define _LINUX_CAPABILITY_VERSION_3  0x20080522
#define _LINUX_CAPABILITY_U32S_3     2

typedef struct __user_cap_header_struct {
	__u32 version;
	int pid;
} __user *cap_user_header_t;

typedef struct __user_cap_data_struct {
        __u32 effective;
        __u32 permitted;
        __u32 inheritable;
} __user *cap_user_data_t;
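
/*
 * Illustrative sketch (not part of this header): reading the current
 * task's capability sets from user space via the v3 ABI, which takes
 * an array of _LINUX_CAPABILITY_U32S_3 data structs.  Assumes the raw
 * capget(2) syscall; portable code should use libcap instead.
 */
#if 0	/* example only */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

static void show_effective_caps(void)
{
	struct __user_cap_header_struct hdr = {
		.version = _LINUX_CAPABILITY_VERSION_3,
		.pid = 0,	/* 0 selects the calling task */
	};
	struct __user_cap_data_struct data[_LINUX_CAPABILITY_U32S_3];

	if (syscall(SYS_capget, &hdr, data) == 0)
		printf("effective: %08x%08x\n",	/* high word, low word */
		       data[1].effective, data[0].effective);
}
#endif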


#define XATTR_CAPS_SUFFIX "capability"
#define XATTR_NAME_CAPS XATTR_SECURITY_PREFIX XATTR_CAPS_SUFFIX

#define VFS_CAP_REVISION_MASK	0xFF000000
#define VFS_CAP_FLAGS_MASK	~VFS_CAP_REVISION_MASK
#define VFS_CAP_FLAGS_EFFECTIVE	0x000001

#define VFS_CAP_REVISION_1	0x01000000
#define VFS_CAP_U32_1           1
#define XATTR_CAPS_SZ_1         (sizeof(__le32)*(1 + 2*VFS_CAP_U32_1))

#define VFS_CAP_REVISION_2	0x02000000
#define VFS_CAP_U32_2           2
#define XATTR_CAPS_SZ_2         (sizeof(__le32)*(1 + 2*VFS_CAP_U32_2))

#define XATTR_CAPS_SZ           XATTR_CAPS_SZ_2
#define VFS_CAP_U32             VFS_CAP_U32_2
#define VFS_CAP_REVISION	VFS_CAP_REVISION_2


struct vfs_cap_data {
	__le32 magic_etc;            /* Little endian */
	struct {
		__le32 permitted;    /* Little endian */
		__le32 inheritable;  /* Little endian */
	} data[VFS_CAP_U32];
};
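
/*
 * Illustrative sketch (not part of this header): hand-building the
 * security.capability xattr value for VFS_CAP_REVISION_2, granting
 * CAP_NET_BIND_SERVICE (bit 10) as permitted and effective.  Assumes
 * a little-endian host (the on-disk fields are little endian) and
 * setxattr(2); real code should use libcap's cap_set_file().
 */
#if 0	/* example only */
#include <string.h>
#include <sys/xattr.h>

static int grant_bind_service(const char *path)
{
	struct vfs_cap_data cap;

	memset(&cap, 0, sizeof(cap));
	cap.magic_etc = VFS_CAP_REVISION_2 | VFS_CAP_FLAGS_EFFECTIVE;
	/* CAP_NET_BIND_SERVICE is bit 10, which lives in word 0 */
	cap.data[0].permitted = 1 << 10;

	return setxattr(path, "security.capability", &cap,
			XATTR_CAPS_SZ_2, 0);
}
#endif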

#ifndef __KERNEL__

/*
 * Backwardly compatible definition for source code - trapped in a
 * 32-bit world. If you find you need this, please consider using
 * libcap to untrap yourself...
 */
#define _LINUX_CAPABILITY_VERSION  _LINUX_CAPABILITY_VERSION_1
#define _LINUX_CAPABILITY_U32S     _LINUX_CAPABILITY_U32S_1

#else

#define _KERNEL_CAPABILITY_VERSION _LINUX_CAPABILITY_VERSION_3
#define _KERNEL_CAPABILITY_U32S    _LINUX_CAPABILITY_U32S_3

typedef struct kernel_cap_struct {
	__u32 cap[_KERNEL_CAPABILITY_U32S];
} kernel_cap_t;

#define _USER_CAP_HEADER_SIZE  (sizeof(struct __user_cap_header_struct))
#define _KERNEL_CAP_T_SIZE     (sizeof(kernel_cap_t))

#endif


/**
 ** POSIX-draft defined capabilities.
 **/

/* In a system with the [_POSIX_CHOWN_RESTRICTED] option defined, this
   overrides the restriction on changing file ownership and group
   ownership. */

#define CAP_CHOWN            0

/* Overrides all DAC access, including ACL execute access if
   [_POSIX_ACL] is defined, excluding DAC access covered by
   CAP_LINUX_IMMUTABLE. */

#define CAP_DAC_OVERRIDE     1

/* Overrides all DAC restrictions regarding read and search on files
   and directories, including ACL restrictions if [_POSIX_ACL] is
   defined, excluding DAC access covered by CAP_LINUX_IMMUTABLE. */

#define CAP_DAC_READ_SEARCH  2

/* Overrides all restrictions on operations on files that require the
   file owner ID to be equal to the user ID, except where CAP_FSETID
   is applicable. It doesn't override MAC and DAC restrictions. */

#define CAP_FOWNER           3

/* Overrides the following restrictions: that the effective user ID
   shall match the file owner ID when setting the S_ISUID and S_ISGID
   bits on that file; that the effective group ID (or one of the
   supplementary group IDs) shall match the file owner ID when setting
   the S_ISGID bit on that file; and that the S_ISUID and S_ISGID bits
   are cleared on successful return from chown(2) (not implemented). */

#define CAP_FSETID           4

/* Overrides the restriction that the real or effective user ID of a
   process sending a signal must match the real or effective user ID
   of the process receiving the signal. */

#define CAP_KILL             5

/* Allows setgid(2) manipulation */
/* Allows setgroups(2) */
/* Allows forged gids on socket credentials passing. */

#define CAP_SETGID           6

/* Allows set*uid(2) manipulation (including fsuid). */
/* Allows forged pids on socket credentials passing. */

#define CAP_SETUID           7


/**
 ** Linux-specific capabilities
 **/

/* Without VFS support for capabilities:
 *   Transfer any capability in your permitted set to any pid,
 *   remove any capability in your permitted set from any pid
 * With VFS support for capabilities (neither of the above, but):
 *   Add any capability from the current task's capability bounding set
 *       to the current process' inheritable set
 *   Allow taking bits out of the capability bounding set
 *   Allow modification of the securebits for a process
 */

#define CAP_SETPCAP          8

/* Allow modification of S_IMMUTABLE and S_APPEND file attributes */

#define CAP_LINUX_IMMUTABLE  9

/* Allows binding to TCP/UDP sockets below 1024 */
/* Allows binding to ATM VCIs below 32 */

#define CAP_NET_BIND_SERVICE 10

/* Allow broadcasting, listen to multicast */

#define CAP_NET_BROADCAST    11

/* Allow interface configuration */
/* Allow administration of IP firewall, masquerading and accounting */
/* Allow setting debug option on sockets */
/* Allow modification of routing tables */
/* Allow setting arbitrary process / process group ownership on
   sockets */
/* Allow binding to any address for transparent proxying */
/* Allow setting TOS (type of service) */
/* Allow setting promiscuous mode */
/* Allow clearing driver statistics */
/* Allow multicasting */
/* Allow read/write of device-specific registers */
/* Allow activation of ATM control sockets */

#define CAP_NET_ADMIN        12

/* Allow use of RAW sockets */
/* Allow use of PACKET sockets */

#define CAP_NET_RAW          13

/* Allow locking of shared memory segments */
/* Allow mlock and mlockall (which don't really have anything to do
   with IPC) */

#define CAP_IPC_LOCK         14

/* Override IPC ownership checks */

#define CAP_IPC_OWNER        15

/* Insert and remove kernel modules - modify kernel without limit */
#define CAP_SYS_MODULE       16

/* Allow ioperm/iopl access */
/* Allow sending USB messages to any device via /proc/bus/usb */

#define CAP_SYS_RAWIO        17

/* Allow use of chroot() */

#define CAP_SYS_CHROOT       18

/* Allow ptrace() of any process */

#define CAP_SYS_PTRACE       19

/* Allow configuration of process accounting */

#define CAP_SYS_PACCT        20

/* Allow configuration of the secure attention key */
/* Allow administration of the random device */
/* Allow examination and configuration of disk quotas */
/* Allow configuring the kernel's syslog (printk behaviour) */
/* Allow setting the domainname */
/* Allow setting the hostname */
/* Allow calling bdflush() */
/* Allow mount() and umount(), setting up new smb connection */
/* Allow some autofs root ioctls */
/* Allow nfsservctl */
/* Allow VM86_REQUEST_IRQ */
/* Allow to read/write pci config on alpha */
/* Allow irix_prctl on mips (setstacksize) */
/* Allow flushing all cache on m68k (sys_cacheflush) */
/* Allow removing semaphores */
/* Used instead of CAP_CHOWN to "chown" IPC message queues, semaphores
   and shared memory */
/* Allow locking/unlocking of shared memory segment */
/* Allow turning swap on/off */
/* Allow forged pids on socket credentials passing */
/* Allow setting readahead and flushing buffers on block devices */
/* Allow setting geometry in floppy driver */
/* Allow turning DMA on/off in xd driver */
/* Allow administration of md devices (mostly the above, but some
   extra ioctls) */
/* Allow tuning the ide driver */
/* Allow access to the nvram device */
/* Allow administration of apm_bios, serial and bttv (TV) device */
/* Allow manufacturer commands in isdn CAPI support driver */
/* Allow reading non-standardized portions of pci configuration space */
/* Allow DDI debug ioctl on sbpcd driver */
/* Allow setting up serial ports */
/* Allow sending raw qic-117 commands */
/* Allow enabling/disabling tagged queuing on SCSI controllers and sending
   arbitrary SCSI commands */
/* Allow setting encryption key on loopback filesystem */
/* Allow setting zone reclaim policy */

#define CAP_SYS_ADMIN        21

/* Allow use of reboot() */

#define CAP_SYS_BOOT         22

/* Allow raising priority and setting priority on other (different
   UID) processes */
/* Allow use of FIFO and round-robin (realtime) scheduling on own
   processes and setting the scheduling algorithm used by another
   process. */
/* Allow setting cpu affinity on other processes */

#define CAP_SYS_NICE         23

/* Override resource limits. Set resource limits. */
/* Override quota limits. */
/* Override reserved space on ext2 filesystem */
/* Modify data journaling mode on ext3 filesystem (uses journaling
   resources) */
/* NOTE: ext2 honors fsuid when checking for resource overrides, so
   you can override using fsuid too */
/* Override size restrictions on IPC message queues */
/* Allow more than 64Hz interrupts from the real-time clock */
/* Override max number of consoles on console allocation */
/* Override max number of keymaps */

#define CAP_SYS_RESOURCE     24

/* Allow manipulation of system clock */
/* Allow irix_stime on mips */
/* Allow setting the real-time clock */

#define CAP_SYS_TIME         25

/* Allow configuration of tty devices */
/* Allow vhangup() of tty */

#define CAP_SYS_TTY_CONFIG   26

/* Allow the privileged aspects of mknod() */

#define CAP_MKNOD            27

/* Allow taking of leases on files */

#define CAP_LEASE            28

#define CAP_AUDIT_WRITE      29

#define CAP_AUDIT_CONTROL    30

#define CAP_SETFCAP	     31

/* Override MAC access.
   The base kernel enforces no MAC policy.
   An LSM may enforce a MAC policy, and if it does and it chooses
   to implement capability based overrides of that policy, this is
   the capability it should use to do so. */

#define CAP_MAC_OVERRIDE     32

/* Allow MAC configuration or state changes.
   The base kernel requires no MAC configuration.
   An LSM may enforce a MAC policy, and if it does and it chooses
   to implement capability based checks on modifications to that
   policy or the data required to maintain it, this is the
   capability it should use to do so. */

#define CAP_MAC_ADMIN        33

#define CAP_LAST_CAP         CAP_MAC_ADMIN

#define cap_valid(x) ((x) >= 0 && (x) <= CAP_LAST_CAP)

/*
 * Bit location of each capability (used by user-space library and kernel)
 */

#define CAP_TO_INDEX(x)     ((x) >> 5)        /* 1 << 5 == bits in __u32 */
#define CAP_TO_MASK(x)      (1 << ((x) & 31)) /* mask for indexed __u32 */
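
/*
 * Worked example (illustrative): CAP_MAC_ADMIN is capability 33, so
 * CAP_TO_INDEX(33) == 33 >> 5 == 1 (the second __u32) and
 * CAP_TO_MASK(33) == 1 << (33 & 31) == 0x00000002 (bit 1 of that word).
 */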

#ifdef __KERNEL__

/*
 * Internal kernel functions only
 */

#define CAP_FOR_EACH_U32(__capi)  \
	for (__capi = 0; __capi < _KERNEL_CAPABILITY_U32S; ++__capi)

# define CAP_FS_MASK_B0     (CAP_TO_MASK(CAP_CHOWN)		\
			    | CAP_TO_MASK(CAP_DAC_OVERRIDE)	\
			    | CAP_TO_MASK(CAP_DAC_READ_SEARCH)	\
			    | CAP_TO_MASK(CAP_FOWNER)		\
			    | CAP_TO_MASK(CAP_FSETID))

# define CAP_FS_MASK_B1     (CAP_TO_MASK(CAP_MAC_OVERRIDE))

#if _KERNEL_CAPABILITY_U32S != 2
# error Fix up hand-coded capability macro initializers
#else /* HAND-CODED capability initializers */

# define CAP_EMPTY_SET    ((kernel_cap_t){{ 0, 0 }})
# define CAP_FULL_SET     ((kernel_cap_t){{ ~0, ~0 }})
# define CAP_INIT_EFF_SET ((kernel_cap_t){{ ~CAP_TO_MASK(CAP_SETPCAP), ~0 }})
# define CAP_FS_SET       ((kernel_cap_t){{ CAP_FS_MASK_B0, CAP_FS_MASK_B1 } })
# define CAP_NFSD_SET     ((kernel_cap_t){{ CAP_FS_MASK_B0|CAP_TO_MASK(CAP_SYS_RESOURCE), \
					CAP_FS_MASK_B1 } })

#endif /* _KERNEL_CAPABILITY_U32S != 2 */
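
/*
 * For illustration: with the masks above, CAP_FS_SET covers CAP_CHOWN,
 * CAP_DAC_OVERRIDE, CAP_DAC_READ_SEARCH, CAP_FOWNER and CAP_FSETID in
 * word 0 plus CAP_MAC_OVERRIDE in word 1, i.e. {{ 0x0000001f, 0x00000001 }}.
 */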

#define CAP_INIT_INH_SET    CAP_EMPTY_SET

# define cap_clear(c)         do { (c) = __cap_empty_set; } while (0)
# define cap_set_full(c)      do { (c) = __cap_full_set; } while (0)
# define cap_set_init_eff(c)  do { (c) = __cap_init_eff_set; } while (0)

#define cap_raise(c, flag)  ((c).cap[CAP_TO_INDEX(flag)] |= CAP_TO_MASK(flag))
#define cap_lower(c, flag)  ((c).cap[CAP_TO_INDEX(flag)] &= ~CAP_TO_MASK(flag))
#define cap_raised(c, flag) ((c).cap[CAP_TO_INDEX(flag)] & CAP_TO_MASK(flag))
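
/*
 * Illustrative sketch (not part of this header): typical use of the
 * single-bit accessors on a kernel_cap_t.
 */
#if 0	/* example only */
static void cap_bit_demo(void)
{
	kernel_cap_t c = CAP_EMPTY_SET;

	cap_raise(c, CAP_NET_ADMIN);		/* sets bit 12 of word 0 */
	if (cap_raised(c, CAP_NET_ADMIN))	/* non-zero: bit is set */
		cap_lower(c, CAP_NET_ADMIN);	/* clears it again */
}
#endif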

#define CAP_BOP_ALL(c, a, b, OP)                                    \
do {                                                                \
	unsigned __capi;                                            \
	CAP_FOR_EACH_U32(__capi) {                                  \
		c.cap[__capi] = a.cap[__capi] OP b.cap[__capi];     \
	}                                                           \
} while (0)

#define CAP_UOP_ALL(c, a, OP)                                       \
do {                                                                \
	unsigned __capi;                                            \
	CAP_FOR_EACH_U32(__capi) {                                  \
		c.cap[__capi] = OP a.cap[__capi];                   \
	}                                                           \
} while (0)

static inline kernel_cap_t cap_combine(const kernel_cap_t a,
				       const kernel_cap_t b)
{
	kernel_cap_t dest;
	CAP_BOP_ALL(dest, a, b, |);
	return dest;
}

static inline kernel_cap_t cap_intersect(const kernel_cap_t a,
					 const kernel_cap_t b)
{
	kernel_cap_t dest;
	CAP_BOP_ALL(dest, a, b, &);
	return dest;
}

static inline kernel_cap_t cap_drop(const kernel_cap_t a,
				    const kernel_cap_t drop)
{
	kernel_cap_t dest;
	CAP_BOP_ALL(dest, a, drop, &~);
	return dest;
}

static inline kernel_cap_t cap_invert(const kernel_cap_t c)
{
	kernel_cap_t dest;
	CAP_UOP_ALL(dest, c, ~);
	return dest;
}
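
/*
 * Illustrative sketch (not part of this header): the whole-set helpers
 * compose like ordinary bit operations; cap_drop(a, b) is equivalent
 * to cap_intersect(a, cap_invert(b)).
 */
#if 0	/* example only */
static kernel_cap_t drop_net_caps(kernel_cap_t a)
{
	kernel_cap_t net = CAP_EMPTY_SET;

	cap_raise(net, CAP_NET_ADMIN);
	cap_raise(net, CAP_NET_RAW);
	return cap_drop(a, net);	/* a & ~net, one __u32 at a time */
}
#endif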

static inline int cap_isclear(const kernel_cap_t a)
{
	unsigned __capi;
	CAP_FOR_EACH_U32(__capi) {
		if (a.cap[__capi] != 0)
			return 0;
	}
	return 1;
}

static inline int cap_issubset(const kernel_cap_t a, const kernel_cap_t set)
{
	kernel_cap_t dest;
	dest = cap_drop(a, set);
	return cap_isclear(dest);
}
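
/*
 * For illustration: cap_issubset(a, set) holds when a has no bit
 * outside set, i.e. when cap_drop(a, set) leaves the empty set; for
 * example, cap_issubset(CAP_FS_SET, CAP_FULL_SET) is always 1.
 */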

/* Used to decide between falling back on the old suser() or fsuser(). */

static inline int cap_is_fs_cap(int cap)
{
	const kernel_cap_t __cap_fs_set = CAP_FS_SET;
	return !!(CAP_TO_MASK(cap) & __cap_fs_set.cap[CAP_TO_INDEX(cap)]);
}

static inline kernel_cap_t cap_drop_fs_set(const kernel_cap_t a)
{
	const kernel_cap_t __cap_fs_set = CAP_FS_SET;
	return cap_drop(a, __cap_fs_set);
}

static inline kernel_cap_t cap_raise_fs_set(const kernel_cap_t a,
					    const kernel_cap_t permitted)
{
	const kernel_cap_t __cap_fs_set = CAP_FS_SET;
	return cap_combine(a,
			   cap_intersect(permitted, __cap_fs_set));
}

static inline kernel_cap_t cap_drop_nfsd_set(const kernel_cap_t a)
{
	const kernel_cap_t __cap_fs_set = CAP_NFSD_SET;
	return cap_drop(a, __cap_fs_set);
}

static inline kernel_cap_t cap_raise_nfsd_set(const kernel_cap_t a,
					      const kernel_cap_t permitted)
{
	const kernel_cap_t __cap_nfsd_set = CAP_NFSD_SET;
	return cap_combine(a,
			   cap_intersect(permitted, __cap_nfsd_set));
}

extern const kernel_cap_t __cap_empty_set;
extern const kernel_cap_t __cap_full_set;
extern const kernel_cap_t __cap_init_eff_set;

kernel_cap_t cap_set_effective(const kernel_cap_t pE_new);

/**
 * has_capability - Determine if a task has a superior capability available
 * @t: The task in question
 * @cap: The capability to be tested for
 *
 * Return true if the specified task has the given superior capability
 * currently in effect, false if not.
 *
 * Note that this does not set PF_SUPERPRIV on the task.
 */
#define has_capability(t, cap) (security_capable((t), (cap)) == 0)

extern int capable(int cap);
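
/*
 * Illustrative sketch (not part of this header): the usual pattern for
 * privilege checks in driver code, e.g. a privileged ioctl handler.
 */
#if 0	/* example only */
static int example_priv_ioctl(void)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;	/* caller lacks the capability */
	/* ... privileged work ... */
	return 0;
}
#endif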

#endif /* __KERNEL__ */

#endif /* !_LINUX_CAPABILITY_H */