author		Greg Kroah-Hartman <gregkh@suse.de>	2009-03-18 12:22:17 -0400
committer	Greg Kroah-Hartman <gregkh@suse.de>	2009-03-18 12:22:17 -0400
commit		d0573facf21d1e5cfbc1ddac272b7592722e6c01 (patch)
tree		41d319003d57351da32c1a8968757a445fc0845e
parent		8144737def6abc49457124424887436531bd6a50 (diff)
Staging: benet: remove driver now that it is merged in drivers/net/
The benet driver is now in the proper place in drivers/net/benet, so we
can remove the staging version.

Acked-by: Sathya Perla <sathyap@serverengines.com>
Cc: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
-rw-r--r--	drivers/staging/Kconfig	2
-rw-r--r--	drivers/staging/Makefile	1
-rw-r--r--	drivers/staging/benet/Kconfig	7
-rw-r--r--	drivers/staging/benet/MAINTAINERS	6
-rw-r--r--	drivers/staging/benet/Makefile	14
-rw-r--r--	drivers/staging/benet/TODO	6
-rw-r--r--	drivers/staging/benet/asyncmesg.h	82
-rw-r--r--	drivers/staging/benet/be_cm.h	134
-rw-r--r--	drivers/staging/benet/be_common.h	53
-rw-r--r--	drivers/staging/benet/be_ethtool.c	348
-rw-r--r--	drivers/staging/benet/be_init.c	1382
-rw-r--r--	drivers/staging/benet/be_int.c	863
-rw-r--r--	drivers/staging/benet/be_netif.c	705
-rw-r--r--	drivers/staging/benet/benet.h	429
-rw-r--r--	drivers/staging/benet/bestatus.h	103
-rw-r--r--	drivers/staging/benet/cev.h	243
-rw-r--r--	drivers/staging/benet/cq.c	211
-rw-r--r--	drivers/staging/benet/descriptors.h	71
-rw-r--r--	drivers/staging/benet/doorbells.h	179
-rw-r--r--	drivers/staging/benet/ep.h	66
-rw-r--r--	drivers/staging/benet/eq.c	299
-rw-r--r--	drivers/staging/benet/eth.c	1273
-rw-r--r--	drivers/staging/benet/etx_context.h	55
-rw-r--r--	drivers/staging/benet/funcobj.c	565
-rw-r--r--	drivers/staging/benet/fwcmd_common.h	222
-rw-r--r--	drivers/staging/benet/fwcmd_common_bmap.h	717
-rw-r--r--	drivers/staging/benet/fwcmd_eth_bmap.h	280
-rw-r--r--	drivers/staging/benet/fwcmd_hdr_bmap.h	54
-rw-r--r--	drivers/staging/benet/fwcmd_mcc.h	94
-rw-r--r--	drivers/staging/benet/fwcmd_opcodes.h	244
-rw-r--r--	drivers/staging/benet/fwcmd_types_bmap.h	29
-rw-r--r--	drivers/staging/benet/host_struct.h	182
-rw-r--r--	drivers/staging/benet/hwlib.h	830
-rw-r--r--	drivers/staging/benet/mpu.c	1364
-rw-r--r--	drivers/staging/benet/mpu.h	74
-rw-r--r--	drivers/staging/benet/mpu_context.h	46
-rw-r--r--	drivers/staging/benet/pcicfg.h	825
-rw-r--r--	drivers/staging/benet/post_codes.h	111
-rw-r--r--	drivers/staging/benet/regmap.h	68
39 files changed, 0 insertions, 12237 deletions
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index ce6badded47a..211af86a6c55 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -73,8 +73,6 @@ source "drivers/staging/rt2860/Kconfig"
 
 source "drivers/staging/rt2870/Kconfig"
 
-source "drivers/staging/benet/Kconfig"
-
 source "drivers/staging/comedi/Kconfig"
 
 source "drivers/staging/asus_oled/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 9ddcc2bb3365..47a56f5ffabc 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -19,7 +19,6 @@ obj-$(CONFIG_AGNX) += agnx/
 obj-$(CONFIG_OTUS)		+= otus/
 obj-$(CONFIG_RT2860)		+= rt2860/
 obj-$(CONFIG_RT2870)		+= rt2870/
-obj-$(CONFIG_BENET)		+= benet/
 obj-$(CONFIG_COMEDI)		+= comedi/
 obj-$(CONFIG_ASUS_OLED)	+= asus_oled/
 obj-$(CONFIG_PANEL)		+= panel/
diff --git a/drivers/staging/benet/Kconfig b/drivers/staging/benet/Kconfig
deleted file mode 100644
index f6806074f998..000000000000
--- a/drivers/staging/benet/Kconfig
+++ /dev/null
@@ -1,7 +0,0 @@
1config BENET
2 tristate "ServerEngines 10Gb NIC - BladeEngine"
3 depends on PCI && INET
4 select INET_LRO
5 help
6 This driver implements the NIC functionality for ServerEngines
7 10Gb network adapter BladeEngine (EC 3210).
diff --git a/drivers/staging/benet/MAINTAINERS b/drivers/staging/benet/MAINTAINERS
deleted file mode 100644
index d5ce340218b3..000000000000
--- a/drivers/staging/benet/MAINTAINERS
+++ /dev/null
@@ -1,6 +0,0 @@
1SERVER ENGINES 10Gbe NIC - BLADE-ENGINE
2P: Subbu Seetharaman
3M: subbus@serverengines.com
4L: netdev@vger.kernel.org
5W: http://www.serverengines.com
6S: Supported
diff --git a/drivers/staging/benet/Makefile b/drivers/staging/benet/Makefile
deleted file mode 100644
index 460b923b99bd..000000000000
--- a/drivers/staging/benet/Makefile
+++ /dev/null
@@ -1,14 +0,0 @@
1#
2# Makefile to build the network driver for ServerEngine's BladeEngine
3#
4obj-$(CONFIG_BENET) += benet.o
5
6benet-y := be_init.o \
7 be_int.o \
8 be_netif.o \
9 be_ethtool.o \
10 funcobj.o \
11 cq.o \
12 eq.o \
13 mpu.o \
14 eth.o
diff --git a/drivers/staging/benet/TODO b/drivers/staging/benet/TODO
deleted file mode 100644
index a51dfb59a62f..000000000000
--- a/drivers/staging/benet/TODO
+++ /dev/null
@@ -1,6 +0,0 @@
1TODO:
2 - remove wrappers around common iowrite functions
3 - full netdev audit of common problems/issues
4
5Please send all patches and questions to Subbu Seetharaman
6<subbus@serverengines.com> and Greg Kroah-Hartman <greg@kroah.com>
diff --git a/drivers/staging/benet/asyncmesg.h b/drivers/staging/benet/asyncmesg.h
deleted file mode 100644
index d1e779adb848..000000000000
--- a/drivers/staging/benet/asyncmesg.h
+++ /dev/null
@@ -1,82 +0,0 @@
1/*
2 * Copyright (C) 2005 - 2008 ServerEngines
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@serverengines.com
12 *
13 * ServerEngines
14 * 209 N. Fair Oaks Ave
15 * Sunnyvale, CA 94085
16 */
17/*
18 * Autogenerated by srcgen version: 0127
19 */
20#ifndef __asyncmesg_amap_h__
21#define __asyncmesg_amap_h__
22#include "fwcmd_common.h"
23
24/* --- ASYNC_EVENT_CODES --- */
25#define ASYNC_EVENT_CODE_LINK_STATE (1)
26#define ASYNC_EVENT_CODE_ISCSI (2)
27
28/* --- ASYNC_LINK_STATES --- */
29#define ASYNC_EVENT_LINK_DOWN (0) /* Link Down on a port */
30#define ASYNC_EVENT_LINK_UP (1) /* Link Up on a port */
31
32/*
33 * The last 4 bytes of the async events have this common format. It allows
34 * the driver to distinguish MCC_CQ_ENTRY structs from
35 * asynchronous events. Both arrive on the same completion queue. This
36 * structure also contains the common fields used to decode the async event.
37 */
38struct BE_ASYNC_EVENT_TRAILER_AMAP {
39 u8 rsvd0[8]; /* DWORD 0 */
40 u8 event_code[8]; /* DWORD 0 */
41 u8 event_type[8]; /* DWORD 0 */
42 u8 rsvd1[6]; /* DWORD 0 */
43 u8 async_event; /* DWORD 0 */
44 u8 valid; /* DWORD 0 */
45} __packed;
46struct ASYNC_EVENT_TRAILER_AMAP {
47 u32 dw[1];
48};
49
50/*
51 * Applicable in Initiator, Target and NIC modes.
52 * A link state async event is seen by all device drivers as soon they
53 * create an MCC ring. Thereafter, anytime the link status changes the
54 * drivers will receive a link state async event. Notifications continue to
55 * be sent until a driver destroys its MCC ring. A link down event is
56 * reported when either port loses link. A link up event is reported
57 * when either port regains link. When BE's failover mechanism is enabled, a
58 * link down on the active port causes traffic to be diverted to the standby
59 * port by the BE's ARM firmware (assuming the standby port has link). In
60 * this case, the standby port assumes the active status. Note: when link is
61 * restored on the failed port, traffic continues on the currently active
62 * port. The ARM firmware does not attempt to 'fail back' traffic to
63 * the restored port.
64 */
65struct BE_ASYNC_EVENT_LINK_STATE_AMAP {
66 u8 port0_link_status[8];
67 u8 port1_link_status[8];
68 u8 active_port[8];
69 u8 rsvd0[8]; /* DWORD 0 */
70 u8 port0_duplex[8];
71 u8 port0_speed[8];
72 u8 port1_duplex[8];
73 u8 port1_speed[8];
74 u8 port0_fault[8];
75 u8 port1_fault[8];
76 u8 rsvd1[2][8]; /* DWORD 2 */
77 struct BE_ASYNC_EVENT_TRAILER_AMAP trailer;
78} __packed;
79struct ASYNC_EVENT_LINK_STATE_AMAP {
80 u32 dw[4];
81};
82#endif /* __asyncmesg_amap_h__ */
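
The trailer convention above is easiest to see in use. A minimal sketch of
how a consumer might classify a completion entry, assuming the
AMAP_GET_BITS_PTR() accessor this driver's hwlib provides; the helper name
and its placement are ours, not part of the driver:

	/* Sketch: decide whether the last dword of a completion entry
	 * describes an async event rather than an ordinary MCC completion. */
	static bool is_async_event(struct ASYNC_EVENT_TRAILER_AMAP *trailer)
	{
		u32 code;

		if (!AMAP_GET_BITS_PTR(ASYNC_EVENT_TRAILER, valid, trailer))
			return false;	/* entry not yet written by hardware */
		if (!AMAP_GET_BITS_PTR(ASYNC_EVENT_TRAILER, async_event, trailer))
			return false;	/* ordinary MCC_CQ_ENTRY */

		code = AMAP_GET_BITS_PTR(ASYNC_EVENT_TRAILER, event_code, trailer);
		return code == ASYNC_EVENT_CODE_LINK_STATE ||
		       code == ASYNC_EVENT_CODE_ISCSI;
	}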
diff --git a/drivers/staging/benet/be_cm.h b/drivers/staging/benet/be_cm.h
deleted file mode 100644
index b7a1dfd20c36..000000000000
--- a/drivers/staging/benet/be_cm.h
+++ /dev/null
@@ -1,134 +0,0 @@
1/*
2 * Copyright (C) 2005 - 2008 ServerEngines
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@serverengines.com
12 *
13 * ServerEngines
14 * 209 N. Fair Oaks Ave
15 * Sunnyvale, CA 94085
16 */
17/*
18 * Autogenerated by srcgen version: 0127
19 */
20#ifndef __be_cm_amap_h__
21#define __be_cm_amap_h__
22#include "be_common.h"
23#include "etx_context.h"
24#include "mpu_context.h"
25
26/*
27 * --- CEV_WATERMARK_ENUM ---
28 * CQ/EQ Watermark Encodings. Encoded as number of free entries in
29 * Queue when Watermark is reached.
30 */
31#define CEV_WMARK_0 (0) /* Watermark when Queue full */
32#define CEV_WMARK_16 (1) /* Watermark at 16 free entries */
33#define CEV_WMARK_32 (2) /* Watermark at 32 free entries */
34#define CEV_WMARK_48 (3) /* Watermark at 48 free entries */
35#define CEV_WMARK_64 (4) /* Watermark at 64 free entries */
36#define CEV_WMARK_80 (5) /* Watermark at 80 free entries */
37#define CEV_WMARK_96 (6) /* Watermark at 96 free entries */
38#define CEV_WMARK_112 (7) /* Watermark at 112 free entries */
39#define CEV_WMARK_128 (8) /* Watermark at 128 free entries */
40#define CEV_WMARK_144 (9) /* Watermark at 144 free entries */
41#define CEV_WMARK_160 (10) /* Watermark at 160 free entries */
42#define CEV_WMARK_176 (11) /* Watermark at 176 free entries */
43#define CEV_WMARK_192 (12) /* Watermark at 192 free entries */
44#define CEV_WMARK_208 (13) /* Watermark at 208 free entries */
45#define CEV_WMARK_224 (14) /* Watermark at 224 free entries */
46#define CEV_WMARK_240 (15) /* Watermark at 240 free entries */
47
48/*
49 * --- CQ_CNT_ENUM ---
50 * Completion Queue Count Encodings.
51 */
52#define CEV_CQ_CNT_256 (0) /* CQ has 256 entries */
53#define CEV_CQ_CNT_512 (1) /* CQ has 512 entries */
54#define CEV_CQ_CNT_1024 (2) /* CQ has 1024 entries */
55
56/*
57 * --- EQ_CNT_ENUM ---
58 * Event Queue Count Encodings.
59 */
60#define CEV_EQ_CNT_256 (0) /* EQ has 256 entries (16-byte EQEs only) */
61#define CEV_EQ_CNT_512 (1) /* EQ has 512 entries (16-byte EQEs only) */
62#define CEV_EQ_CNT_1024 (2) /* EQ has 1024 entries (4-byte or */
63 /* 16-byte EQEs only) */
64#define CEV_EQ_CNT_2048 (3) /* EQ has 2048 entries (4-byte or */
65 /* 16-byte EQEs only) */
66#define CEV_EQ_CNT_4096 (4) /* EQ has 4096 entries (4-byte EQEs only) */
67
68/*
69 * --- EQ_SIZE_ENUM ---
70 * Event Queue Entry Size Encoding.
71 */
72#define CEV_EQ_SIZE_4 (0) /* EQE is 4 bytes */
73#define CEV_EQ_SIZE_16 (1) /* EQE is 16 bytes */
74
75/*
76 * Completion Queue Context Table Entry. Contains the state of a CQ.
77 * Located in RAM within the CEV block.
78 */
79struct BE_CQ_CONTEXT_AMAP {
80 u8 Cidx[11]; /* DWORD 0 */
81 u8 Watermark[4]; /* DWORD 0 */
82 u8 NoDelay; /* DWORD 0 */
83 u8 EPIdx[11]; /* DWORD 0 */
84 u8 Count[2]; /* DWORD 0 */
85 u8 valid; /* DWORD 0 */
86 u8 SolEvent; /* DWORD 0 */
87 u8 Eventable; /* DWORD 0 */
88 u8 Pidx[11]; /* DWORD 1 */
89 u8 PD[10]; /* DWORD 1 */
90 u8 EQID[7]; /* DWORD 1 */
91 u8 Func; /* DWORD 1 */
92 u8 WME; /* DWORD 1 */
93 u8 Stalled; /* DWORD 1 */
94 u8 Armed; /* DWORD 1 */
95} __packed;
96struct CQ_CONTEXT_AMAP {
97 u32 dw[2];
98};
99
100/*
101 * Event Queue Context Table Entry. Contains the state of an EQ.
102 * Located in RAM in the CEV block.
103 */
104struct BE_EQ_CONTEXT_AMAP {
105 u8 Cidx[13]; /* DWORD 0 */
106 u8 rsvd0[2]; /* DWORD 0 */
107 u8 Func; /* DWORD 0 */
108 u8 EPIdx[13]; /* DWORD 0 */
109 u8 valid; /* DWORD 0 */
110 u8 rsvd1; /* DWORD 0 */
111 u8 Size; /* DWORD 0 */
112 u8 Pidx[13]; /* DWORD 1 */
113 u8 rsvd2[3]; /* DWORD 1 */
114 u8 PD[10]; /* DWORD 1 */
115 u8 Count[3]; /* DWORD 1 */
116 u8 SolEvent; /* DWORD 1 */
117 u8 Stalled; /* DWORD 1 */
118 u8 Armed; /* DWORD 1 */
119 u8 Watermark[4]; /* DWORD 2 */
120 u8 WME; /* DWORD 2 */
121 u8 rsvd3[3]; /* DWORD 2 */
122 u8 EventVect[6]; /* DWORD 2 */
123 u8 rsvd4[2]; /* DWORD 2 */
124 u8 Delay[8]; /* DWORD 2 */
125 u8 rsvd5[6]; /* DWORD 2 */
126 u8 TMR; /* DWORD 2 */
127 u8 rsvd6; /* DWORD 2 */
128 u8 rsvd7[32]; /* DWORD 3 */
129} __packed;
130struct EQ_CONTEXT_AMAP {
131 u32 dw[4];
132};
133
134#endif /* __be_cm_amap_h__ */
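
The CEV_WMARK_* table above encodes a watermark as the number of free
entries divided by 16. A small illustrative helper, under that reading of
the comments (the function name is ours, not part of the driver):

	/* Sketch: map a desired free-entry count to its CEV_WMARK_* encoding.
	 * The table runs CEV_WMARK_0 (0 free) .. CEV_WMARK_240 (240 free)
	 * in steps of 16. */
	static inline u32 cev_watermark_encode(u32 free_entries)
	{
		if (free_entries > 240)
			free_entries = 240;	/* CEV_WMARK_240 is the top encoding */
		return free_entries / 16;
	}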
diff --git a/drivers/staging/benet/be_common.h b/drivers/staging/benet/be_common.h
deleted file mode 100644
index 7e63dc5e3348..000000000000
--- a/drivers/staging/benet/be_common.h
+++ /dev/null
@@ -1,53 +0,0 @@
1/*
2 * Copyright (C) 2005 - 2008 ServerEngines
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@serverengines.com
12 *
13 * ServerEngines
14 * 209 N. Fair Oaks Ave
15 * Sunnyvale, CA 94085
16 */
17/*
18 * Autogenerated by srcgen version: 0127
19 */
20#ifndef __be_common_amap_h__
21#define __be_common_amap_h__
22
23/* Physical Address. */
24struct BE_PHYS_ADDR_AMAP {
25 u8 lo[32]; /* DWORD 0 */
26 u8 hi[32]; /* DWORD 1 */
27} __packed;
28struct PHYS_ADDR_AMAP {
29 u32 dw[2];
30};
31
32/* Virtual Address. */
33struct BE_VIRT_ADDR_AMAP {
34 u8 lo[32]; /* DWORD 0 */
35 u8 hi[32]; /* DWORD 1 */
36} __packed;
37struct VIRT_ADDR_AMAP {
38 u32 dw[2];
39};
40
41/* Scatter gather element. */
42struct BE_SGE_AMAP {
43 u8 addr_hi[32]; /* DWORD 0 */
44 u8 addr_lo[32]; /* DWORD 1 */
45 u8 rsvd0[32]; /* DWORD 2 */
46 u8 len[16]; /* DWORD 3 */
47 u8 rsvd1[16]; /* DWORD 3 */
48} __packed;
49struct SGE_AMAP {
50 u32 dw[4];
51};
52
53#endif /* __be_common_amap_h__ */
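
As an illustration of how these bit-map structures get filled, here is a
hedged sketch of populating a scatter-gather element, assuming the
AMAP_SET_BITS_PTR() accessor used elsewhere in this driver (the helper name
is ours):

	/* Sketch: load a DMA address and length into an SGE. */
	static void be_fill_sge(struct SGE_AMAP *sge, u64 dma_addr, u32 len)
	{
		AMAP_SET_BITS_PTR(SGE, addr_hi, sge, upper_32_bits(dma_addr));
		AMAP_SET_BITS_PTR(SGE, addr_lo, sge, lower_32_bits(dma_addr));
		AMAP_SET_BITS_PTR(SGE, len, sge, len);
	}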
diff --git a/drivers/staging/benet/be_ethtool.c b/drivers/staging/benet/be_ethtool.c
deleted file mode 100644
index 027af85707aa..000000000000
--- a/drivers/staging/benet/be_ethtool.c
+++ /dev/null
@@ -1,348 +0,0 @@
1/*
2 * Copyright (C) 2005 - 2008 ServerEngines
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@serverengines.com
12 *
13 * ServerEngines
14 * 209 N. Fair Oaks Ave
15 * Sunnyvale, CA 94085
16 */
17/*
18 * be_ethtool.c
19 *
20 * This file contains various functions that ethtool can use
21 * to talk to the driver and the BE H/W.
22 */
23
24#include "benet.h"
25
26#include <linux/ethtool.h>
27
28static const char benet_gstrings_stats[][ETH_GSTRING_LEN] = {
29/* net_device_stats */
30 "rx_packets",
31 "tx_packets",
32 "rx_bytes",
33 "tx_bytes",
34 "rx_errors",
35 "tx_errors",
36 "rx_dropped",
37 "tx_dropped",
38 "multicast",
39 "collisions",
40 "rx_length_errors",
41 "rx_over_errors",
42 "rx_crc_errors",
43 "rx_frame_errors",
44 "rx_fifo_errors",
45 "rx_missed_errors",
46 "tx_aborted_errors",
47 "tx_carrier_errors",
48 "tx_fifo_errors",
49 "tx_heartbeat_errors",
50 "tx_window_errors",
51 "rx_compressed",
52	"tx_compressed",
53/* BE driver Stats */
54 "bes_tx_reqs",
55 "bes_tx_fails",
56 "bes_fwd_reqs",
57 "bes_tx_wrbs",
58 "bes_interrupts",
59 "bes_events",
60 "bes_tx_events",
61 "bes_rx_events",
62 "bes_tx_compl",
63 "bes_rx_compl",
64 "bes_ethrx_post_fail",
65 "bes_802_3_dropped_frames",
66 "bes_802_3_malformed_frames",
67 "bes_rx_misc_pkts",
68 "bes_eth_tx_rate",
69 "bes_eth_rx_rate",
70 "Num Packets collected",
71 "Num Times Flushed",
72};
73
74#define NET_DEV_STATS_LEN \
75 (sizeof(struct net_device_stats)/sizeof(unsigned long))
76
77#define BENET_STATS_LEN ARRAY_SIZE(benet_gstrings_stats)
78
79static void
80be_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
81{
82 struct be_net_object *pnob = netdev_priv(netdev);
83 struct be_adapter *adapter = pnob->adapter;
84
85 strncpy(drvinfo->driver, be_driver_name, 32);
86 strncpy(drvinfo->version, be_drvr_ver, 32);
87 strncpy(drvinfo->fw_version, be_fw_ver, 32);
88 strcpy(drvinfo->bus_info, pci_name(adapter->pdev));
89 drvinfo->testinfo_len = 0;
90 drvinfo->regdump_len = 0;
91 drvinfo->eedump_len = 0;
92}
93
94static int
95be_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
96{
97 struct be_net_object *pnob = netdev_priv(netdev);
98 struct be_adapter *adapter = pnob->adapter;
99
100 coalesce->rx_max_coalesced_frames = adapter->max_rx_coal;
101
102 coalesce->rx_coalesce_usecs = adapter->cur_eqd;
103 coalesce->rx_coalesce_usecs_high = adapter->max_eqd;
104 coalesce->rx_coalesce_usecs_low = adapter->min_eqd;
105
106 coalesce->tx_coalesce_usecs = adapter->cur_eqd;
107 coalesce->tx_coalesce_usecs_high = adapter->max_eqd;
108 coalesce->tx_coalesce_usecs_low = adapter->min_eqd;
109
110 coalesce->use_adaptive_rx_coalesce = adapter->enable_aic;
111 coalesce->use_adaptive_tx_coalesce = adapter->enable_aic;
112
113 return 0;
114}
115
116/*
117 * This routine is used to set interrupt coalescing delay *as well as*
118 * the number of pkts to coalesce for LRO.
119 */
120static int
121be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
122{
123 struct be_net_object *pnob = netdev_priv(netdev);
124 struct be_adapter *adapter = pnob->adapter;
125 struct be_eq_object *eq_objectp;
126 u32 max, min, cur;
127 int status;
128
129 adapter->max_rx_coal = coalesce->rx_max_coalesced_frames;
130 if (adapter->max_rx_coal >= BE_LRO_MAX_PKTS)
131 adapter->max_rx_coal = BE_LRO_MAX_PKTS;
132
133 if (adapter->enable_aic == 0 &&
134 coalesce->use_adaptive_rx_coalesce == 1) {
135 /* if AIC is being turned on now, start with an EQD of 0 */
136 adapter->cur_eqd = 0;
137 }
138 adapter->enable_aic = coalesce->use_adaptive_rx_coalesce;
139
140 /* round off to nearest multiple of 8 */
141 max = (((coalesce->rx_coalesce_usecs_high + 4) >> 3) << 3);
142 min = (((coalesce->rx_coalesce_usecs_low + 4) >> 3) << 3);
143 cur = (((coalesce->rx_coalesce_usecs + 4) >> 3) << 3);
144
145 if (adapter->enable_aic) {
146 /* accept low and high if AIC is enabled */
147 if (max > MAX_EQD)
148 max = MAX_EQD;
149 if (min > max)
150 min = max;
151 adapter->max_eqd = max;
152 adapter->min_eqd = min;
153 if (adapter->cur_eqd > max)
154 adapter->cur_eqd = max;
155 if (adapter->cur_eqd < min)
156 adapter->cur_eqd = min;
157 } else {
158 /* accept specified coalesce_usecs only if AIC is disabled */
159 if (cur > MAX_EQD)
160 cur = MAX_EQD;
161 eq_objectp = &pnob->event_q_obj;
162 status =
163 be_eq_modify_delay(&pnob->fn_obj, 1, &eq_objectp, &cur,
164 NULL, NULL, NULL);
165 if (status == BE_SUCCESS)
166 adapter->cur_eqd = cur;
167 }
168 return 0;
169}
170
171static u32 be_get_rx_csum(struct net_device *netdev)
172{
173 struct be_net_object *pnob = netdev_priv(netdev);
174 struct be_adapter *adapter = pnob->adapter;
175 return adapter->rx_csum;
176}
177
178static int be_set_rx_csum(struct net_device *netdev, uint32_t data)
179{
180 struct be_net_object *pnob = netdev_priv(netdev);
181 struct be_adapter *adapter = pnob->adapter;
182
183 if (data)
184 adapter->rx_csum = 1;
185 else
186 adapter->rx_csum = 0;
187
188 return 0;
189}
190
191static void
192be_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data)
193{
194 switch (stringset) {
195 case ETH_SS_STATS:
196 memcpy(data, *benet_gstrings_stats,
197 sizeof(benet_gstrings_stats));
198 break;
199 }
200}
201
202static int be_get_stats_count(struct net_device *netdev)
203{
204 return BENET_STATS_LEN;
205}
206
207static void
208be_get_ethtool_stats(struct net_device *netdev,
209 struct ethtool_stats *stats, uint64_t *data)
210{
211 struct be_net_object *pnob = netdev_priv(netdev);
212 struct be_adapter *adapter = pnob->adapter;
213 int i;
214
215 benet_get_stats(netdev);
216
217	for (i = 0; i < NET_DEV_STATS_LEN; i++)
218		data[i] = ((unsigned long *)&adapter->benet_stats)[i];
219
220	data[i++] = adapter->be_stat.bes_tx_reqs;
221 data[i++] = adapter->be_stat.bes_tx_fails;
222 data[i++] = adapter->be_stat.bes_fwd_reqs;
223 data[i++] = adapter->be_stat.bes_tx_wrbs;
224
225 data[i++] = adapter->be_stat.bes_ints;
226 data[i++] = adapter->be_stat.bes_events;
227 data[i++] = adapter->be_stat.bes_tx_events;
228 data[i++] = adapter->be_stat.bes_rx_events;
229 data[i++] = adapter->be_stat.bes_tx_compl;
230 data[i++] = adapter->be_stat.bes_rx_compl;
231 data[i++] = adapter->be_stat.bes_ethrx_post_fail;
232 data[i++] = adapter->be_stat.bes_802_3_dropped_frames;
233 data[i++] = adapter->be_stat.bes_802_3_malformed_frames;
234 data[i++] = adapter->be_stat.bes_rx_misc_pkts;
235 data[i++] = adapter->be_stat.bes_eth_tx_rate;
236 data[i++] = adapter->be_stat.bes_eth_rx_rate;
237 data[i++] = adapter->be_stat.bes_rx_coal;
238 data[i++] = adapter->be_stat.bes_rx_flush;
239
240}
241
242static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
243{
244 ecmd->speed = SPEED_10000;
245 ecmd->duplex = DUPLEX_FULL;
246 ecmd->autoneg = AUTONEG_DISABLE;
247 return 0;
248}
249
250/* Get the Ring parameters from the pnob */
251static void
252be_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
253{
254 struct be_net_object *pnob = netdev_priv(netdev);
255
256	/* Preset maximums */
257 ring->rx_max_pending = pnob->rx_q_len;
258	ring->rx_mini_max_pending = 0;	/* mini ring not supported */
259	ring->rx_jumbo_max_pending = 0;	/* jumbo ring not supported */
260 ring->tx_max_pending = pnob->tx_q_len;
261
262 /* Current hardware Settings */
263 ring->rx_pending = atomic_read(&pnob->rx_q_posted);
264	ring->rx_mini_pending = 0;	/* mini ring not supported */
265	ring->rx_jumbo_pending = 0;	/* jumbo ring not supported */
266 ring->tx_pending = atomic_read(&pnob->tx_q_used);
267
268}
269
270static void
271be_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
272{
273 struct be_net_object *pnob = netdev_priv(netdev);
274 bool rxfc, txfc;
275 int status;
276
277 status = be_eth_get_flow_control(&pnob->fn_obj, &txfc, &rxfc);
278 if (status != BE_SUCCESS) {
279 dev_info(&netdev->dev, "Unable to get pause frame settings\n");
280 /* return defaults */
281 ecmd->rx_pause = 1;
282 ecmd->tx_pause = 0;
283 ecmd->autoneg = AUTONEG_ENABLE;
284 return;
285 }
286
287 if (txfc == true)
288 ecmd->tx_pause = 1;
289 else
290 ecmd->tx_pause = 0;
291
292 if (rxfc == true)
293 ecmd->rx_pause = 1;
294 else
295 ecmd->rx_pause = 0;
296
297 ecmd->autoneg = AUTONEG_ENABLE;
298}
299
300static int
301be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
302{
303 struct be_net_object *pnob = netdev_priv(netdev);
304 bool txfc, rxfc;
305 int status;
306
307 if (ecmd->autoneg != AUTONEG_ENABLE)
308 return -EINVAL;
309
310 if (ecmd->tx_pause)
311 txfc = true;
312 else
313 txfc = false;
314
315 if (ecmd->rx_pause)
316 rxfc = true;
317 else
318 rxfc = false;
319
320 status = be_eth_set_flow_control(&pnob->fn_obj, txfc, rxfc);
321 if (status != BE_SUCCESS) {
322 dev_info(&netdev->dev, "Unable to set pause frame settings\n");
323 return -1;
324 }
325 return 0;
326}
327
328struct ethtool_ops be_ethtool_ops = {
329 .get_settings = be_get_settings,
330 .get_drvinfo = be_get_drvinfo,
331 .get_link = ethtool_op_get_link,
332 .get_coalesce = be_get_coalesce,
333 .set_coalesce = be_set_coalesce,
334 .get_ringparam = be_get_ringparam,
335 .get_pauseparam = be_get_pauseparam,
336 .set_pauseparam = be_set_pauseparam,
337 .get_rx_csum = be_get_rx_csum,
338 .set_rx_csum = be_set_rx_csum,
339 .get_tx_csum = ethtool_op_get_tx_csum,
340 .set_tx_csum = ethtool_op_set_tx_csum,
341 .get_sg = ethtool_op_get_sg,
342 .set_sg = ethtool_op_set_sg,
343 .get_tso = ethtool_op_get_tso,
344 .set_tso = ethtool_op_set_tso,
345 .get_strings = be_get_strings,
346 .get_stats_count = be_get_stats_count,
347 .get_ethtool_stats = be_get_ethtool_stats,
348};
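
One detail of be_set_coalesce() above worth calling out: the EQ delay is
programmed in 8-microsecond granules, so requested values are rounded to
the nearest multiple of 8 before use. The same arithmetic in isolation, as
a sketch (the helper name is ours):

	/* Sketch: round a microsecond value to the nearest multiple of 8,
	 * matching the (((x + 4) >> 3) << 3) expressions in be_set_coalesce().
	 * e.g. 11 -> 8, 13 -> 16. */
	static inline u32 be_round_eqd(u32 usecs)
	{
		return ((usecs + 4) >> 3) << 3;
	}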
diff --git a/drivers/staging/benet/be_init.c b/drivers/staging/benet/be_init.c
deleted file mode 100644
index 12a026c3f9e1..000000000000
--- a/drivers/staging/benet/be_init.c
+++ /dev/null
@@ -1,1382 +0,0 @@
1/*
2 * Copyright (C) 2005 - 2008 ServerEngines
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@serverengines.com
12 *
13 * ServerEngines
14 * 209 N. Fair Oaks Ave
15 * Sunnyvale, CA 94085
16 */
17#include <linux/etherdevice.h>
18#include "benet.h"
19
20#define DRVR_VERSION "1.0.728"
21
22static const struct pci_device_id be_device_id_table[] = {
23 {PCI_DEVICE(0x19a2, 0x0201)},
24 {0}
25};
26
27MODULE_DEVICE_TABLE(pci, be_device_id_table);
28
29MODULE_VERSION(DRVR_VERSION);
30
31#define DRV_DESCRIPTION "ServerEngines BladeEngine Network Driver Version "
32
33MODULE_DESCRIPTION(DRV_DESCRIPTION DRVR_VERSION);
34MODULE_AUTHOR("ServerEngines");
35MODULE_LICENSE("GPL");
36
37static unsigned int msix = 1;
38module_param(msix, uint, S_IRUGO);
39MODULE_PARM_DESC(msix, "Use MSI-x interrupts");
40
41static unsigned int rxbuf_size = 2048; /* Default RX frag size */
42module_param(rxbuf_size, uint, S_IRUGO);
43MODULE_PARM_DESC(rxbuf_size, "Size of buffers to hold Rx data");
44
45const char be_drvr_ver[] = DRVR_VERSION;
46char be_fw_ver[32]; /* F/W version filled in by be_probe */
47char be_driver_name[] = "benet";
48
49/*
50 * Number of entries in each queue.
51 */
52#define EVENT_Q_LEN 1024
53#define ETH_TXQ_LEN 2048
54#define ETH_TXCQ_LEN 1024
55#define ETH_RXQ_LEN 1024 /* Does not support any other value */
56#define ETH_UC_RXCQ_LEN 1024
57#define ETH_BC_RXCQ_LEN 256
58#define MCC_Q_LEN 64 /* total size not to exceed 8 pages */
59#define MCC_CQ_LEN 256
60
61/* Bit mask describing events of interest to be traced */
62unsigned int trace_level;
63
64static int
65init_pci_be_function(struct be_adapter *adapter, struct pci_dev *pdev)
66{
67 u64 pa;
68
69 /* CSR */
70 pa = pci_resource_start(pdev, 2);
71 adapter->csr_va = ioremap_nocache(pa, pci_resource_len(pdev, 2));
72 if (adapter->csr_va == NULL)
73 return -ENOMEM;
74
75 /* Door Bell */
76 pa = pci_resource_start(pdev, 4);
77 adapter->db_va = ioremap_nocache(pa, (128 * 1024));
78 if (adapter->db_va == NULL) {
79 iounmap(adapter->csr_va);
80 return -ENOMEM;
81 }
82
83 /* PCI */
84 pa = pci_resource_start(pdev, 1);
85 adapter->pci_va = ioremap_nocache(pa, pci_resource_len(pdev, 1));
86 if (adapter->pci_va == NULL) {
87 iounmap(adapter->csr_va);
88 iounmap(adapter->db_va);
89 return -ENOMEM;
90 }
91 return 0;
92}
93
94/*
95 This function enables the interrupt corresponding to the Event
96 queue ID for the given NetObject
97*/
98void be_enable_eq_intr(struct be_net_object *pnob)
99{
100 struct CQ_DB_AMAP cqdb;
101 cqdb.dw[0] = 0;
102 AMAP_SET_BITS_PTR(CQ_DB, event, &cqdb, 1);
103 AMAP_SET_BITS_PTR(CQ_DB, rearm, &cqdb, 1);
104 AMAP_SET_BITS_PTR(CQ_DB, num_popped, &cqdb, 0);
105 AMAP_SET_BITS_PTR(CQ_DB, qid, &cqdb, pnob->event_q_id);
106 PD_WRITE(&pnob->fn_obj, cq_db, cqdb.dw[0]);
107}
108
109/*
110 This function disables the interrupt corresponding to the Event
111 queue ID for the given NetObject
112*/
113void be_disable_eq_intr(struct be_net_object *pnob)
114{
115 struct CQ_DB_AMAP cqdb;
116 cqdb.dw[0] = 0;
117 AMAP_SET_BITS_PTR(CQ_DB, event, &cqdb, 1);
118 AMAP_SET_BITS_PTR(CQ_DB, rearm, &cqdb, 0);
119 AMAP_SET_BITS_PTR(CQ_DB, num_popped, &cqdb, 0);
120 AMAP_SET_BITS_PTR(CQ_DB, qid, &cqdb, pnob->event_q_id);
121 PD_WRITE(&pnob->fn_obj, cq_db, cqdb.dw[0]);
122}
123
124/*
125 This function enables the interrupt from the network function
126 of the BladeEngine. Use the function be_enable_eq_intr()
127 to enable the interrupt from the event queue of only one specific
128 NetObject
129*/
130void be_enable_intr(struct be_net_object *pnob)
131{
132 struct PCICFG_HOST_TIMER_INT_CTRL_CSR_AMAP ctrl;
133 u32 host_intr;
134
135 ctrl.dw[0] = PCICFG1_READ(&pnob->fn_obj, host_timer_int_ctrl);
136 host_intr = AMAP_GET_BITS_PTR(PCICFG_HOST_TIMER_INT_CTRL_CSR,
137 hostintr, ctrl.dw);
138 if (!host_intr) {
139 AMAP_SET_BITS_PTR(PCICFG_HOST_TIMER_INT_CTRL_CSR,
140 hostintr, ctrl.dw, 1);
141 PCICFG1_WRITE(&pnob->fn_obj, host_timer_int_ctrl,
142 ctrl.dw[0]);
143 }
144}
145
146/*
147 This function disables the interrupt from the network function of
148 the BladeEngine. Use the function be_disable_eq_intr() to
149 disable the interrupt from the event queue of only one specific NetObject
150*/
151void be_disable_intr(struct be_net_object *pnob)
152{
153
154 struct PCICFG_HOST_TIMER_INT_CTRL_CSR_AMAP ctrl;
155 u32 host_intr;
156 ctrl.dw[0] = PCICFG1_READ(&pnob->fn_obj, host_timer_int_ctrl);
157 host_intr = AMAP_GET_BITS_PTR(PCICFG_HOST_TIMER_INT_CTRL_CSR,
158 hostintr, ctrl.dw);
159 if (host_intr) {
160 AMAP_SET_BITS_PTR(PCICFG_HOST_TIMER_INT_CTRL_CSR, hostintr,
161 ctrl.dw, 0);
162 PCICFG1_WRITE(&pnob->fn_obj, host_timer_int_ctrl,
163 ctrl.dw[0]);
164 }
165}
166
167static int be_enable_msix(struct be_adapter *adapter)
168{
169 int i, ret;
170
171 if (!msix)
172 return -1;
173
174 for (i = 0; i < BE_MAX_REQ_MSIX_VECTORS; i++)
175 adapter->msix_entries[i].entry = i;
176
177 ret = pci_enable_msix(adapter->pdev, adapter->msix_entries,
178 BE_MAX_REQ_MSIX_VECTORS);
179
180 if (ret == 0)
181 adapter->msix_enabled = 1;
182 return ret;
183}
184
185static int be_register_isr(struct be_adapter *adapter,
186 struct be_net_object *pnob)
187{
188 struct net_device *netdev = pnob->netdev;
189 int intx = 0, r;
190
191 netdev->irq = adapter->pdev->irq;
192 r = be_enable_msix(adapter);
193
194 if (r == 0) {
195 r = request_irq(adapter->msix_entries[0].vector,
196 be_int, IRQF_SHARED, netdev->name, netdev);
197 if (r) {
198 printk(KERN_WARNING
199 "MSIX Request IRQ failed - Errno %d\n", r);
200 intx = 1;
201 pci_disable_msix(adapter->pdev);
202 adapter->msix_enabled = 0;
203 }
204 } else {
205 intx = 1;
206 }
207
208 if (intx) {
209 r = request_irq(netdev->irq, be_int, IRQF_SHARED,
210 netdev->name, netdev);
211 if (r) {
212 printk(KERN_WARNING
213 "INTx Request IRQ failed - Errno %d\n", r);
214 return -1;
215 }
216 }
217 adapter->isr_registered = 1;
218 return 0;
219}
220
221static void be_unregister_isr(struct be_adapter *adapter)
222{
223 struct net_device *netdev = adapter->netdevp;
224 if (adapter->isr_registered) {
225 if (adapter->msix_enabled) {
226 free_irq(adapter->msix_entries[0].vector, netdev);
227 pci_disable_msix(adapter->pdev);
228 adapter->msix_enabled = 0;
229 } else {
230 free_irq(netdev->irq, netdev);
231 }
232 adapter->isr_registered = 0;
233 }
234}
235
236/*
237 This function processes the Flush Completions that are issued by the
238 ARM F/W, when a Recv Ring is destroyed. A flush completion is
239 identified when a Rx Compl descriptor has the tcpcksum and udpcksum
240 set and the pktsize is 32. These completions are received on the
241 Rx Completion Queue.
242*/
243static u32 be_process_rx_flush_cmpl(struct be_net_object *pnob)
244{
245 struct ETH_RX_COMPL_AMAP *rxcp;
246 unsigned int i = 0;
247 while ((rxcp = be_get_rx_cmpl(pnob)) != NULL) {
248 be_notify_cmpl(pnob, 1, pnob->rx_cq_id, 1);
249 i++;
250 }
251 return i;
252}
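
The flush-completion test described in the comment above could be written
as a predicate like the sketch below. Note the AMAP field names (tcpcksum,
udpcksum, pktsize) are assumed from that comment, not taken from the real
descriptor definition; the helper name is ours:

	/* Sketch only: identify a flush completion, i.e. a Rx Compl entry
	 * with both checksum bits set and a packet size of 32. */
	static bool be_is_flush_compl(struct ETH_RX_COMPL_AMAP *rxcp)
	{
		return AMAP_GET_BITS_PTR(ETH_RX_COMPL, tcpcksum, rxcp) &&
		       AMAP_GET_BITS_PTR(ETH_RX_COMPL, udpcksum, rxcp) &&
		       AMAP_GET_BITS_PTR(ETH_RX_COMPL, pktsize, rxcp) == 32;
	}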
253
254static void be_tx_q_clean(struct be_net_object *pnob)
255{
256 while (atomic_read(&pnob->tx_q_used))
257 process_one_tx_compl(pnob, tx_compl_lastwrb_idx_get(pnob));
258}
259
260static void be_rx_q_clean(struct be_net_object *pnob)
261{
262 if (pnob->rx_ctxt) {
263 int i;
264 struct be_rx_page_info *rx_page_info;
265 for (i = 0; i < pnob->rx_q_len; i++) {
266 rx_page_info = &(pnob->rx_page_info[i]);
267 if (!pnob->rx_pg_shared || rx_page_info->page_offset) {
268 pci_unmap_page(pnob->adapter->pdev,
269 pci_unmap_addr(rx_page_info, bus),
270 pnob->rx_buf_size,
271 PCI_DMA_FROMDEVICE);
272 }
273 if (rx_page_info->page)
274 put_page(rx_page_info->page);
275 memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
276 }
277 pnob->rx_pg_info_hd = 0;
278 }
279}
280
281static void be_destroy_netobj(struct be_net_object *pnob)
282{
283 int status;
284
285 if (pnob->tx_q_created) {
286 status = be_eth_sq_destroy(&pnob->tx_q_obj);
287 pnob->tx_q_created = 0;
288 }
289
290 if (pnob->rx_q_created) {
291 status = be_eth_rq_destroy(&pnob->rx_q_obj);
292 if (status != 0) {
293 status = be_eth_rq_destroy_options(&pnob->rx_q_obj, 0,
294 NULL, NULL);
295 BUG_ON(status);
296 }
297 pnob->rx_q_created = 0;
298 }
299
300 be_process_rx_flush_cmpl(pnob);
301
302 if (pnob->tx_cq_created) {
303 status = be_cq_destroy(&pnob->tx_cq_obj);
304 pnob->tx_cq_created = 0;
305 }
306
307 if (pnob->rx_cq_created) {
308 status = be_cq_destroy(&pnob->rx_cq_obj);
309 pnob->rx_cq_created = 0;
310 }
311
312 if (pnob->mcc_q_created) {
313 status = be_mcc_ring_destroy(&pnob->mcc_q_obj);
314 pnob->mcc_q_created = 0;
315 }
316 if (pnob->mcc_cq_created) {
317 status = be_cq_destroy(&pnob->mcc_cq_obj);
318 pnob->mcc_cq_created = 0;
319 }
320
321 if (pnob->event_q_created) {
322 status = be_eq_destroy(&pnob->event_q_obj);
323 pnob->event_q_created = 0;
324 }
325 be_function_cleanup(&pnob->fn_obj);
326}
327
328/*
329 * free all resources associated with a pnob
330 * Called at the time of module cleanup as well as on any error during
331 * module init. Some resources may be partially allocated in a NetObj.
332 */
333static void netobject_cleanup(struct be_adapter *adapter,
334 struct be_net_object *pnob)
335{
336 struct net_device *netdev = adapter->netdevp;
337
338 if (netif_running(netdev)) {
339 netif_stop_queue(netdev);
340 be_wait_nic_tx_cmplx_cmpl(pnob);
341 be_disable_eq_intr(pnob);
342 }
343
344 be_unregister_isr(adapter);
345
346 if (adapter->tasklet_started) {
347 tasklet_kill(&(adapter->sts_handler));
348 adapter->tasklet_started = 0;
349 }
350 if (pnob->fn_obj_created)
351 be_disable_intr(pnob);
352
353 if (adapter->dev_state != BE_DEV_STATE_NONE)
354 unregister_netdev(netdev);
355
356 if (pnob->fn_obj_created)
357 be_destroy_netobj(pnob);
358
359 adapter->net_obj = NULL;
360 adapter->netdevp = NULL;
361
362 be_rx_q_clean(pnob);
363 if (pnob->rx_ctxt) {
364 kfree(pnob->rx_page_info);
365 kfree(pnob->rx_ctxt);
366 }
367
368 be_tx_q_clean(pnob);
369 kfree(pnob->tx_ctxt);
370
371 if (pnob->mcc_q)
372 pci_free_consistent(adapter->pdev, pnob->mcc_q_size,
373 pnob->mcc_q, pnob->mcc_q_bus);
374
375 if (pnob->mcc_wrb_ctxt)
376 free_pages((unsigned long)pnob->mcc_wrb_ctxt,
377 get_order(pnob->mcc_wrb_ctxt_size));
378
379 if (pnob->mcc_cq)
380 pci_free_consistent(adapter->pdev, pnob->mcc_cq_size,
381 pnob->mcc_cq, pnob->mcc_cq_bus);
382
383 if (pnob->event_q)
384 pci_free_consistent(adapter->pdev, pnob->event_q_size,
385 pnob->event_q, pnob->event_q_bus);
386
387 if (pnob->tx_cq)
388 pci_free_consistent(adapter->pdev, pnob->tx_cq_size,
389 pnob->tx_cq, pnob->tx_cq_bus);
390
391 if (pnob->tx_q)
392 pci_free_consistent(adapter->pdev, pnob->tx_q_size,
393 pnob->tx_q, pnob->tx_q_bus);
394
395 if (pnob->rx_q)
396 pci_free_consistent(adapter->pdev, pnob->rx_q_size,
397 pnob->rx_q, pnob->rx_q_bus);
398
399 if (pnob->rx_cq)
400 pci_free_consistent(adapter->pdev, pnob->rx_cq_size,
401 pnob->rx_cq, pnob->rx_cq_bus);
402
403
404 if (pnob->mb_ptr)
405 pci_free_consistent(adapter->pdev, pnob->mb_size, pnob->mb_ptr,
406 pnob->mb_bus);
407
408 free_netdev(netdev);
409}
410
411
412static int be_nob_ring_alloc(struct be_adapter *adapter,
413 struct be_net_object *pnob)
414{
415 u32 size;
416
417	/* Mailbox rd; the mailbox pointer needs to be 16-byte aligned */
418 pnob->mb_size = sizeof(struct MCC_MAILBOX_AMAP) + 16;
419 pnob->mb_ptr = pci_alloc_consistent(adapter->pdev, pnob->mb_size,
420 &pnob->mb_bus);
421 if (!pnob->mb_bus)
422 return -1;
423 memset(pnob->mb_ptr, 0, pnob->mb_size);
424 pnob->mb_rd.va = PTR_ALIGN(pnob->mb_ptr, 16);
425 pnob->mb_rd.pa = PTR_ALIGN(pnob->mb_bus, 16);
426 pnob->mb_rd.length = sizeof(struct MCC_MAILBOX_AMAP);
427 /*
428 * Event queue
429 */
430 pnob->event_q_len = EVENT_Q_LEN;
431 pnob->event_q_size = pnob->event_q_len * sizeof(struct EQ_ENTRY_AMAP);
432 pnob->event_q = pci_alloc_consistent(adapter->pdev, pnob->event_q_size,
433 &pnob->event_q_bus);
434 if (!pnob->event_q_bus)
435 return -1;
436 memset(pnob->event_q, 0, pnob->event_q_size);
437 /*
438 * Eth TX queue
439 */
440 pnob->tx_q_len = ETH_TXQ_LEN;
441 pnob->tx_q_port = 0;
442 pnob->tx_q_size = pnob->tx_q_len * sizeof(struct ETH_WRB_AMAP);
443 pnob->tx_q = pci_alloc_consistent(adapter->pdev, pnob->tx_q_size,
444 &pnob->tx_q_bus);
445 if (!pnob->tx_q_bus)
446 return -1;
447 memset(pnob->tx_q, 0, pnob->tx_q_size);
448 /*
449 * Eth TX Compl queue
450 */
451 pnob->txcq_len = ETH_TXCQ_LEN;
452 pnob->tx_cq_size = pnob->txcq_len * sizeof(struct ETH_TX_COMPL_AMAP);
453 pnob->tx_cq = pci_alloc_consistent(adapter->pdev, pnob->tx_cq_size,
454 &pnob->tx_cq_bus);
455 if (!pnob->tx_cq_bus)
456 return -1;
457 memset(pnob->tx_cq, 0, pnob->tx_cq_size);
458 /*
459 * Eth RX queue
460 */
461 pnob->rx_q_len = ETH_RXQ_LEN;
462 pnob->rx_q_size = pnob->rx_q_len * sizeof(struct ETH_RX_D_AMAP);
463 pnob->rx_q = pci_alloc_consistent(adapter->pdev, pnob->rx_q_size,
464 &pnob->rx_q_bus);
465 if (!pnob->rx_q_bus)
466 return -1;
467 memset(pnob->rx_q, 0, pnob->rx_q_size);
468 /*
469 * Eth Unicast RX Compl queue
470 */
471 pnob->rx_cq_len = ETH_UC_RXCQ_LEN;
472 pnob->rx_cq_size = pnob->rx_cq_len *
473 sizeof(struct ETH_RX_COMPL_AMAP);
474 pnob->rx_cq = pci_alloc_consistent(adapter->pdev, pnob->rx_cq_size,
475 &pnob->rx_cq_bus);
476 if (!pnob->rx_cq_bus)
477 return -1;
478 memset(pnob->rx_cq, 0, pnob->rx_cq_size);
479
480 /* TX resources */
481 size = pnob->tx_q_len * sizeof(void **);
482 pnob->tx_ctxt = kzalloc(size, GFP_KERNEL);
483 if (pnob->tx_ctxt == NULL)
484 return -1;
485
486 /* RX resources */
487 size = pnob->rx_q_len * sizeof(void *);
488 pnob->rx_ctxt = kzalloc(size, GFP_KERNEL);
489 if (pnob->rx_ctxt == NULL)
490 return -1;
491
492 size = (pnob->rx_q_len * sizeof(struct be_rx_page_info));
493 pnob->rx_page_info = kzalloc(size, GFP_KERNEL);
494 if (pnob->rx_page_info == NULL)
495 return -1;
496
497 adapter->eth_statsp = kzalloc(sizeof(struct FWCMD_ETH_GET_STATISTICS),
498 GFP_KERNEL);
499 if (adapter->eth_statsp == NULL)
500 return -1;
501 pnob->rx_buf_size = rxbuf_size;
502 return 0;
503}
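
The mailbox handling at the top of be_nob_ring_alloc() uses a common
over-allocate-then-align pattern: 16 extra bytes are requested so that a
16-byte-aligned window always fits inside the allocation. The same pattern
in isolation, as a hedged sketch (the function name is ours):

	/* Sketch: return a 16-byte-aligned window inside a DMA-coherent
	 * buffer, mirroring the mb_ptr/mb_rd setup above. */
	static void *alloc_dma_aligned16(struct pci_dev *pdev, size_t len,
					 dma_addr_t *pa)
	{
		dma_addr_t bus;
		void *va = pci_alloc_consistent(pdev, len + 16, &bus);

		if (!va)
			return NULL;
		memset(va, 0, len + 16);
		*pa = ALIGN(bus, 16);		/* aligned bus address */
		return PTR_ALIGN(va, 16);	/* aligned virtual address */
	}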
504
505/*
506 This function initializes the be_net_object for subsequent
507 network operations.
508
509 Before calling this function, the driver must have allocated
510 space for the NetObject structure, initialized the structure,
511 allocated DMAable memory for all the network queues that form
512 part of the NetObject and populated the start address (virtual)
513 and number of entries allocated for each queue in the NetObject structure.
514
515 The driver must also have allocated memory to hold the
516	 mailbox structure (MCC_MAILBOX) and posted the physical address,
517 virtual addresses and the size of the mailbox memory in the
518 NetObj.mb_rd. This structure is used by BECLIB for
519 initial communication with the embedded MCC processor. BECLIB
520 uses the mailbox until MCC rings are created for more efficient
521 communication with the MCC processor.
522
523	 If the driver wants to create multiple network interfaces for more
524	 than one protection domain, it can call be_create_netobj()
525	 multiple times, once for each protection domain. A maximum of
526	 32 protection domains is supported.
527
528*/
529static int
530be_create_netobj(struct be_net_object *pnob, u8 __iomem *csr_va,
531 u8 __iomem *db_va, u8 __iomem *pci_va)
532{
533 int status = 0;
534 bool eventable = false, tx_no_delay = false, rx_no_delay = false;
535 struct be_eq_object *eq_objectp = NULL;
536 struct be_function_object *pfob = &pnob->fn_obj;
537 struct ring_desc rd;
538 u32 set_rxbuf_size;
539 u32 tx_cmpl_wm = CEV_WMARK_96; /* 0xffffffff to disable */
540 u32 rx_cmpl_wm = CEV_WMARK_160; /* 0xffffffff to disable */
541 u32 eq_delay = 0; /* delay in 8usec units. 0xffffffff to disable */
542
543 memset(&rd, 0, sizeof(struct ring_desc));
544
545 status = be_function_object_create(csr_va, db_va, pci_va,
546 BE_FUNCTION_TYPE_NETWORK, &pnob->mb_rd, pfob);
547 if (status != BE_SUCCESS)
548 return status;
549 pnob->fn_obj_created = true;
550
551 if (tx_cmpl_wm == 0xffffffff)
552 tx_no_delay = true;
553 if (rx_cmpl_wm == 0xffffffff)
554 rx_no_delay = true;
555 /*
556 * now create the necessary rings
557 * Event Queue first.
558 */
559 if (pnob->event_q_len) {
560 rd.va = pnob->event_q;
561 rd.pa = pnob->event_q_bus;
562 rd.length = pnob->event_q_size;
563
564 status = be_eq_create(pfob, &rd, 4, pnob->event_q_len,
565 (u32) -1, /* CEV_WMARK_* or -1 */
566 eq_delay, /* in 8us units, or -1 */
567 &pnob->event_q_obj);
568 if (status != BE_SUCCESS)
569 goto error_ret;
570 pnob->event_q_id = pnob->event_q_obj.eq_id;
571 pnob->event_q_created = 1;
572 eventable = true;
573 eq_objectp = &pnob->event_q_obj;
574 }
575 /*
576 * Now Eth Tx Compl. queue.
577 */
578 if (pnob->txcq_len) {
579 rd.va = pnob->tx_cq;
580 rd.pa = pnob->tx_cq_bus;
581 rd.length = pnob->tx_cq_size;
582
583 status = be_cq_create(pfob, &rd,
584 pnob->txcq_len * sizeof(struct ETH_TX_COMPL_AMAP),
585	false, /* solicited events */
586 tx_no_delay, /* nodelay */
587 tx_cmpl_wm, /* Watermark encodings */
588 eq_objectp, &pnob->tx_cq_obj);
589 if (status != BE_SUCCESS)
590 goto error_ret;
591
592 pnob->tx_cq_id = pnob->tx_cq_obj.cq_id;
593 pnob->tx_cq_created = 1;
594 }
595 /*
596 * Eth Tx queue
597 */
598 if (pnob->tx_q_len) {
599 struct be_eth_sq_parameters ex_params = { 0 };
600 u32 type;
601
602 if (pnob->tx_q_port) {
603 /* TXQ to be bound to a specific port */
604 type = BE_ETH_TX_RING_TYPE_BOUND;
605 ex_params.port = pnob->tx_q_port - 1;
606 } else
607 type = BE_ETH_TX_RING_TYPE_STANDARD;
608
609 rd.va = pnob->tx_q;
610 rd.pa = pnob->tx_q_bus;
611 rd.length = pnob->tx_q_size;
612
613 status = be_eth_sq_create_ex(pfob, &rd,
614 pnob->tx_q_len * sizeof(struct ETH_WRB_AMAP),
615 type, 2, &pnob->tx_cq_obj,
616 &ex_params, &pnob->tx_q_obj);
617
618 if (status != BE_SUCCESS)
619 goto error_ret;
620
621 pnob->tx_q_id = pnob->tx_q_obj.bid;
622 pnob->tx_q_created = 1;
623 }
624 /*
625 * Now Eth Rx compl. queue. Always needed.
626 */
627 rd.va = pnob->rx_cq;
628 rd.pa = pnob->rx_cq_bus;
629 rd.length = pnob->rx_cq_size;
630
631 status = be_cq_create(pfob, &rd,
632 pnob->rx_cq_len * sizeof(struct ETH_RX_COMPL_AMAP),
633	false, /* solicited events */
634 rx_no_delay, /* nodelay */
635 rx_cmpl_wm, /* Watermark encodings */
636 eq_objectp, &pnob->rx_cq_obj);
637 if (status != BE_SUCCESS)
638 goto error_ret;
639
640 pnob->rx_cq_id = pnob->rx_cq_obj.cq_id;
641 pnob->rx_cq_created = 1;
642
643 status = be_eth_rq_set_frag_size(pfob, pnob->rx_buf_size,
644 (u32 *) &set_rxbuf_size);
645 if (status != BE_SUCCESS) {
646 be_eth_rq_get_frag_size(pfob, (u32 *) &pnob->rx_buf_size);
647 if ((pnob->rx_buf_size != 2048) && (pnob->rx_buf_size != 4096)
648 && (pnob->rx_buf_size != 8192))
649 goto error_ret;
650 } else {
651 if (pnob->rx_buf_size != set_rxbuf_size)
652 pnob->rx_buf_size = set_rxbuf_size;
653 }
654 /*
655	 * Eth RX queue. be_eth_rq_create() always assumes a 2-page ring size
656 */
657 rd.va = pnob->rx_q;
658 rd.pa = pnob->rx_q_bus;
659 rd.length = pnob->rx_q_size;
660
661 status = be_eth_rq_create(pfob, &rd, &pnob->rx_cq_obj,
662 &pnob->rx_cq_obj, &pnob->rx_q_obj);
663
664 if (status != BE_SUCCESS)
665 goto error_ret;
666
667 pnob->rx_q_id = pnob->rx_q_obj.rid;
668 pnob->rx_q_created = 1;
669
670 return BE_SUCCESS; /* All required queues created. */
671
672error_ret:
673 be_destroy_netobj(pnob);
674 return status;
675}
676
677static int be_nob_ring_init(struct be_adapter *adapter,
678 struct be_net_object *pnob)
679{
680 int status;
681
682 pnob->event_q_tl = 0;
683
684 pnob->tx_q_hd = 0;
685 pnob->tx_q_tl = 0;
686
687 pnob->tx_cq_tl = 0;
688
689 pnob->rx_cq_tl = 0;
690
691 memset(pnob->event_q, 0, pnob->event_q_size);
692 memset(pnob->tx_cq, 0, pnob->tx_cq_size);
693 memset(pnob->tx_ctxt, 0, pnob->tx_q_len * sizeof(void **));
694 memset(pnob->rx_ctxt, 0, pnob->rx_q_len * sizeof(void *));
695 pnob->rx_pg_info_hd = 0;
696 pnob->rx_q_hd = 0;
697 atomic_set(&pnob->rx_q_posted, 0);
698
699 status = be_create_netobj(pnob, adapter->csr_va, adapter->db_va,
700 adapter->pci_va);
701 if (status != BE_SUCCESS)
702 return -1;
703
704 be_post_eth_rx_buffs(pnob);
705 return 0;
706}
707
708/* This function handles async callback for link status */
709static void
710be_link_status_async_callback(void *context, u32 event_code, void *event)
711{
712 struct ASYNC_EVENT_LINK_STATE_AMAP *link_status = event;
713 struct be_adapter *adapter = context;
714 bool link_enable = false;
715 struct be_net_object *pnob;
716 struct ASYNC_EVENT_TRAILER_AMAP *async_trailer;
717 struct net_device *netdev;
718 u32 async_event_code, async_event_type, active_port;
719 u32 port0_link_status, port1_link_status, port0_duplex, port1_duplex;
720 u32 port0_speed, port1_speed;
721
722 if (event_code != ASYNC_EVENT_CODE_LINK_STATE) {
723 /* Not our event to handle */
724 return;
725 }
726 async_trailer = (struct ASYNC_EVENT_TRAILER_AMAP *)
727 ((u8 *) event + sizeof(struct MCC_CQ_ENTRY_AMAP) -
728 sizeof(struct ASYNC_EVENT_TRAILER_AMAP));
729
730 async_event_code = AMAP_GET_BITS_PTR(ASYNC_EVENT_TRAILER, event_code,
731 async_trailer);
732 BUG_ON(async_event_code != ASYNC_EVENT_CODE_LINK_STATE);
733
734 pnob = adapter->net_obj;
735 netdev = pnob->netdev;
736
737 /* Determine if this event is a switch VLD or a physical link event */
738 async_event_type = AMAP_GET_BITS_PTR(ASYNC_EVENT_TRAILER, event_type,
739 async_trailer);
740 active_port = AMAP_GET_BITS_PTR(ASYNC_EVENT_LINK_STATE,
741 active_port, link_status);
742 port0_link_status = AMAP_GET_BITS_PTR(ASYNC_EVENT_LINK_STATE,
743 port0_link_status, link_status);
744 port1_link_status = AMAP_GET_BITS_PTR(ASYNC_EVENT_LINK_STATE,
745 port1_link_status, link_status);
746 port0_duplex = AMAP_GET_BITS_PTR(ASYNC_EVENT_LINK_STATE,
747 port0_duplex, link_status);
748 port1_duplex = AMAP_GET_BITS_PTR(ASYNC_EVENT_LINK_STATE,
749 port1_duplex, link_status);
750 port0_speed = AMAP_GET_BITS_PTR(ASYNC_EVENT_LINK_STATE,
751 port0_speed, link_status);
752 port1_speed = AMAP_GET_BITS_PTR(ASYNC_EVENT_LINK_STATE,
753 port1_speed, link_status);
754 if (async_event_type == NTWK_LINK_TYPE_VIRTUAL) {
755 adapter->be_stat.bes_link_change_virtual++;
756 if (adapter->be_link_sts->active_port != active_port) {
757 dev_notice(&netdev->dev,
758 "Active port changed due to VLD on switch\n");
759 } else {
760 dev_notice(&netdev->dev, "Link status update\n");
761 }
762
763 } else {
764 adapter->be_stat.bes_link_change_physical++;
765 if (adapter->be_link_sts->active_port != active_port) {
766 dev_notice(&netdev->dev,
767 "Active port changed due to port link"
768 " status change\n");
769 } else {
770 dev_notice(&netdev->dev, "Link status update\n");
771 }
772 }
773
774	memset(adapter->be_link_sts, 0, sizeof(*adapter->be_link_sts));
775
776 if ((port0_link_status == ASYNC_EVENT_LINK_UP) ||
777 (port1_link_status == ASYNC_EVENT_LINK_UP)) {
778 if ((adapter->port0_link_sts == BE_PORT_LINK_DOWN) &&
779 (adapter->port1_link_sts == BE_PORT_LINK_DOWN)) {
780		/* Both ports were down earlier, so link is coming up now */
781 link_enable = true;
782 }
783
784 if (port0_link_status == ASYNC_EVENT_LINK_UP) {
785 adapter->port0_link_sts = BE_PORT_LINK_UP;
786 adapter->be_link_sts->mac0_duplex = port0_duplex;
787 adapter->be_link_sts->mac0_speed = port0_speed;
788 if (active_port == NTWK_PORT_A)
789 adapter->be_link_sts->active_port = 0;
790 } else
791 adapter->port0_link_sts = BE_PORT_LINK_DOWN;
792
793 if (port1_link_status == ASYNC_EVENT_LINK_UP) {
794 adapter->port1_link_sts = BE_PORT_LINK_UP;
795 adapter->be_link_sts->mac1_duplex = port1_duplex;
796 adapter->be_link_sts->mac1_speed = port1_speed;
797 if (active_port == NTWK_PORT_B)
798 adapter->be_link_sts->active_port = 1;
799 } else
800 adapter->port1_link_sts = BE_PORT_LINK_DOWN;
801
802 printk(KERN_INFO "Link Properties for %s:\n", netdev->name);
803 dev_info(&netdev->dev, "Link Properties:\n");
804 be_print_link_info(adapter->be_link_sts);
805
806 if (!link_enable)
807 return;
808 /*
809	 * Both ports were down previously, but at least one of
810	 * them has come up. If this netdevice's carrier is not up,
811	 * then indicate it to the stack.
812 */
813 if (!netif_carrier_ok(netdev)) {
814 netif_start_queue(netdev);
815 netif_carrier_on(netdev);
816 }
817 return;
818 }
819
820 /* Now both the ports are down. Tell the stack about it */
821 dev_info(&netdev->dev, "Both ports are down\n");
822 adapter->port0_link_sts = BE_PORT_LINK_DOWN;
823 adapter->port1_link_sts = BE_PORT_LINK_DOWN;
824 if (netif_carrier_ok(netdev)) {
825 netif_carrier_off(netdev);
826 netif_stop_queue(netdev);
827 }
828 return;
829}
830
831static int be_mcc_create(struct be_adapter *adapter)
832{
833 struct be_net_object *pnob;
834
835 pnob = adapter->net_obj;
836 /*
837 * Create the MCC ring so that all further communication with
838	 * MCC can go through the ring. We do this at the end since
839 * we do not want to be dealing with interrupts until the
840 * initialization is complete.
841 */
842 pnob->mcc_q_len = MCC_Q_LEN;
843 pnob->mcc_q_size = pnob->mcc_q_len * sizeof(struct MCC_WRB_AMAP);
844 pnob->mcc_q = pci_alloc_consistent(adapter->pdev, pnob->mcc_q_size,
845 &pnob->mcc_q_bus);
846 if (!pnob->mcc_q_bus)
847 return -1;
848 /*
849 * space for MCC WRB context
850 */
851 pnob->mcc_wrb_ctxtLen = MCC_Q_LEN;
852 pnob->mcc_wrb_ctxt_size = pnob->mcc_wrb_ctxtLen *
853 sizeof(struct be_mcc_wrb_context);
854 pnob->mcc_wrb_ctxt = (void *)__get_free_pages(GFP_KERNEL,
855 get_order(pnob->mcc_wrb_ctxt_size));
856 if (pnob->mcc_wrb_ctxt == NULL)
857 return -1;
858 /*
859 * Space for MCC compl. ring
860 */
861 pnob->mcc_cq_len = MCC_CQ_LEN;
862 pnob->mcc_cq_size = pnob->mcc_cq_len * sizeof(struct MCC_CQ_ENTRY_AMAP);
863 pnob->mcc_cq = pci_alloc_consistent(adapter->pdev, pnob->mcc_cq_size,
864 &pnob->mcc_cq_bus);
865 if (!pnob->mcc_cq_bus)
866 return -1;
867 return 0;
868}
869
870/*
871 This function creates the MCC request and completion ring required
872 for communicating with the ARM processor. The caller must have
873	 allocated the required amount of memory for the MCC ring and MCC
874	 completion ring and posted the virtual address and number of
875	 entries in the corresponding members (mcc_q and mcc_cq) in the
876	 NetObject structure.
877
878 When this call is completed, all further communication with
879 ARM will switch from mailbox to this ring.
880
881 pnob - Pointer to the NetObject structure. This NetObject should
882 have been created using a previous call to be_create_netobj()
883*/
884int be_create_mcc_rings(struct be_net_object *pnob)
885{
886 int status = 0;
887 struct ring_desc rd;
888 struct be_function_object *pfob = &pnob->fn_obj;
889
890 memset(&rd, 0, sizeof(struct ring_desc));
891 if (pnob->mcc_cq_len) {
892 rd.va = pnob->mcc_cq;
893 rd.pa = pnob->mcc_cq_bus;
894 rd.length = pnob->mcc_cq_size;
895
896 status = be_cq_create(pfob, &rd,
897 pnob->mcc_cq_len * sizeof(struct MCC_CQ_ENTRY_AMAP),
898	false, /* solicited events */
899 true, /* nodelay */
900 0, /* 0 Watermark since Nodelay is true */
901 &pnob->event_q_obj,
902 &pnob->mcc_cq_obj);
903
904 if (status != BE_SUCCESS)
905 return status;
906
907 pnob->mcc_cq_id = pnob->mcc_cq_obj.cq_id;
908 pnob->mcc_cq_created = 1;
909 }
910 if (pnob->mcc_q_len) {
911 rd.va = pnob->mcc_q;
912 rd.pa = pnob->mcc_q_bus;
913 rd.length = pnob->mcc_q_size;
914
915 status = be_mcc_ring_create(pfob, &rd,
916 pnob->mcc_q_len * sizeof(struct MCC_WRB_AMAP),
917 pnob->mcc_wrb_ctxt, pnob->mcc_wrb_ctxtLen,
918 &pnob->mcc_cq_obj, &pnob->mcc_q_obj);
919
920 if (status != BE_SUCCESS)
921 return status;
922
923 pnob->mcc_q_created = 1;
924 }
925 return BE_SUCCESS;
926}
927
928static int be_mcc_init(struct be_adapter *adapter)
929{
930 u32 r;
931 struct be_net_object *pnob;
932
933 pnob = adapter->net_obj;
934 memset(pnob->mcc_q, 0, pnob->mcc_q_size);
935 pnob->mcc_q_hd = 0;
936
937 memset(pnob->mcc_wrb_ctxt, 0, pnob->mcc_wrb_ctxt_size);
938
939 memset(pnob->mcc_cq, 0, pnob->mcc_cq_size);
940 pnob->mcc_cq_tl = 0;
941
942 r = be_create_mcc_rings(adapter->net_obj);
943 if (r != BE_SUCCESS)
944 return -1;
945
946 return 0;
947}
948
949static void be_remove(struct pci_dev *pdev)
950{
951 struct be_net_object *pnob;
952 struct be_adapter *adapter;
953
954 adapter = pci_get_drvdata(pdev);
955 if (!adapter)
956 return;
957
958 pci_set_drvdata(pdev, NULL);
959 pnob = (struct be_net_object *)adapter->net_obj;
960
961 flush_scheduled_work();
962
963 if (pnob) {
964 /* Unregister async callback function for link status updates */
965 if (pnob->mcc_q_created)
966 be_mcc_add_async_event_callback(&pnob->mcc_q_obj,
967 NULL, NULL);
968 netobject_cleanup(adapter, pnob);
969 }
970
971 if (adapter->csr_va)
972 iounmap(adapter->csr_va);
973 if (adapter->db_va)
974 iounmap(adapter->db_va);
975 if (adapter->pci_va)
976 iounmap(adapter->pci_va);
977
978 pci_release_regions(adapter->pdev);
979 pci_disable_device(adapter->pdev);
980
981 kfree(adapter->be_link_sts);
982 kfree(adapter->eth_statsp);
983
984 if (adapter->timer_ctxt.get_stats_timer.function)
985 del_timer_sync(&adapter->timer_ctxt.get_stats_timer);
986 kfree(adapter);
987}
988
989/*
990 * This function is called by the PCI sub-system when it finds a PCI
991 * device with dev/vendor IDs that match with one of our devices.
992 * All of the driver initialization is done in this function.
993 */
994static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
995{
996 int status = 0;
997 struct be_adapter *adapter;
998 struct FWCMD_COMMON_GET_FW_VERSION_RESPONSE_PAYLOAD get_fwv;
999 struct be_net_object *pnob;
1000 struct net_device *netdev;
1001
1002 status = pci_enable_device(pdev);
1003 if (status)
1004 goto error;
1005
1006 status = pci_request_regions(pdev, be_driver_name);
1007 if (status)
1008 goto error_pci_req;
1009
1010 pci_set_master(pdev);
1011 adapter = kzalloc(sizeof(struct be_adapter), GFP_KERNEL);
1012 if (adapter == NULL) {
1013 status = -ENOMEM;
1014 goto error_adapter;
1015 }
1016 adapter->dev_state = BE_DEV_STATE_NONE;
1017 adapter->pdev = pdev;
1018 pci_set_drvdata(pdev, adapter);
1019
1020 adapter->enable_aic = 1;
1021 adapter->max_eqd = MAX_EQD;
1022 adapter->min_eqd = 0;
1023 adapter->cur_eqd = 0;
1024
1025 status = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
1026 if (!status) {
1027 adapter->dma_64bit_cap = true;
1028 } else {
1029 adapter->dma_64bit_cap = false;
1030 status = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
1031 if (status != 0) {
1032 printk(KERN_ERR "Could not set PCI DMA Mask\n");
1033 goto cleanup;
1034 }
1035 }
1036
1037 status = init_pci_be_function(adapter, pdev);
1038 if (status != 0) {
1039 printk(KERN_ERR "Failed to map PCI BARS\n");
1040 status = -ENOMEM;
1041 goto cleanup;
1042 }
1043
1044 be_trace_set_level(DL_ALWAYS | DL_ERR);
1045
1046 adapter->be_link_sts = kmalloc(sizeof(struct BE_LINK_STATUS),
1047 GFP_KERNEL);
1048 if (adapter->be_link_sts == NULL) {
1049 printk(KERN_ERR "Memory allocation for link status "
1050 "buffer failed\n");
1051 goto cleanup;
1052 }
1053 spin_lock_init(&adapter->txq_lock);
1054
1055 netdev = alloc_etherdev(sizeof(struct be_net_object));
1056 if (netdev == NULL) {
1057 status = -ENOMEM;
1058 goto cleanup;
1059 }
1060 pnob = netdev_priv(netdev);
1061 adapter->net_obj = pnob;
1062 adapter->netdevp = netdev;
1063 pnob->adapter = adapter;
1064 pnob->netdev = netdev;
1065
1066 status = be_nob_ring_alloc(adapter, pnob);
1067 if (status != 0)
1068 goto cleanup;
1069
1070 status = be_nob_ring_init(adapter, pnob);
1071 if (status != 0)
1072 goto cleanup;
1073
1074 be_rxf_mac_address_read_write(&pnob->fn_obj, false, false, false,
1075 false, false, netdev->dev_addr, NULL, NULL);
1076
1077 netdev->init = &benet_init;
1078 netif_carrier_off(netdev);
1079 netif_stop_queue(netdev);
1080
1081 SET_NETDEV_DEV(netdev, &(adapter->pdev->dev));
1082
1083 netif_napi_add(netdev, &pnob->napi, be_poll, 64);
1084
 1085	/* if the rx_frag size is 2K, one page is shared as two RX frags */
1086 pnob->rx_pg_shared =
1087 (pnob->rx_buf_size <= PAGE_SIZE / 2) ? true : false;
1088 if (pnob->rx_buf_size != rxbuf_size) {
1089 printk(KERN_WARNING
1090 "Could not set Rx buffer size to %d. Using %d\n",
1091 rxbuf_size, pnob->rx_buf_size);
1092 rxbuf_size = pnob->rx_buf_size;
1093 }
1094
1095 tasklet_init(&(adapter->sts_handler), be_process_intr,
1096 (unsigned long)adapter);
1097 adapter->tasklet_started = 1;
1098 spin_lock_init(&(adapter->int_lock));
1099
1100 status = be_register_isr(adapter, pnob);
1101 if (status != 0)
1102 goto cleanup;
1103
1104 adapter->rx_csum = 1;
1105 adapter->max_rx_coal = BE_LRO_MAX_PKTS;
1106
1107 memset(&get_fwv, 0,
1108 sizeof(struct FWCMD_COMMON_GET_FW_VERSION_RESPONSE_PAYLOAD));
1109 printk(KERN_INFO "BladeEngine Driver version:%s. "
1110 "Copyright ServerEngines, Corporation 2005 - 2008\n",
1111 be_drvr_ver);
1112 status = be_function_get_fw_version(&pnob->fn_obj, &get_fwv, NULL,
1113 NULL);
1114 if (status == BE_SUCCESS) {
1115 strncpy(be_fw_ver, get_fwv.firmware_version_string, 32);
1116 printk(KERN_INFO "BladeEngine Firmware Version:%s\n",
1117 get_fwv.firmware_version_string);
1118 } else {
1119 printk(KERN_WARNING "Unable to get BE Firmware Version\n");
1120 }
1121
1122 sema_init(&adapter->get_eth_stat_sem, 0);
1123 init_timer(&adapter->timer_ctxt.get_stats_timer);
1124 atomic_set(&adapter->timer_ctxt.get_stat_flag, 0);
1125 adapter->timer_ctxt.get_stats_timer.function =
1126 &be_get_stats_timer_handler;
1127
1128 status = be_mcc_create(adapter);
1129 if (status < 0)
1130 goto cleanup;
1131 status = be_mcc_init(adapter);
1132 if (status < 0)
1133 goto cleanup;
1134
1135
1136 status = be_mcc_add_async_event_callback(&adapter->net_obj->mcc_q_obj,
1137 be_link_status_async_callback, (void *)adapter);
1138 if (status != BE_SUCCESS) {
1139 printk(KERN_WARNING "add_async_event_callback failed");
1140 printk(KERN_WARNING
1141 "Link status changes may not be reflected\n");
1142 }
1143
1144 status = register_netdev(netdev);
1145 if (status != 0)
1146 goto cleanup;
1147 be_update_link_status(adapter);
1148 adapter->dev_state = BE_DEV_STATE_INIT;
1149 return 0;
1150
1151cleanup:
1152 be_remove(pdev);
1153 return status;
1154error_adapter:
1155 pci_release_regions(pdev);
1156error_pci_req:
1157 pci_disable_device(pdev);
1158error:
 1159	printk(KERN_ERR "BladeEngine initialization failed\n");
1160 return status;
1161}
1162
1163/*
1164 * Get the current link status and print the status on console
1165 */
1166void be_update_link_status(struct be_adapter *adapter)
1167{
1168 int status;
1169 struct be_net_object *pnob = adapter->net_obj;
1170
1171 status = be_rxf_link_status(&pnob->fn_obj, adapter->be_link_sts, NULL,
1172 NULL, NULL);
1173 if (status == BE_SUCCESS) {
1174 if (adapter->be_link_sts->mac0_speed &&
1175 adapter->be_link_sts->mac0_duplex)
1176 adapter->port0_link_sts = BE_PORT_LINK_UP;
1177 else
1178 adapter->port0_link_sts = BE_PORT_LINK_DOWN;
1179
1180 if (adapter->be_link_sts->mac1_speed &&
1181 adapter->be_link_sts->mac1_duplex)
1182 adapter->port1_link_sts = BE_PORT_LINK_UP;
1183 else
1184 adapter->port1_link_sts = BE_PORT_LINK_DOWN;
1185
1186 dev_info(&pnob->netdev->dev, "Link Properties:\n");
1187 be_print_link_info(adapter->be_link_sts);
1188 return;
1189 }
1190 dev_info(&pnob->netdev->dev, "Could not get link status\n");
1191 return;
1192}
1193
1194
1195#ifdef CONFIG_PM
1196static void
1197be_pm_cleanup(struct be_adapter *adapter,
1198 struct be_net_object *pnob, struct net_device *netdev)
1199{
1200 netif_carrier_off(netdev);
1201 netif_stop_queue(netdev);
1202
1203 be_wait_nic_tx_cmplx_cmpl(pnob);
1204 be_disable_eq_intr(pnob);
1205
1206 if (adapter->tasklet_started) {
1207 tasklet_kill(&adapter->sts_handler);
1208 adapter->tasklet_started = 0;
1209 }
1210
1211 be_unregister_isr(adapter);
1212 be_disable_intr(pnob);
1213
1214 be_tx_q_clean(pnob);
1215 be_rx_q_clean(pnob);
1216
1217 be_destroy_netobj(pnob);
1218}
1219
1220static int be_suspend(struct pci_dev *pdev, pm_message_t state)
1221{
1222 struct be_adapter *adapter = pci_get_drvdata(pdev);
1223 struct net_device *netdev = adapter->netdevp;
1224 struct be_net_object *pnob = netdev_priv(netdev);
1225
1226 adapter->dev_pm_state = adapter->dev_state;
1227 adapter->dev_state = BE_DEV_STATE_SUSPEND;
1228
1229 netif_device_detach(netdev);
1230 if (netif_running(netdev))
1231 be_pm_cleanup(adapter, pnob, netdev);
1232
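	/* Arm wake-up for both D3hot (3) and D3cold (4); the raw PCI power-state values are used here. */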
1233 pci_enable_wake(pdev, 3, 1);
1234 pci_enable_wake(pdev, 4, 1); /* D3 Cold = 4 */
1235 pci_save_state(pdev);
1236 pci_disable_device(pdev);
1237 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1238 return 0;
1239}
1240
1241static void be_up(struct be_adapter *adapter)
1242{
1243 struct be_net_object *pnob = adapter->net_obj;
1244
1245 if (pnob->num_vlans != 0)
1246 be_rxf_vlan_config(&pnob->fn_obj, false, pnob->num_vlans,
1247 pnob->vlan_tag, NULL, NULL, NULL);
1248
1249}
1250
1251static int be_resume(struct pci_dev *pdev)
1252{
1253 int status = 0;
1254 struct be_adapter *adapter = pci_get_drvdata(pdev);
1255 struct net_device *netdev = adapter->netdevp;
1256 struct be_net_object *pnob = netdev_priv(netdev);
1257
1258 netif_device_detach(netdev);
1259
1260 status = pci_enable_device(pdev);
1261 if (status)
1262 return status;
1263
1264 pci_set_power_state(pdev, 0);
1265 pci_restore_state(pdev);
1266 pci_enable_wake(pdev, 3, 0);
1267 pci_enable_wake(pdev, 4, 0); /* 4 is D3 cold */
1268
1269 netif_carrier_on(netdev);
1270 netif_start_queue(netdev);
1271
1272 if (netif_running(netdev)) {
1273 be_rxf_mac_address_read_write(&pnob->fn_obj, false, false,
1274 false, true, false, netdev->dev_addr, NULL, NULL);
1275
1276 status = be_nob_ring_init(adapter, pnob);
1277 if (status < 0)
1278 return status;
1279
1280 tasklet_init(&(adapter->sts_handler), be_process_intr,
1281 (unsigned long)adapter);
1282 adapter->tasklet_started = 1;
1283
1284 if (be_register_isr(adapter, pnob) != 0) {
1285 printk(KERN_ERR "be_register_isr failed\n");
1286 return status;
1287 }
1288
1289
1290 status = be_mcc_init(adapter);
1291 if (status < 0) {
1292 printk(KERN_ERR "be_mcc_init failed\n");
1293 return status;
1294 }
1295 be_update_link_status(adapter);
1296 /*
1297 * Register async call back function to handle link
1298 * status updates
1299 */
1300 status = be_mcc_add_async_event_callback(
1301 &adapter->net_obj->mcc_q_obj,
1302 be_link_status_async_callback, (void *)adapter);
1303 if (status != BE_SUCCESS) {
1304 printk(KERN_WARNING "add_async_event_callback failed");
1305 printk(KERN_WARNING
1306 "Link status changes may not be reflected\n");
1307 }
1308 be_enable_intr(pnob);
1309 be_enable_eq_intr(pnob);
1310 be_up(adapter);
1311 }
1312 netif_device_attach(netdev);
1313 adapter->dev_state = adapter->dev_pm_state;
1314 return 0;
1315
1316}
1317
1318#endif
1319
1320/* Wait until no more pending transmits */
1321void be_wait_nic_tx_cmplx_cmpl(struct be_net_object *pnob)
1322{
1323 int i;
1324
1325 /* Wait for 20us * 50000 (= 1s) and no more */
1326 i = 0;
1327 while ((pnob->tx_q_tl != pnob->tx_q_hd) && (i < 50000)) {
1328 ++i;
1329 udelay(20);
1330 }
1331
1332 /* Check for no more pending transmits */
1333 if (i >= 50000) {
1334 printk(KERN_WARNING
1335 "Did not receive completions for all TX requests\n");
1336 }
1337}
1338
1339static struct pci_driver be_driver = {
1340 .name = be_driver_name,
1341 .id_table = be_device_id_table,
1342 .probe = be_probe,
1343#ifdef CONFIG_PM
1344 .suspend = be_suspend,
1345 .resume = be_resume,
1346#endif
1347 .remove = be_remove
1348};
1349
1350/*
 1351 * Module init entry point. Registers our device and returns.
1352 * Our probe will be called if the device is found.
1353 */
1354static int __init be_init_module(void)
1355{
1356 int ret;
1357
1358 if (rxbuf_size != 8192 && rxbuf_size != 4096 && rxbuf_size != 2048) {
1359 printk(KERN_WARNING
1360 "Unsupported receive buffer size (%d) requested\n",
1361 rxbuf_size);
1362 printk(KERN_WARNING
1363 "Must be 2048, 4096 or 8192. Defaulting to 2048\n");
1364 rxbuf_size = 2048;
1365 }
1366
1367 ret = pci_register_driver(&be_driver);
1368
1369 return ret;
1370}
1371
1372module_init(be_init_module);
1373
1374/*
1375 * be_exit_module - Driver Exit Cleanup Routine
1376 */
1377static void __exit be_exit_module(void)
1378{
1379 pci_unregister_driver(&be_driver);
1380}
1381
1382module_exit(be_exit_module);
diff --git a/drivers/staging/benet/be_int.c b/drivers/staging/benet/be_int.c
deleted file mode 100644
index cba95d09a8b6..000000000000
--- a/drivers/staging/benet/be_int.c
+++ /dev/null
@@ -1,863 +0,0 @@
1/*
2 * Copyright (C) 2005 - 2008 ServerEngines
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@serverengines.com
12 *
13 * ServerEngines
14 * 209 N. Fair Oaks Ave
15 * Sunnyvale, CA 94085
16 */
17#include <linux/if_vlan.h>
18#include <linux/inet_lro.h>
19
20#include "benet.h"
21
22/* number of bytes of RX frame that are copied to skb->data */
23#define BE_HDR_LEN 64
24
25#define NETIF_RX(skb) netif_receive_skb(skb)
26#define VLAN_ACCEL_RX(skb, pnob, vt) \
27 vlan_hwaccel_rx(skb, pnob->vlan_grp, vt)
28
29/*
30 This function notifies BladeEngine of the number of completion
31 entries processed from the specified completion queue by writing
32 the number of popped entries to the door bell.
33
34 pnob - Pointer to the NetObject structure
35 n - Number of completion entries processed
36 cq_id - Queue ID of the completion queue for which notification
37 is being done.
38 re_arm - 1 - rearm the completion ring to generate an event.
 39	   - 0 - don't rearm the completion ring to generate an event
40*/
41void be_notify_cmpl(struct be_net_object *pnob, int n, int cq_id, int re_arm)
42{
43 struct CQ_DB_AMAP cqdb;
44
45 cqdb.dw[0] = 0;
46 AMAP_SET_BITS_PTR(CQ_DB, qid, &cqdb, cq_id);
47 AMAP_SET_BITS_PTR(CQ_DB, rearm, &cqdb, re_arm);
48 AMAP_SET_BITS_PTR(CQ_DB, num_popped, &cqdb, n);
49 PD_WRITE(&pnob->fn_obj, cq_db, cqdb.dw[0]);
50}
51
52/*
53 * adds additional receive frags indicated by BE starting from given
54 * frag index (fi) to specified skb's frag list
55 */
56static void
57add_skb_frags(struct be_net_object *pnob, struct sk_buff *skb,
58 u32 nresid, u32 fi)
59{
60 struct be_adapter *adapter = pnob->adapter;
61 u32 sk_frag_idx, n;
62 struct be_rx_page_info *rx_page_info;
63 u32 frag_sz = pnob->rx_buf_size;
64
65 sk_frag_idx = skb_shinfo(skb)->nr_frags;
66 while (nresid) {
67 index_inc(&fi, pnob->rx_q_len);
68
69 rx_page_info = (struct be_rx_page_info *)pnob->rx_ctxt[fi];
70 pnob->rx_ctxt[fi] = NULL;
71 if ((rx_page_info->page_offset) ||
72 (pnob->rx_pg_shared == false)) {
73 pci_unmap_page(adapter->pdev,
74 pci_unmap_addr(rx_page_info, bus),
75 frag_sz, PCI_DMA_FROMDEVICE);
76 }
77
78 n = min(nresid, frag_sz);
79 skb_shinfo(skb)->frags[sk_frag_idx].page = rx_page_info->page;
80 skb_shinfo(skb)->frags[sk_frag_idx].page_offset
81 = rx_page_info->page_offset;
82 skb_shinfo(skb)->frags[sk_frag_idx].size = n;
83
84 sk_frag_idx++;
85 skb->len += n;
86 skb->data_len += n;
87 skb_shinfo(skb)->nr_frags++;
88 nresid -= n;
89
90 memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
91 atomic_dec(&pnob->rx_q_posted);
92 }
93}
94
95/*
96 * This function processes incoming nic packets over various Rx queues.
97 * This function takes the adapter, the current Rx status descriptor
98 * entry and the Rx completion queue ID as argument.
99 */
100static inline int process_nic_rx_completion(struct be_net_object *pnob,
101 struct ETH_RX_COMPL_AMAP *rxcp)
102{
103 struct be_adapter *adapter = pnob->adapter;
104 struct sk_buff *skb;
105 int udpcksm, tcpcksm;
106 int n;
107 u32 nresid, fi;
108 u32 frag_sz = pnob->rx_buf_size;
109 u8 *va;
110 struct be_rx_page_info *rx_page_info;
111 u32 numfrags, vtp, vtm, vlan_tag, pktsize;
112
113 fi = AMAP_GET_BITS_PTR(ETH_RX_COMPL, fragndx, rxcp);
114 BUG_ON(fi >= (int)pnob->rx_q_len);
115 BUG_ON(fi < 0);
116
117 rx_page_info = (struct be_rx_page_info *)pnob->rx_ctxt[fi];
118 BUG_ON(!rx_page_info->page);
119 pnob->rx_ctxt[fi] = NULL;
120
121 /*
 122	 * If one page is used per fragment or if this is the second half
 123	 * of the page, unmap the page here
124 */
125 if ((rx_page_info->page_offset) || (pnob->rx_pg_shared == false)) {
126 pci_unmap_page(adapter->pdev,
127 pci_unmap_addr(rx_page_info, bus), frag_sz,
128 PCI_DMA_FROMDEVICE);
129 }
130
131 atomic_dec(&pnob->rx_q_posted);
132 udpcksm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, udpcksm, rxcp);
133 tcpcksm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, tcpcksm, rxcp);
134 pktsize = AMAP_GET_BITS_PTR(ETH_RX_COMPL, pktsize, rxcp);
 135	/*
 136	 * Get rid of RX flush completions first: they arrive with both
 137	 * checksum bits set and a 32-byte pktsize, a combination real
	 * frames never produce.
	 */
138 if ((tcpcksm) && (udpcksm) && (pktsize == 32)) {
139 put_page(rx_page_info->page);
140 memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
141 return 0;
142 }
143 skb = netdev_alloc_skb(pnob->netdev, BE_HDR_LEN + NET_IP_ALIGN);
144 if (skb == NULL) {
145 dev_info(&pnob->netdev->dev, "alloc_skb() failed\n");
146 put_page(rx_page_info->page);
147 memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
148 goto free_frags;
149 }
150 skb_reserve(skb, NET_IP_ALIGN);
151
152 skb->dev = pnob->netdev;
153
154 n = min(pktsize, frag_sz);
155
156 va = page_address(rx_page_info->page) + rx_page_info->page_offset;
157 prefetch(va);
158
159 skb->len = n;
160 skb->data_len = n;
161 if (n <= BE_HDR_LEN) {
162 memcpy(skb->data, va, n);
163 put_page(rx_page_info->page);
164 skb->data_len -= n;
165 skb->tail += n;
166 } else {
167
168 /* Setup the SKB with page buffer information */
169 skb_shinfo(skb)->frags[0].page = rx_page_info->page;
170 skb_shinfo(skb)->nr_frags++;
171
172 /* Copy the header into the skb_data */
173 memcpy(skb->data, va, BE_HDR_LEN);
174 skb_shinfo(skb)->frags[0].page_offset =
175 rx_page_info->page_offset + BE_HDR_LEN;
176 skb_shinfo(skb)->frags[0].size = n - BE_HDR_LEN;
177 skb->data_len -= BE_HDR_LEN;
178 skb->tail += BE_HDR_LEN;
179 }
180 memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
181 nresid = pktsize - n;
182
183 skb->protocol = eth_type_trans(skb, pnob->netdev);
184
185 if ((tcpcksm || udpcksm) && adapter->rx_csum)
186 skb->ip_summed = CHECKSUM_UNNECESSARY;
187 else
188 skb->ip_summed = CHECKSUM_NONE;
189 /*
190 * if we have more bytes left, the frame has been
191 * given to us in multiple fragments. This happens
192 * with Jumbo frames. Add the remaining fragments to
193 * skb->frags[] array.
194 */
195 if (nresid)
196 add_skb_frags(pnob, skb, nresid, fi);
197
 198	/* update the true size of the skb. */
199 skb->truesize = skb->len + sizeof(struct sk_buff);
200
201 /*
 202	 * If this is an 802.3 or 802.2 LLC frame (i.e. one that carries
 203	 * a length field in the MAC header)
 204	 * and the frame length is greater than 64 bytes
205 */
206 if (((skb->protocol == ntohs(ETH_P_802_2)) ||
207 (skb->protocol == ntohs(ETH_P_802_3)))
208 && (pktsize > BE_HDR_LEN)) {
209 /*
 210		 * If the length given in the MAC header is less than the
 211		 * frame size, the frame is erroneous; drop it
212 */
213 if ((ntohs(*(u16 *) (va + 12)) + ETH_HLEN) < pktsize) {
214 /* Increment Non Ether type II frames dropped */
215 adapter->be_stat.bes_802_3_dropped_frames++;
216
217 kfree_skb(skb);
218 return 0;
219 }
220 /*
 221		 * else if the length given in the MAC header is greater than
 222		 * the frame size, we should not be seeing such frames;
 223		 * count it as malformed and pass the packet to the stack
224 */
225 else if ((ntohs(*(u16 *) (va + 12)) + ETH_HLEN) > pktsize) {
226 /* Increment Non Ether type II frames malformed */
227 adapter->be_stat.bes_802_3_malformed_frames++;
228 }
229 }
230
231 vtp = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vtp, rxcp);
232 vtm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vtm, rxcp);
233 if (vtp && vtm) {
234 /* Vlan tag present in pkt and BE found
235 * that the tag matched an entry in VLAN table
236 */
237 if (!pnob->vlan_grp || pnob->num_vlans == 0) {
238 /* But we have no VLANs configured.
239 * This should never happen. Drop the packet.
240 */
241 dev_info(&pnob->netdev->dev,
242 "BladeEngine: Unexpected vlan tagged packet\n");
243 kfree_skb(skb);
244 return 0;
245 }
246 /* pass the VLAN packet to stack */
247 vlan_tag = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vlan_tag, rxcp);
248 VLAN_ACCEL_RX(skb, pnob, be16_to_cpu(vlan_tag));
249
250 } else {
251 NETIF_RX(skb);
252 }
253 return 0;
254
255free_frags:
256 /* free all frags associated with the current rxcp */
257 numfrags = AMAP_GET_BITS_PTR(ETH_RX_COMPL, numfrags, rxcp);
258 while (numfrags-- > 1) {
259 index_inc(&fi, pnob->rx_q_len);
260
261 rx_page_info = (struct be_rx_page_info *)
262 pnob->rx_ctxt[fi];
263 pnob->rx_ctxt[fi] = (void *)NULL;
264 if (rx_page_info->page_offset || !pnob->rx_pg_shared) {
265 pci_unmap_page(adapter->pdev,
266 pci_unmap_addr(rx_page_info, bus),
267 frag_sz, PCI_DMA_FROMDEVICE);
268 }
269
270 put_page(rx_page_info->page);
271 memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
272 atomic_dec(&pnob->rx_q_posted);
273 }
274 return -ENOMEM;
275}
276
277static void process_nic_rx_completion_lro(struct be_net_object *pnob,
278 struct ETH_RX_COMPL_AMAP *rxcp)
279{
280 struct be_adapter *adapter = pnob->adapter;
281 struct skb_frag_struct rx_frags[BE_MAX_FRAGS_PER_FRAME];
282 unsigned int udpcksm, tcpcksm;
283 u32 numfrags, vlanf, vtm, vlan_tag, nresid;
284 u16 vlant;
285 unsigned int fi, idx, n;
286 struct be_rx_page_info *rx_page_info;
287 u32 frag_sz = pnob->rx_buf_size, pktsize;
288 bool rx_coal = (adapter->max_rx_coal <= 1) ? 0 : 1;
289 u8 err, *va;
290 __wsum csum = 0;
291
292 if (AMAP_GET_BITS_PTR(ETH_RX_COMPL, ipsec, rxcp)) {
293 /* Drop the pkt and move to the next completion. */
294 adapter->be_stat.bes_rx_misc_pkts++;
295 return;
296 }
297 err = AMAP_GET_BITS_PTR(ETH_RX_COMPL, err, rxcp);
298 if (err || !rx_coal) {
 299		/* We won't coalesce Rx pkts if the err bit is set;
 300		 * take the path of normal completion processing */
301 process_nic_rx_completion(pnob, rxcp);
302 return;
303 }
304
305 fi = AMAP_GET_BITS_PTR(ETH_RX_COMPL, fragndx, rxcp);
306 BUG_ON(fi >= (int)pnob->rx_q_len);
307 BUG_ON(fi < 0);
308 rx_page_info = (struct be_rx_page_info *)pnob->rx_ctxt[fi];
309 BUG_ON(!rx_page_info->page);
310 pnob->rx_ctxt[fi] = (void *)NULL;
311 /* If one page is used per fragment or if this is the
312 * second half of the page, unmap the page here
313 */
314 if (rx_page_info->page_offset || !pnob->rx_pg_shared) {
315 pci_unmap_page(adapter->pdev,
316 pci_unmap_addr(rx_page_info, bus),
317 frag_sz, PCI_DMA_FROMDEVICE);
318 }
319
320 numfrags = AMAP_GET_BITS_PTR(ETH_RX_COMPL, numfrags, rxcp);
321 udpcksm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, udpcksm, rxcp);
322 tcpcksm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, tcpcksm, rxcp);
323 vlan_tag = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vlan_tag, rxcp);
324 vlant = be16_to_cpu(vlan_tag);
325 vlanf = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vtp, rxcp);
326 vtm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vtm, rxcp);
327 pktsize = AMAP_GET_BITS_PTR(ETH_RX_COMPL, pktsize, rxcp);
328
329 atomic_dec(&pnob->rx_q_posted);
330
331 if (tcpcksm && udpcksm && pktsize == 32) {
332 /* flush completion entries */
333 put_page(rx_page_info->page);
334 memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
335 return;
336 }
 337	/* Only one of udpcksm and tcpcksm can be set */
338 BUG_ON(udpcksm && tcpcksm);
339
340 /* jumbo frames could come in multiple fragments */
341 BUG_ON(numfrags != ((pktsize + (frag_sz - 1)) / frag_sz));
342 n = min(pktsize, frag_sz);
343 nresid = pktsize - n; /* will be useful for jumbo pkts */
344 idx = 0;
345
346 va = page_address(rx_page_info->page) + rx_page_info->page_offset;
347 prefetch(va);
348 rx_frags[idx].page = rx_page_info->page;
349 rx_frags[idx].page_offset = (rx_page_info->page_offset);
350 rx_frags[idx].size = n;
351 memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
352
353 /* If we got multiple fragments, we have more data. */
354 while (nresid) {
355 idx++;
356 index_inc(&fi, pnob->rx_q_len);
357
358 rx_page_info = (struct be_rx_page_info *)pnob->rx_ctxt[fi];
359 pnob->rx_ctxt[fi] = (void *)NULL;
360 if (rx_page_info->page_offset || !pnob->rx_pg_shared) {
361 pci_unmap_page(adapter->pdev,
362 pci_unmap_addr(rx_page_info, bus),
363 frag_sz, PCI_DMA_FROMDEVICE);
364 }
365
366 n = min(nresid, frag_sz);
367 rx_frags[idx].page = rx_page_info->page;
368 rx_frags[idx].page_offset = (rx_page_info->page_offset);
369 rx_frags[idx].size = n;
370
371 nresid -= n;
372 memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
373 atomic_dec(&pnob->rx_q_posted);
374 }
375
376 if (likely(!(vlanf && vtm))) {
377 lro_receive_frags(&pnob->lro_mgr, rx_frags,
378 pktsize, pktsize,
379 (void *)(unsigned long)csum, csum);
380 } else {
381 /* Vlan tag present in pkt and BE found
382 * that the tag matched an entry in VLAN table
383 */
384 if (unlikely(!pnob->vlan_grp || pnob->num_vlans == 0)) {
385 /* But we have no VLANs configured.
386 * This should never happen. Drop the packet.
387 */
388 dev_info(&pnob->netdev->dev,
389 "BladeEngine: Unexpected vlan tagged packet\n");
390 return;
391 }
392 /* pass the VLAN packet to stack */
393 lro_vlan_hwaccel_receive_frags(&pnob->lro_mgr,
394 rx_frags, pktsize, pktsize,
395 pnob->vlan_grp, vlant,
396 (void *)(unsigned long)csum,
397 csum);
398 }
399
400 adapter->be_stat.bes_rx_coal++;
401}
402
403struct ETH_RX_COMPL_AMAP *be_get_rx_cmpl(struct be_net_object *pnob)
404{
405 struct ETH_RX_COMPL_AMAP *rxcp = &pnob->rx_cq[pnob->rx_cq_tl];
406 u32 valid, ct;
407
408 valid = AMAP_GET_BITS_PTR(ETH_RX_COMPL, valid, rxcp);
409 if (valid == 0)
410 return NULL;
411
412 ct = AMAP_GET_BITS_PTR(ETH_RX_COMPL, ct, rxcp);
413 if (ct != 0) {
 414		/* Invalid chute number; treat as an error */
415 AMAP_SET_BITS_PTR(ETH_RX_COMPL, err, rxcp, 1);
416 }
417
418 be_adv_rxcq_tl(pnob);
419 AMAP_SET_BITS_PTR(ETH_RX_COMPL, valid, rxcp, 0);
420 return rxcp;
421}
422
423static void update_rx_rate(struct be_adapter *adapter)
424{
425 /* update the rate once in two seconds */
426 if ((jiffies - adapter->eth_rx_jiffies) > 2 * (HZ)) {
427 u32 r;
428 r = adapter->eth_rx_bytes /
429 ((jiffies - adapter->eth_rx_jiffies) / (HZ));
430 r = (r / 1000000); /* MB/Sec */
431
432 /* Mega Bits/Sec */
433 adapter->be_stat.bes_eth_rx_rate = (r * 8);
434 adapter->eth_rx_jiffies = jiffies;
435 adapter->eth_rx_bytes = 0;
436 }
437}
438
439static int process_rx_completions(struct be_net_object *pnob, int max_work)
440{
441 struct be_adapter *adapter = pnob->adapter;
442 struct ETH_RX_COMPL_AMAP *rxcp;
443 u32 nc = 0;
444 unsigned int pktsize;
445
446 while (max_work && (rxcp = be_get_rx_cmpl(pnob))) {
447 prefetch(rxcp);
448 pktsize = AMAP_GET_BITS_PTR(ETH_RX_COMPL, pktsize, rxcp);
449 process_nic_rx_completion_lro(pnob, rxcp);
450 adapter->eth_rx_bytes += pktsize;
451 update_rx_rate(adapter);
452 nc++;
453 max_work--;
454 adapter->be_stat.bes_rx_compl++;
455 }
456 if (likely(adapter->max_rx_coal > 1)) {
457 adapter->be_stat.bes_rx_flush++;
458 lro_flush_all(&pnob->lro_mgr);
459 }
460
461 /* Refill the queue */
462 if (atomic_read(&pnob->rx_q_posted) < 900)
463 be_post_eth_rx_buffs(pnob);
464
465 return nc;
466}
467
468static struct ETH_TX_COMPL_AMAP *be_get_tx_cmpl(struct be_net_object *pnob)
469{
470 struct ETH_TX_COMPL_AMAP *txcp = &pnob->tx_cq[pnob->tx_cq_tl];
471 u32 valid;
472
473 valid = AMAP_GET_BITS_PTR(ETH_TX_COMPL, valid, txcp);
474 if (valid == 0)
475 return NULL;
476
477 AMAP_SET_BITS_PTR(ETH_TX_COMPL, valid, txcp, 0);
478 be_adv_txcq_tl(pnob);
479 return txcp;
480
481}
482
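/*
 * Walk the TX ring from the current tail up to end_idx, unmapping the
 * DMA buffer behind each WRB. The skb pointer was stashed in tx_ctxt[]
 * at the last WRB of the request and is freed when that index is reached.
 */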
483void process_one_tx_compl(struct be_net_object *pnob, u32 end_idx)
484{
485 struct be_adapter *adapter = pnob->adapter;
486 int cur_index, tx_wrbs_completed = 0;
487 struct sk_buff *skb;
488 u64 busaddr, pa, pa_lo, pa_hi;
489 struct ETH_WRB_AMAP *wrb;
490 u32 frag_len, last_index, j;
491
492 last_index = tx_compl_lastwrb_idx_get(pnob);
493 BUG_ON(last_index != end_idx);
494 pnob->tx_ctxt[pnob->tx_q_tl] = NULL;
495 do {
496 cur_index = pnob->tx_q_tl;
497 wrb = &pnob->tx_q[cur_index];
498 pa_hi = AMAP_GET_BITS_PTR(ETH_WRB, frag_pa_hi, wrb);
499 pa_lo = AMAP_GET_BITS_PTR(ETH_WRB, frag_pa_lo, wrb);
500 frag_len = AMAP_GET_BITS_PTR(ETH_WRB, frag_len, wrb);
501 busaddr = (pa_hi << 32) | pa_lo;
502 if (busaddr != 0) {
503 pa = le64_to_cpu(busaddr);
504 pci_unmap_single(adapter->pdev, pa,
505 frag_len, PCI_DMA_TODEVICE);
506 }
507 if (cur_index == last_index) {
508 skb = (struct sk_buff *)pnob->tx_ctxt[cur_index];
509 BUG_ON(!skb);
510 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
511 struct skb_frag_struct *frag;
512 frag = &skb_shinfo(skb)->frags[j];
513 pci_unmap_page(adapter->pdev,
514 (ulong) frag->page, frag->size,
515 PCI_DMA_TODEVICE);
516 }
517 kfree_skb(skb);
518 pnob->tx_ctxt[cur_index] = NULL;
519 } else {
520 BUG_ON(pnob->tx_ctxt[cur_index]);
521 }
522 tx_wrbs_completed++;
523 be_adv_txq_tl(pnob);
524 } while (cur_index != last_index);
525 atomic_sub(tx_wrbs_completed, &pnob->tx_q_used);
526}
527
528/* there is no need to take an SMP lock here since currently
529 * we have only one instance of the tasklet that does completion
530 * processing.
531 */
532static void process_nic_tx_completions(struct be_net_object *pnob)
533{
534 struct be_adapter *adapter = pnob->adapter;
535 struct ETH_TX_COMPL_AMAP *txcp;
536 struct net_device *netdev = pnob->netdev;
537 u32 end_idx, num_processed = 0;
538
539 adapter->be_stat.bes_tx_events++;
540
541 while ((txcp = be_get_tx_cmpl(pnob))) {
542 end_idx = AMAP_GET_BITS_PTR(ETH_TX_COMPL, wrb_index, txcp);
543 process_one_tx_compl(pnob, end_idx);
544 num_processed++;
545 adapter->be_stat.bes_tx_compl++;
546 }
547 be_notify_cmpl(pnob, num_processed, pnob->tx_cq_id, 1);
548 /*
549 * We got Tx completions and have usable WRBs.
550 * If the netdev's queue has been stopped
551 * because we had run out of WRBs, wake it now.
552 */
553 spin_lock(&adapter->txq_lock);
554 if (netif_queue_stopped(netdev)
555 && atomic_read(&pnob->tx_q_used) < pnob->tx_q_len / 2) {
556 netif_wake_queue(netdev);
557 }
558 spin_unlock(&adapter->txq_lock);
559}
560
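/*
 * Move buffers from the caller's list into free RX descriptor slots
 * (at most 255 per call) and ring the ERX doorbell once with the
 * number actually posted.
 */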
561static u32 post_rx_buffs(struct be_net_object *pnob, struct list_head *rxbl)
562{
563 u32 nposted = 0;
564 struct ETH_RX_D_AMAP *rxd = NULL;
565 struct be_recv_buffer *rxbp;
566 void **rx_ctxp;
567 struct RQ_DB_AMAP rqdb;
568
569 rx_ctxp = pnob->rx_ctxt;
570
571 while (!list_empty(rxbl) &&
572 (rx_ctxp[pnob->rx_q_hd] == NULL) && nposted < 255) {
573
574 rxbp = list_first_entry(rxbl, struct be_recv_buffer, rxb_list);
575 list_del(&rxbp->rxb_list);
576 rxd = pnob->rx_q + pnob->rx_q_hd;
577 AMAP_SET_BITS_PTR(ETH_RX_D, fragpa_lo, rxd, rxbp->rxb_pa_lo);
578 AMAP_SET_BITS_PTR(ETH_RX_D, fragpa_hi, rxd, rxbp->rxb_pa_hi);
579
580 rx_ctxp[pnob->rx_q_hd] = rxbp->rxb_ctxt;
581 be_adv_rxq_hd(pnob);
582 nposted++;
583 }
584
585 if (nposted) {
586 /* Now press the door bell to notify BladeEngine. */
587 rqdb.dw[0] = 0;
588 AMAP_SET_BITS_PTR(RQ_DB, numPosted, &rqdb, nposted);
589 AMAP_SET_BITS_PTR(RQ_DB, rq, &rqdb, pnob->rx_q_id);
590 PD_WRITE(&pnob->fn_obj, erx_rq_db, rqdb.dw[0]);
591 }
592 atomic_add(nposted, &pnob->rx_q_posted);
593 return nposted;
594}
595
596void be_post_eth_rx_buffs(struct be_net_object *pnob)
597{
598 struct be_adapter *adapter = pnob->adapter;
599 u32 num_bufs, r;
600 u64 busaddr = 0, tmp_pa;
601 u32 max_bufs, pg_hd;
602 u32 frag_size;
603 struct be_recv_buffer *rxbp;
604 struct list_head rxbl;
605 struct be_rx_page_info *rx_page_info;
606 struct page *page = NULL;
607 u32 page_order = 0;
608 gfp_t alloc_flags = GFP_ATOMIC;
609
610 BUG_ON(!adapter);
611
612 max_bufs = 64; /* should be even # <= 255. */
613
614 frag_size = pnob->rx_buf_size;
615 page_order = get_order(frag_size);
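	/*
	 * With 4 KiB pages an 8 KiB frag size means an order-1 allocation,
	 * which is why __GFP_COMP is added below for that case.
	 */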
616
617 if (frag_size == 8192)
618 alloc_flags |= (gfp_t) __GFP_COMP;
619 /*
 620	 * Form a linked list of RECV_BUFFER structures to be posted.
 621	 * We will post an even number of buffers so that pages can be
 622	 * shared.
623 */
624 INIT_LIST_HEAD(&rxbl);
625
626 for (num_bufs = 0; num_bufs < max_bufs &&
627 !pnob->rx_page_info[pnob->rx_pg_info_hd].page; ++num_bufs) {
628
629 rxbp = &pnob->eth_rx_bufs[num_bufs];
630 pg_hd = pnob->rx_pg_info_hd;
631 rx_page_info = &pnob->rx_page_info[pg_hd];
632
633 if (!page) {
634 page = alloc_pages(alloc_flags, page_order);
635 if (unlikely(page == NULL)) {
636 adapter->be_stat.bes_ethrx_post_fail++;
637 pnob->rxbuf_post_fail++;
638 break;
639 }
640 pnob->rxbuf_post_fail = 0;
641 busaddr = pci_map_page(adapter->pdev, page, 0,
642 frag_size, PCI_DMA_FROMDEVICE);
643 rx_page_info->page_offset = 0;
644 rx_page_info->page = page;
645 /*
 646			 * If we are not sharing this page between two skbs,
 647			 * allocate a new one on the next iteration
648 */
649 if (pnob->rx_pg_shared == false)
650 page = NULL;
651 } else {
652 get_page(page);
653 rx_page_info->page_offset += frag_size;
654 rx_page_info->page = page;
655 /*
 656			 * We are finished with the allocated page;
 657			 * alloc a new one on the next iteration
658 */
659 page = NULL;
660 }
661 rxbp->rxb_ctxt = (void *)rx_page_info;
662 index_inc(&pnob->rx_pg_info_hd, pnob->rx_q_len);
663
664 pci_unmap_addr_set(rx_page_info, bus, busaddr);
665 tmp_pa = busaddr + rx_page_info->page_offset;
666 rxbp->rxb_pa_lo = (tmp_pa & 0xFFFFFFFF);
667 rxbp->rxb_pa_hi = (tmp_pa >> 32);
668 rxbp->rxb_len = frag_size;
669 list_add_tail(&rxbp->rxb_list, &rxbl);
670 } /* End of for */
671
672 r = post_rx_buffs(pnob, &rxbl);
673 BUG_ON(r != num_bufs);
674 return;
675}
676
677/*
678 * Interrupt service for network function. We just schedule the
679 * tasklet which does all completion processing.
680 */
681irqreturn_t be_int(int irq, void *dev)
682{
683 struct net_device *netdev = dev;
684 struct be_net_object *pnob = netdev_priv(netdev);
685 struct be_adapter *adapter = pnob->adapter;
686 u32 isr;
687
688 isr = CSR_READ(&pnob->fn_obj, cev.isr1);
689 if (unlikely(!isr))
690 return IRQ_NONE;
691
692 spin_lock(&adapter->int_lock);
693 adapter->isr |= isr;
694 spin_unlock(&adapter->int_lock);
695
696 adapter->be_stat.bes_ints++;
697
698 tasklet_schedule(&adapter->sts_handler);
699 return IRQ_HANDLED;
700}
701
702/*
703 * Poll function called by NAPI with a work budget.
 704 * We process as many UC, BC and MC receive completions
 705 * as the budget allows and return the actual number of
 706 * RX statuses processed.
707 */
708int be_poll(struct napi_struct *napi, int budget)
709{
710 struct be_net_object *pnob =
711 container_of(napi, struct be_net_object, napi);
712 u32 work_done;
713
714 pnob->adapter->be_stat.bes_polls++;
715 work_done = process_rx_completions(pnob, budget);
716 BUG_ON(work_done > budget);
717
718 /* All consumed */
719 if (work_done < budget) {
720 netif_rx_complete(napi);
721 /* enable intr */
722 be_notify_cmpl(pnob, work_done, pnob->rx_cq_id, 1);
723 } else {
724 /* More to be consumed; continue with interrupts disabled */
725 be_notify_cmpl(pnob, work_done, pnob->rx_cq_id, 0);
726 }
727 return work_done;
728}
729
730static struct EQ_ENTRY_AMAP *get_event(struct be_net_object *pnob)
731{
732 struct EQ_ENTRY_AMAP *eqp = &(pnob->event_q[pnob->event_q_tl]);
733 if (!AMAP_GET_BITS_PTR(EQ_ENTRY, Valid, eqp))
734 return NULL;
735 be_adv_eq_tl(pnob);
736 return eqp;
737}
738
739/*
740 * Processes all valid events in the event ring associated with given
741 * NetObject. Also, notifies BE the number of events processed.
742 */
743static inline u32 process_events(struct be_net_object *pnob)
744{
745 struct be_adapter *adapter = pnob->adapter;
746 struct EQ_ENTRY_AMAP *eqp;
747 u32 rid, num_events = 0;
748 struct net_device *netdev = pnob->netdev;
749
750 while ((eqp = get_event(pnob)) != NULL) {
751 adapter->be_stat.bes_events++;
752 rid = AMAP_GET_BITS_PTR(EQ_ENTRY, ResourceID, eqp);
753 if (rid == pnob->rx_cq_id) {
754 adapter->be_stat.bes_rx_events++;
755 netif_rx_schedule(&pnob->napi);
756 } else if (rid == pnob->tx_cq_id) {
757 process_nic_tx_completions(pnob);
758 } else if (rid == pnob->mcc_cq_id) {
759 be_mcc_process_cq(&pnob->mcc_q_obj, 1);
760 } else {
761 dev_info(&netdev->dev,
762 "Invalid EQ ResourceID %d\n", rid);
763 }
764 AMAP_SET_BITS_PTR(EQ_ENTRY, Valid, eqp, 0);
765 AMAP_SET_BITS_PTR(EQ_ENTRY, ResourceID, eqp, 0);
766 num_events++;
767 }
768 return num_events;
769}
770
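/*
 * Adaptive interrupt coalescing: once a second, compare the measured
 * interrupt rate against the high/low watermarks and step the EQ delay
 * by 8 in the appropriate direction, within [min_eqd, max_eqd].
 */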
771static void update_eqd(struct be_adapter *adapter, struct be_net_object *pnob)
772{
773 int status;
774 struct be_eq_object *eq_objectp;
775
776 /* update once a second */
777 if ((jiffies - adapter->ips_jiffies) > 1 * (HZ)) {
778 /* One second elapsed since last update */
779 u32 r, new_eqd = -1;
780 r = adapter->be_stat.bes_ints - adapter->be_stat.bes_prev_ints;
781 r = r / ((jiffies - adapter->ips_jiffies) / (HZ));
782 adapter->be_stat.bes_ips = r;
783 adapter->ips_jiffies = jiffies;
784 adapter->be_stat.bes_prev_ints = adapter->be_stat.bes_ints;
785 if (r > IPS_HI_WM && adapter->cur_eqd < adapter->max_eqd)
786 new_eqd = (adapter->cur_eqd + 8);
787 if (r < IPS_LO_WM && adapter->cur_eqd > adapter->min_eqd)
788 new_eqd = (adapter->cur_eqd - 8);
789 if (adapter->enable_aic && new_eqd != -1) {
790 eq_objectp = &pnob->event_q_obj;
791 status = be_eq_modify_delay(&pnob->fn_obj, 1,
792 &eq_objectp, &new_eqd, NULL,
793 NULL, NULL);
794 if (status == BE_SUCCESS)
795 adapter->cur_eqd = new_eqd;
796 }
797 }
798}
799
800/*
801 This function notifies BladeEngine of how many events were processed
802 from the event queue by ringing the corresponding door bell and
803 optionally re-arms the event queue.
804 n - number of events processed
805 re_arm - 1 - re-arm the EQ, 0 - do not re-arm the EQ
806
807*/
808static void be_notify_event(struct be_net_object *pnob, int n, int re_arm)
809{
810 struct CQ_DB_AMAP eqdb;
811 eqdb.dw[0] = 0;
812
813 AMAP_SET_BITS_PTR(CQ_DB, qid, &eqdb, pnob->event_q_id);
814 AMAP_SET_BITS_PTR(CQ_DB, rearm, &eqdb, re_arm);
815 AMAP_SET_BITS_PTR(CQ_DB, event, &eqdb, 1);
816 AMAP_SET_BITS_PTR(CQ_DB, num_popped, &eqdb, n);
817 /*
818 * Under some situations we see an interrupt and no valid
819 * EQ entry. To keep going, we need to ring the DB even if
 820	 * num_popped is 0.
821 */
822 PD_WRITE(&pnob->fn_obj, cq_db, eqdb.dw[0]);
823 return;
824}
825
826/*
827 * Called from the tasklet scheduled by ISR. All real interrupt processing
828 * is done here.
829 */
830void be_process_intr(unsigned long context)
831{
832 struct be_adapter *adapter = (struct be_adapter *)context;
833 struct be_net_object *pnob = adapter->net_obj;
834 u32 isr, n;
835 ulong flags = 0;
836
837 isr = adapter->isr;
838
839 /*
 840	 * We create only one NIC event queue in Linux, so events are
 841	 * expected only in the first event queue
842 */
843 BUG_ON(isr & 0xfffffffe);
844 if ((isr & 1) == 0)
845 return; /* not our interrupt */
846 n = process_events(pnob);
847 /*
848 * Clear the event bit. adapter->isr is set by
849 * hard interrupt. Prevent race with lock.
850 */
851 spin_lock_irqsave(&adapter->int_lock, flags);
852 adapter->isr &= ~1;
853 spin_unlock_irqrestore(&adapter->int_lock, flags);
854 be_notify_event(pnob, n, 1);
855 /*
856 * If previous allocation attempts had failed and
857 * BE has used up all posted buffers, post RX buffers here
858 */
859 if (pnob->rxbuf_post_fail && atomic_read(&pnob->rx_q_posted) == 0)
860 be_post_eth_rx_buffs(pnob);
861 update_eqd(adapter, pnob);
862 return;
863}
diff --git a/drivers/staging/benet/be_netif.c b/drivers/staging/benet/be_netif.c
deleted file mode 100644
index 2b8daf63dc7d..000000000000
--- a/drivers/staging/benet/be_netif.c
+++ /dev/null
@@ -1,705 +0,0 @@
1/*
2 * Copyright (C) 2005 - 2008 ServerEngines
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@serverengines.com
12 *
13 * ServerEngines
14 * 209 N. Fair Oaks Ave
15 * Sunnyvale, CA 94085
16 */
17/*
18 * be_netif.c
19 *
 20 * This file contains the various driver entry points seen by the TCP/IP stack.
21 */
22
23#include <linux/if_vlan.h>
24#include <linux/in.h>
25#include "benet.h"
26#include <linux/ip.h>
27#include <linux/inet_lro.h>
28
29/* Strings to print Link properties */
30static const char *link_speed[] = {
31 "Invalid link Speed Value",
32 "10 Mbps",
33 "100 Mbps",
34 "1 Gbps",
35 "10 Gbps"
36};
37
38static const char *link_duplex[] = {
39 "Invalid Duplex Value",
40 "Half Duplex",
41 "Full Duplex"
42};
43
44static const char *link_state[] = {
45 "",
46 "(active)"
47};
48
49void be_print_link_info(struct BE_LINK_STATUS *lnk_status)
50{
51 u16 si, di, ai;
52
53 /* Port 0 */
54 if (lnk_status->mac0_speed && lnk_status->mac0_duplex) {
55 /* Port is up and running */
56 si = (lnk_status->mac0_speed < 5) ? lnk_status->mac0_speed : 0;
57 di = (lnk_status->mac0_duplex < 3) ?
58 lnk_status->mac0_duplex : 0;
59 ai = (lnk_status->active_port == 0) ? 1 : 0;
60 printk(KERN_INFO "PortNo. 0: Speed - %s %s %s\n",
61 link_speed[si], link_duplex[di], link_state[ai]);
62 } else
63 printk(KERN_INFO "PortNo. 0: Down\n");
64
65 /* Port 1 */
66 if (lnk_status->mac1_speed && lnk_status->mac1_duplex) {
67 /* Port is up and running */
68 si = (lnk_status->mac1_speed < 5) ? lnk_status->mac1_speed : 0;
69 di = (lnk_status->mac1_duplex < 3) ?
70 lnk_status->mac1_duplex : 0;
71 ai = (lnk_status->active_port == 0) ? 1 : 0;
72 printk(KERN_INFO "PortNo. 1: Speed - %s %s %s\n",
73 link_speed[si], link_duplex[di], link_state[ai]);
74 } else
75 printk(KERN_INFO "PortNo. 1: Down\n");
76
77 return;
78}
79
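/*
 * LRO callback: locate the MAC, IP and TCP headers in the first
 * fragment. A non-zero return tells inet_lro that the frame is not a
 * candidate for aggregation (anything other than IPv4 TCP here), so it
 * is passed up without coalescing.
 */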
80static int
81be_get_frag_header(struct skb_frag_struct *frag, void **mac_hdr,
82 void **ip_hdr, void **tcpudp_hdr,
83 u64 *hdr_flags, void *priv)
84{
85 struct ethhdr *eh;
86 struct vlan_ethhdr *veh;
87 struct iphdr *iph;
88 u8 *va = page_address(frag->page) + frag->page_offset;
89 unsigned long ll_hlen;
90
91 /* find the mac header, abort if not IPv4 */
92
93 prefetch(va);
94 eh = (struct ethhdr *)va;
95 *mac_hdr = eh;
96 ll_hlen = ETH_HLEN;
97 if (eh->h_proto != htons(ETH_P_IP)) {
98 if (eh->h_proto == htons(ETH_P_8021Q)) {
99 veh = (struct vlan_ethhdr *)va;
100 if (veh->h_vlan_encapsulated_proto != htons(ETH_P_IP))
101 return -1;
102
103 ll_hlen += VLAN_HLEN;
104
105 } else {
106 return -1;
107 }
108 }
109 *hdr_flags = LRO_IPV4;
110
111 iph = (struct iphdr *)(va + ll_hlen);
112 *ip_hdr = iph;
113 if (iph->protocol != IPPROTO_TCP)
114 return -1;
115 *hdr_flags |= LRO_TCP;
116 *tcpudp_hdr = (u8 *) (*ip_hdr) + (iph->ihl << 2);
117
118 return 0;
119}
120
121static int benet_open(struct net_device *netdev)
122{
123 struct be_net_object *pnob = netdev_priv(netdev);
124 struct be_adapter *adapter = pnob->adapter;
125 struct net_lro_mgr *lro_mgr;
126
127 if (adapter->dev_state < BE_DEV_STATE_INIT)
128 return -EAGAIN;
129
130 lro_mgr = &pnob->lro_mgr;
131 lro_mgr->dev = netdev;
132
133 lro_mgr->features = LRO_F_NAPI;
134 lro_mgr->ip_summed = CHECKSUM_UNNECESSARY;
135 lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
136 lro_mgr->max_desc = BE_MAX_LRO_DESCRIPTORS;
137 lro_mgr->lro_arr = pnob->lro_desc;
138 lro_mgr->get_frag_header = be_get_frag_header;
139 lro_mgr->max_aggr = adapter->max_rx_coal;
140 lro_mgr->frag_align_pad = 2;
141 if (lro_mgr->max_aggr > MAX_SKB_FRAGS)
142 lro_mgr->max_aggr = MAX_SKB_FRAGS;
143
144 adapter->max_rx_coal = BE_LRO_MAX_PKTS;
145
146 be_update_link_status(adapter);
147
148 /*
 149	 * Set carrier on only if the physical link is up.
 150	 * Either port's link status being up signifies this.
151 */
152 if ((adapter->port0_link_sts == BE_PORT_LINK_UP) ||
153 (adapter->port1_link_sts == BE_PORT_LINK_UP)) {
154 netif_start_queue(netdev);
155 netif_carrier_on(netdev);
156 }
157
158 adapter->dev_state = BE_DEV_STATE_OPEN;
159 napi_enable(&pnob->napi);
160 be_enable_intr(pnob);
161 be_enable_eq_intr(pnob);
162 /*
 163	 * RX completion queue may be in a disarmed state. Arm it.
164 */
165 be_notify_cmpl(pnob, 0, pnob->rx_cq_id, 1);
166
167 return 0;
168}
169
170static int benet_close(struct net_device *netdev)
171{
172 struct be_net_object *pnob = netdev_priv(netdev);
173 struct be_adapter *adapter = pnob->adapter;
174
175 netif_stop_queue(netdev);
176 synchronize_irq(netdev->irq);
177
178 be_wait_nic_tx_cmplx_cmpl(pnob);
179 adapter->dev_state = BE_DEV_STATE_INIT;
180 netif_carrier_off(netdev);
181
182 adapter->port0_link_sts = BE_PORT_LINK_DOWN;
183 adapter->port1_link_sts = BE_PORT_LINK_DOWN;
184 be_disable_intr(pnob);
185 be_disable_eq_intr(pnob);
186 napi_disable(&pnob->napi);
187
188 return 0;
189}
190
191/*
 192 * Setting a MAC address for BE.
 193 * Takes netdev and a void pointer as arguments.
 194 * The pointer holds the new address to be used.
195 */
196static int benet_set_mac_addr(struct net_device *netdev, void *p)
197{
198 struct sockaddr *addr = p;
199 struct be_net_object *pnob = netdev_priv(netdev);
200
201 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
202 be_rxf_mac_address_read_write(&pnob->fn_obj, 0, 0, false, true, false,
203 netdev->dev_addr, NULL, NULL);
204 /*
205 * Since we are doing Active-Passive failover, both
 206	 * ports should have matching MAC addresses at all times.
207 */
208 be_rxf_mac_address_read_write(&pnob->fn_obj, 1, 0, false, true, false,
209 netdev->dev_addr, NULL, NULL);
210
211 return 0;
212}
213
214void be_get_stats_timer_handler(unsigned long context)
215{
216 struct be_timer_ctxt *ctxt = (struct be_timer_ctxt *)context;
217
218 if (atomic_read(&ctxt->get_stat_flag)) {
219 atomic_dec(&ctxt->get_stat_flag);
220 up((void *)ctxt->get_stat_sem_addr);
221 }
222 del_timer(&ctxt->get_stats_timer);
223 return;
224}
225
226void be_get_stat_cb(void *context, int status,
227 struct MCC_WRB_AMAP *optional_wrb)
228{
229 struct be_timer_ctxt *ctxt = (struct be_timer_ctxt *)context;
230 /*
 231	 * Just up the semaphore if the get_stat_flag
 232	 * reads 1, so that the waiter can continue.
233 * If it is 0, then it was handled by the timer handler.
234 */
235 del_timer(&ctxt->get_stats_timer);
236 if (atomic_read(&ctxt->get_stat_flag)) {
237 atomic_dec(&ctxt->get_stat_flag);
238 up((void *)ctxt->get_stat_sem_addr);
239 }
240}
241
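/*
 * Synchronously fetch the hardware Ethernet statistics: issue the query
 * fwcmd, arm a two-second timeout timer, and block on a semaphore that
 * either the completion callback or the timer handler will release.
 */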
242struct net_device_stats *benet_get_stats(struct net_device *dev)
243{
244 struct be_net_object *pnob = netdev_priv(dev);
245 struct be_adapter *adapter = pnob->adapter;
246 u64 pa;
247 struct be_timer_ctxt *ctxt = &adapter->timer_ctxt;
248
249 if (adapter->dev_state != BE_DEV_STATE_OPEN) {
250 /* Return previously read stats */
251 return &(adapter->benet_stats);
252 }
253 /* Get Physical Addr */
254 pa = pci_map_single(adapter->pdev, adapter->eth_statsp,
255 sizeof(struct FWCMD_ETH_GET_STATISTICS),
256 PCI_DMA_FROMDEVICE);
257 ctxt->get_stat_sem_addr = (unsigned long)&adapter->get_eth_stat_sem;
258 atomic_inc(&ctxt->get_stat_flag);
259
260 be_rxf_query_eth_statistics(&pnob->fn_obj, adapter->eth_statsp,
261 cpu_to_le64(pa), be_get_stat_cb, ctxt,
262 NULL);
263
264 ctxt->get_stats_timer.data = (unsigned long)ctxt;
265 mod_timer(&ctxt->get_stats_timer, (jiffies + (HZ * 2)));
266 down((void *)ctxt->get_stat_sem_addr); /* callback will unblock us */
267
268 /* Adding port0 and port1 stats. */
269 adapter->benet_stats.rx_packets =
270 adapter->eth_statsp->params.response.p0recvdtotalframes +
271 adapter->eth_statsp->params.response.p1recvdtotalframes;
272 adapter->benet_stats.tx_packets =
273 adapter->eth_statsp->params.response.p0xmitunicastframes +
274 adapter->eth_statsp->params.response.p1xmitunicastframes;
275 adapter->benet_stats.tx_bytes =
276 adapter->eth_statsp->params.response.p0xmitbyteslsd +
277 adapter->eth_statsp->params.response.p1xmitbyteslsd;
278 adapter->benet_stats.rx_errors =
279 adapter->eth_statsp->params.response.p0crcerrors +
280 adapter->eth_statsp->params.response.p1crcerrors;
281 adapter->benet_stats.rx_errors +=
282 adapter->eth_statsp->params.response.p0alignmentsymerrs +
283 adapter->eth_statsp->params.response.p1alignmentsymerrs;
284 adapter->benet_stats.rx_errors +=
285 adapter->eth_statsp->params.response.p0inrangelenerrors +
286 adapter->eth_statsp->params.response.p1inrangelenerrors;
287 adapter->benet_stats.rx_bytes =
288 adapter->eth_statsp->params.response.p0recvdtotalbytesLSD +
289 adapter->eth_statsp->params.response.p1recvdtotalbytesLSD;
290 adapter->benet_stats.rx_crc_errors =
291 adapter->eth_statsp->params.response.p0crcerrors +
292 adapter->eth_statsp->params.response.p1crcerrors;
293
294 adapter->benet_stats.tx_packets +=
295 adapter->eth_statsp->params.response.p0xmitmulticastframes +
296 adapter->eth_statsp->params.response.p1xmitmulticastframes;
297 adapter->benet_stats.tx_packets +=
298 adapter->eth_statsp->params.response.p0xmitbroadcastframes +
299 adapter->eth_statsp->params.response.p1xmitbroadcastframes;
300 adapter->benet_stats.tx_errors = 0;
301
302 adapter->benet_stats.multicast =
303 adapter->eth_statsp->params.response.p0xmitmulticastframes +
304 adapter->eth_statsp->params.response.p1xmitmulticastframes;
305
306 adapter->benet_stats.rx_fifo_errors =
307 adapter->eth_statsp->params.response.p0rxfifooverflowdropped +
308 adapter->eth_statsp->params.response.p1rxfifooverflowdropped;
309 adapter->benet_stats.rx_frame_errors =
310 adapter->eth_statsp->params.response.p0alignmentsymerrs +
311 adapter->eth_statsp->params.response.p1alignmentsymerrs;
312 adapter->benet_stats.rx_length_errors =
313 adapter->eth_statsp->params.response.p0inrangelenerrors +
314 adapter->eth_statsp->params.response.p1inrangelenerrors;
315 adapter->benet_stats.rx_length_errors +=
316 adapter->eth_statsp->params.response.p0outrangeerrors +
317 adapter->eth_statsp->params.response.p1outrangeerrors;
318 adapter->benet_stats.rx_length_errors +=
319 adapter->eth_statsp->params.response.p0frametoolongerrors +
320 adapter->eth_statsp->params.response.p1frametoolongerrors;
321
322 pci_unmap_single(adapter->pdev, (ulong) adapter->eth_statsp,
323 sizeof(struct FWCMD_ETH_GET_STATISTICS),
324 PCI_DMA_FROMDEVICE);
325 return &(adapter->benet_stats);
326
327}
328
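/*
 * Ring the TX doorbell for nposted WRBs; numPosted carries at most 255
 * per write, so larger batches are posted in chunks.
 */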
329static void be_start_tx(struct be_net_object *pnob, u32 nposted)
330{
331#define CSR_ETH_MAX_SQPOSTS 255
332 struct SQ_DB_AMAP sqdb;
333
334 sqdb.dw[0] = 0;
335
336 AMAP_SET_BITS_PTR(SQ_DB, cid, &sqdb, pnob->tx_q_id);
337 while (nposted) {
338 if (nposted > CSR_ETH_MAX_SQPOSTS) {
339 AMAP_SET_BITS_PTR(SQ_DB, numPosted, &sqdb,
340 CSR_ETH_MAX_SQPOSTS);
341 nposted -= CSR_ETH_MAX_SQPOSTS;
342 } else {
343 AMAP_SET_BITS_PTR(SQ_DB, numPosted, &sqdb, nposted);
344 nposted = 0;
345 }
346 PD_WRITE(&pnob->fn_obj, etx_sq_db, sqdb.dw[0]);
347 }
348
349 return;
350}
351
352static void update_tx_rate(struct be_adapter *adapter)
353{
354 /* update the rate once in two seconds */
355 if ((jiffies - adapter->eth_tx_jiffies) > 2 * (HZ)) {
356 u32 r;
357 r = adapter->eth_tx_bytes /
358 ((jiffies - adapter->eth_tx_jiffies) / (HZ));
359 r = (r / 1000000); /* M bytes/s */
360 adapter->be_stat.bes_eth_tx_rate = (r * 8); /* M bits/s */
361 adapter->eth_tx_jiffies = jiffies;
362 adapter->eth_tx_bytes = 0;
363 }
364}
365
366static int wrb_cnt_in_skb(struct sk_buff *skb)
367{
368 int cnt = 0;
369 while (skb) {
370 if (skb->len > skb->data_len)
371 cnt++;
372 cnt += skb_shinfo(skb)->nr_frags;
373 skb = skb_shinfo(skb)->frag_list;
374 }
375 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
376 return cnt;
377}
378
379static void wrb_fill(struct ETH_WRB_AMAP *wrb, u64 addr, int len)
380{
381 AMAP_SET_BITS_PTR(ETH_WRB, frag_pa_hi, wrb, addr >> 32);
382 AMAP_SET_BITS_PTR(ETH_WRB, frag_pa_lo, wrb, addr & 0xFFFFFFFF);
383 AMAP_SET_BITS_PTR(ETH_WRB, frag_len, wrb, len);
384}
385
386static void wrb_fill_extra(struct ETH_WRB_AMAP *wrb, struct sk_buff *skb,
387 struct be_net_object *pnob)
388{
389 wrb->dw[2] = 0;
390 wrb->dw[3] = 0;
391 AMAP_SET_BITS_PTR(ETH_WRB, crc, wrb, 1);
392 if (skb_shinfo(skb)->gso_segs > 1 && skb_shinfo(skb)->gso_size) {
393 AMAP_SET_BITS_PTR(ETH_WRB, lso, wrb, 1);
394 AMAP_SET_BITS_PTR(ETH_WRB, lso_mss, wrb,
395 skb_shinfo(skb)->gso_size);
396 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
397 u8 proto = ((struct iphdr *)ip_hdr(skb))->protocol;
398 if (proto == IPPROTO_TCP)
399 AMAP_SET_BITS_PTR(ETH_WRB, tcpcs, wrb, 1);
400 else if (proto == IPPROTO_UDP)
401 AMAP_SET_BITS_PTR(ETH_WRB, udpcs, wrb, 1);
402 }
403 if (pnob->vlan_grp && vlan_tx_tag_present(skb)) {
404 AMAP_SET_BITS_PTR(ETH_WRB, vlan, wrb, 1);
405 AMAP_SET_BITS_PTR(ETH_WRB, vlan_tag, wrb, vlan_tx_tag_get(skb));
406 }
407}
408
409static inline void wrb_copy_extra(struct ETH_WRB_AMAP *to,
410 struct ETH_WRB_AMAP *from)
411{
412
413 to->dw[2] = from->dw[2];
414 to->dw[3] = from->dw[3];
415}
416
417/* Returns the actual count of wrbs used including a possible dummy */
418static int copy_skb_to_txq(struct be_net_object *pnob, struct sk_buff *skb,
419 u32 wrb_cnt, u32 *copied)
420{
421 u64 busaddr;
422 struct ETH_WRB_AMAP *wrb = NULL, *first = NULL;
423 u32 i;
424 bool dummy = true;
425 struct pci_dev *pdev = pnob->adapter->pdev;
426
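	/* An odd WRB count is padded to even with a zero-length dummy WRB, written after the data WRBs below. */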
427 if (wrb_cnt & 1)
428 wrb_cnt++;
429 else
430 dummy = false;
431
432 atomic_add(wrb_cnt, &pnob->tx_q_used);
433
434 while (skb) {
435 if (skb->len > skb->data_len) {
436 int len = skb->len - skb->data_len;
437 busaddr = pci_map_single(pdev, skb->data, len,
438 PCI_DMA_TODEVICE);
439 busaddr = cpu_to_le64(busaddr);
440 wrb = &pnob->tx_q[pnob->tx_q_hd];
441 if (first == NULL) {
442 wrb_fill_extra(wrb, skb, pnob);
443 first = wrb;
444 } else {
445 wrb_copy_extra(wrb, first);
446 }
447 wrb_fill(wrb, busaddr, len);
448 be_adv_txq_hd(pnob);
449 *copied += len;
450 }
451
452 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
453 struct skb_frag_struct *frag =
454 &skb_shinfo(skb)->frags[i];
455 busaddr = pci_map_page(pdev, frag->page,
456 frag->page_offset, frag->size,
457 PCI_DMA_TODEVICE);
458 busaddr = cpu_to_le64(busaddr);
459 wrb = &pnob->tx_q[pnob->tx_q_hd];
460 if (first == NULL) {
461 wrb_fill_extra(wrb, skb, pnob);
462 first = wrb;
463 } else {
464 wrb_copy_extra(wrb, first);
465 }
466 wrb_fill(wrb, busaddr, frag->size);
467 be_adv_txq_hd(pnob);
468 *copied += frag->size;
469 }
470 skb = skb_shinfo(skb)->frag_list;
471 }
472
473 if (dummy) {
474 wrb = &pnob->tx_q[pnob->tx_q_hd];
475 BUG_ON(first == NULL);
476 wrb_copy_extra(wrb, first);
477 wrb_fill(wrb, 0, 0);
478 be_adv_txq_hd(pnob);
479 }
480 AMAP_SET_BITS_PTR(ETH_WRB, complete, wrb, 1);
481 AMAP_SET_BITS_PTR(ETH_WRB, last, wrb, 1);
482 return wrb_cnt;
483}
484
 485/* For each skb transmitted, tx_ctxt stores the number of wrbs at the
 486 * start index and the skb pointer at the end index
487 */
488static inline void be_tx_wrb_info_remember(struct be_net_object *pnob,
489 struct sk_buff *skb, int wrb_cnt,
490 u32 start)
491{
492 *(u32 *) (&pnob->tx_ctxt[start]) = wrb_cnt;
493 index_adv(&start, wrb_cnt - 1, pnob->tx_q_len);
494 pnob->tx_ctxt[start] = skb;
495}
496
497static int benet_xmit(struct sk_buff *skb, struct net_device *netdev)
498{
499 struct be_net_object *pnob = netdev_priv(netdev);
500 struct be_adapter *adapter = pnob->adapter;
501 u32 wrb_cnt, copied = 0;
502 u32 start = pnob->tx_q_hd;
503
504 adapter->be_stat.bes_tx_reqs++;
505
506 wrb_cnt = wrb_cnt_in_skb(skb);
507 spin_lock_bh(&adapter->txq_lock);
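	/* Stop the queue if the ring cannot take wrb_cnt entries plus slack (the -2 below presumably leaves room for a pad dummy). */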
508 if ((pnob->tx_q_len - 2 - atomic_read(&pnob->tx_q_used)) <= wrb_cnt) {
509 netif_stop_queue(pnob->netdev);
510 spin_unlock_bh(&adapter->txq_lock);
511 adapter->be_stat.bes_tx_fails++;
512 return NETDEV_TX_BUSY;
513 }
514 spin_unlock_bh(&adapter->txq_lock);
515
516 wrb_cnt = copy_skb_to_txq(pnob, skb, wrb_cnt, &copied);
517 be_tx_wrb_info_remember(pnob, skb, wrb_cnt, start);
518
519 be_start_tx(pnob, wrb_cnt);
520
521 adapter->eth_tx_bytes += copied;
522 adapter->be_stat.bes_tx_wrbs += wrb_cnt;
523 update_tx_rate(adapter);
524 netdev->trans_start = jiffies;
525
526 return NETDEV_TX_OK;
527}
528
529/*
530 * This is the driver entry point to change the mtu of the device
531 * Returns 0 for success and errno for failure.
532 */
533static int benet_change_mtu(struct net_device *netdev, int new_mtu)
534{
535 /*
 536	 * BE supports jumbo frames up to 9000 bytes including the link layer
 537	 * header. Considering the different frame format variants possible,
 538	 * like VLAN and SNAP/LLC, the maximum possible MTU value is 8974 bytes.
539 */
540
541 if (new_mtu < (ETH_ZLEN + ETH_FCS_LEN) || (new_mtu > BE_MAX_MTU)) {
542 dev_info(&netdev->dev, "Invalid MTU requested. "
543 "Must be between %d and %d bytes\n",
544 (ETH_ZLEN + ETH_FCS_LEN), BE_MAX_MTU);
545 return -EINVAL;
546 }
547 dev_info(&netdev->dev, "MTU changed from %d to %d\n",
548 netdev->mtu, new_mtu);
549 netdev->mtu = new_mtu;
550 return 0;
551}
552
553/*
554 * This is the driver entry point to register a vlan with the device
555 */
556static void benet_vlan_register(struct net_device *netdev,
557 struct vlan_group *grp)
558{
559 struct be_net_object *pnob = netdev_priv(netdev);
560
561 be_disable_eq_intr(pnob);
562 pnob->vlan_grp = grp;
563 pnob->num_vlans = 0;
564 be_enable_eq_intr(pnob);
565}
566
567/*
568 * This is the driver entry point to add a vlan vlan_id
569 * with the device netdev
570 */
571static void benet_vlan_add_vid(struct net_device *netdev, u16 vlan_id)
572{
573 struct be_net_object *pnob = netdev_priv(netdev);
574
575 if (pnob->num_vlans == (BE_NUM_VLAN_SUPPORTED - 1)) {
576 /* no way to return an error */
577 dev_info(&netdev->dev,
578 "BladeEngine: Cannot configure more than %d Vlans\n",
579 BE_NUM_VLAN_SUPPORTED);
580 return;
581 }
582 /* The new vlan tag will be in the slot indicated by num_vlans. */
583 pnob->vlan_tag[pnob->num_vlans++] = vlan_id;
584 be_rxf_vlan_config(&pnob->fn_obj, false, pnob->num_vlans,
585 pnob->vlan_tag, NULL, NULL, NULL);
586}
587
588/*
589 * This is the driver entry point to remove a vlan vlan_id
590 * with the device netdev
591 */
592static void benet_vlan_rem_vid(struct net_device *netdev, u16 vlan_id)
593{
594 struct be_net_object *pnob = netdev_priv(netdev);
595
596 u32 i, value;
597
598 /*
599 * In Blade Engine, we support 32 vlan tag filters across both ports.
600 * To program a vlan tag, the RXF_RTPR_CSR register is used.
601 * Each 32-bit value of RXF_RTDR_CSR can address 2 vlan tag entries.
 602	 * The VLAN table is of depth 16; thus we support 32 tags.
603 */
604
605 value = vlan_id | VLAN_VALID_BIT;
606 for (i = 0; i < BE_NUM_VLAN_SUPPORTED; i++) {
607 if (pnob->vlan_tag[i] == vlan_id)
608 break;
609 }
610
611 if (i == BE_NUM_VLAN_SUPPORTED)
612 return;
 613	/* Now compact the vlan tag array by removing the hole created. */
614 while ((i + 1) < BE_NUM_VLAN_SUPPORTED) {
615 pnob->vlan_tag[i] = pnob->vlan_tag[i + 1];
616 i++;
617 }
618 if ((i + 1) == BE_NUM_VLAN_SUPPORTED)
619 pnob->vlan_tag[i] = (u16) 0x0;
620 pnob->num_vlans--;
621 be_rxf_vlan_config(&pnob->fn_obj, false, pnob->num_vlans,
622 pnob->vlan_tag, NULL, NULL, NULL);
623}
624
625/*
626 * This function is called to program multicast
 627 * addresses into the multicast filter of the ASIC.
628 */
629static void be_set_multicast_filter(struct net_device *netdev)
630{
631 struct be_net_object *pnob = netdev_priv(netdev);
632 struct dev_mc_list *mc_ptr;
633 u8 mac_addr[32][ETH_ALEN];
634 int i;
635
636 if (netdev->flags & IFF_ALLMULTI) {
637 /* set BE in Multicast promiscuous */
638 be_rxf_multicast_config(&pnob->fn_obj, true, 0, NULL, NULL,
639 NULL, NULL);
640 return;
641 }
642
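	/* Copy the kernel's multicast list into the local table; mac_addr[] has room for only 32 entries. */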
643 for (mc_ptr = netdev->mc_list, i = 0; mc_ptr;
644 mc_ptr = mc_ptr->next, i++) {
645 memcpy(&mac_addr[i][0], mc_ptr->dmi_addr, ETH_ALEN);
646 }
647
648 /* reset the promiscuous mode also. */
649 be_rxf_multicast_config(&pnob->fn_obj, false, i,
650 &mac_addr[0][0], NULL, NULL, NULL);
651}
652
653/*
654 * This is the driver entry point to set multicast list
655 * with the device netdev. This function will be used to
656 * set promiscuous mode or multicast promiscuous mode
 657 * or plain multicast mode.
658 */
659static void benet_set_multicast_list(struct net_device *netdev)
660{
661 struct be_net_object *pnob = netdev_priv(netdev);
662
663 if (netdev->flags & IFF_PROMISC) {
664 be_rxf_promiscuous(&pnob->fn_obj, 1, 1, NULL, NULL, NULL);
665 } else {
666 be_rxf_promiscuous(&pnob->fn_obj, 0, 0, NULL, NULL, NULL);
667 be_set_multicast_filter(netdev);
668 }
669}
670
671int benet_init(struct net_device *netdev)
672{
673 struct be_net_object *pnob = netdev_priv(netdev);
674 struct be_adapter *adapter = pnob->adapter;
675
676 ether_setup(netdev);
677
678 netdev->open = &benet_open;
679 netdev->stop = &benet_close;
680 netdev->hard_start_xmit = &benet_xmit;
681
682 netdev->get_stats = &benet_get_stats;
683
684 netdev->set_multicast_list = &benet_set_multicast_list;
685
686 netdev->change_mtu = &benet_change_mtu;
687 netdev->set_mac_address = &benet_set_mac_addr;
688
689 netdev->vlan_rx_register = benet_vlan_register;
690 netdev->vlan_rx_add_vid = benet_vlan_add_vid;
691 netdev->vlan_rx_kill_vid = benet_vlan_rem_vid;
692
693 netdev->features =
694 NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
695 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_IP_CSUM;
696
697 netdev->flags |= IFF_MULTICAST;
698
699	/* If the device is DAC capable, set the HIGHDMA flag for the netdevice. */
700 if (adapter->dma_64bit_cap)
701 netdev->features |= NETIF_F_HIGHDMA;
702
703 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
704 return 0;
705}
diff --git a/drivers/staging/benet/benet.h b/drivers/staging/benet/benet.h
deleted file mode 100644
index 09a1f0817722..000000000000
--- a/drivers/staging/benet/benet.h
+++ /dev/null
@@ -1,429 +0,0 @@
1/*
2 * Copyright (C) 2005 - 2008 ServerEngines
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@serverengines.com
12 *
13 * ServerEngines
14 * 209 N. Fair Oaks Ave
15 * Sunnyvale, CA 94085
16 */
17#ifndef _BENET_H_
18#define _BENET_H_
19
20#include <linux/pci.h>
21#include <linux/netdevice.h>
22#include <linux/inet_lro.h>
23#include "hwlib.h"
24
25#define _SA_MODULE_NAME "net-driver"
26
27#define VLAN_VALID_BIT 0x8000
28#define BE_NUM_VLAN_SUPPORTED 32
29#define BE_PORT_LINK_DOWN 0000
30#define BE_PORT_LINK_UP 0001
31#define BE_MAX_TX_FRAG_COUNT (30)
32
33/* Flag bits for send operation */
34#define IPCS (1 << 0) /* Enable IP checksum offload */
35#define UDPCS (1 << 1) /* Enable UDP checksum offload */
36#define TCPCS (1 << 2) /* Enable TCP checksum offload */
37#define LSO (1 << 3) /* Enable Large Segment offload */
38#define ETHVLAN (1 << 4) /* Enable VLAN insert */
39#define ETHEVENT (1 << 5) /* Generate event on completion */
40#define ETHCOMPLETE (1 << 6) /* Generate completion when done */
41#define IPSEC (1 << 7) /* Enable IPSEC */
42#define FORWARD (1 << 8) /* Send the packet in forwarding path */
43#define FIN (1 << 9) /* Issue FIN segment */
44
45#define BE_MAX_MTU 8974
46
47#define BE_MAX_LRO_DESCRIPTORS 8
48#define BE_LRO_MAX_PKTS 64
49#define BE_MAX_FRAGS_PER_FRAME 6
50
51extern const char be_drvr_ver[];
52extern char be_fw_ver[];
53extern char be_driver_name[];
54
55extern struct ethtool_ops be_ethtool_ops;
56
57#define BE_DEV_STATE_NONE 0
58#define BE_DEV_STATE_INIT 1
59#define BE_DEV_STATE_OPEN 2
60#define BE_DEV_STATE_SUSPEND 3
61
62/* This structure is used to describe physical fragments to use
63 * for DMAing data from NIC.
64 */
65struct be_recv_buffer {
66 struct list_head rxb_list; /* for maintaining a linked list */
67 void *rxb_va; /* buffer virtual address */
68 u32 rxb_pa_lo; /* low part of physical address */
69 u32 rxb_pa_hi; /* high part of physical address */
70 u32 rxb_len; /* length of recv buffer */
71 void *rxb_ctxt; /* context for OSM driver to use */
72};
73
74/*
75 * fragment list to describe scattered data.
76 */
77struct be_tx_frag_list {
78 u32 txb_len; /* Size of this fragment */
79 u32 txb_pa_lo; /* Lower 32 bits of 64 bit physical addr */
80 u32 txb_pa_hi; /* Higher 32 bits of 64 bit physical addr */
81};
82
83struct be_rx_page_info {
84 struct page *page;
85 dma_addr_t bus;
86 u16 page_offset;
87};
88
89/*
90 * This structure is the main tracking structure for a NIC interface.
91 */
92struct be_net_object {
93 /* MCC Ring - used to send fwcmds to embedded ARM processor */
94 struct MCC_WRB_AMAP *mcc_q; /* VA of the start of the ring */
95 u32 mcc_q_len; /* # of WRB entries in this ring */
96 u32 mcc_q_size;
97 u32 mcc_q_hd; /* MCC ring head */
98 u8 mcc_q_created; /* flag to help cleanup */
99 struct be_mcc_object mcc_q_obj; /* BECLIB's MCC ring Object */
100 dma_addr_t mcc_q_bus; /* DMA'ble bus address */
101
102 /* MCC Completion Ring - FW responses to fwcmds sent from MCC ring */
103 struct MCC_CQ_ENTRY_AMAP *mcc_cq; /* VA of the start of the ring */
104 u32 mcc_cq_len; /* # of compl. entries in this ring */
105 u32 mcc_cq_size;
106 u32 mcc_cq_tl; /* compl. ring tail */
107 u8 mcc_cq_created; /* flag to help cleanup */
108 struct be_cq_object mcc_cq_obj; /* BECLIB's MCC compl. ring object */
109 u32 mcc_cq_id; /* MCC ring ID */
110 dma_addr_t mcc_cq_bus; /* DMA'ble bus address */
111
112 struct ring_desc mb_rd; /* RD for MCC_MAIL_BOX */
113 void *mb_ptr; /* mailbox ptr to be freed */
114 dma_addr_t mb_bus; /* DMA'ble bus address */
115 u32 mb_size;
116
117 /* BEClib uses an array of context objects to track outstanding
118	 * requests to the MCC. We need to allocate the same number of
119	 * context entries as the number of entries in the MCC WRB ring
120 */
121 u32 mcc_wrb_ctxt_size;
122 void *mcc_wrb_ctxt; /* pointer to the context area */
123 u32 mcc_wrb_ctxtLen; /* Number of entries in the context */
124 /*
125 * NIC send request ring - used for xmitting raw ether frames.
126 */
127 struct ETH_WRB_AMAP *tx_q; /* VA of the start of the ring */
128	u32 tx_q_len;		/* # of entries in the send ring */
129 u32 tx_q_size;
130 u32 tx_q_hd; /* Head index. Next req. goes here */
131	u32 tx_q_tl;		/* Tail index; oldest outstanding req. */
132 u8 tx_q_created; /* flag to help cleanup */
133 struct be_ethsq_object tx_q_obj;/* BECLIB's send Q handle */
134 dma_addr_t tx_q_bus; /* DMA'ble bus address */
135 u32 tx_q_id; /* send queue ring ID */
136 u32 tx_q_port; /* 0 no binding, 1 port A, 2 port B */
137 atomic_t tx_q_used; /* # of WRBs used */
138 /* ptr to an array in which we store context info for each send req. */
139 void **tx_ctxt;
140 /*
141 * NIC Send compl. ring - completion status for all NIC frames xmitted.
142 */
143 struct ETH_TX_COMPL_AMAP *tx_cq;/* VA of start of the ring */
144 u32 txcq_len; /* # of entries in the ring */
145 u32 tx_cq_size;
146 /*
147 * index into compl ring where the host expects next completion entry
148 */
149 u32 tx_cq_tl;
150 u32 tx_cq_id; /* completion queue id */
151 u8 tx_cq_created; /* flag to help cleanup */
152 struct be_cq_object tx_cq_obj;
153 dma_addr_t tx_cq_bus; /* DMA'ble bus address */
154 /*
155 * Event Queue - all completion entries post events here.
156 */
157 struct EQ_ENTRY_AMAP *event_q; /* VA of start of event queue */
158 u32 event_q_len; /* # of entries */
159 u32 event_q_size;
160 u32 event_q_tl; /* Tail of the event queue */
161 u32 event_q_id; /* Event queue ID */
162 u8 event_q_created; /* flag to help cleanup */
163 struct be_eq_object event_q_obj; /* Queue handle */
164 dma_addr_t event_q_bus; /* DMA'ble bus address */
165 /*
166 * NIC receive queue - Data buffers to be used for receiving unicast,
167 * broadcast and multi-cast frames are posted here.
168 */
169 struct ETH_RX_D_AMAP *rx_q; /* VA of start of the queue */
170 u32 rx_q_len; /* # of entries */
171 u32 rx_q_size;
172 u32 rx_q_hd; /* Head of the queue */
173 atomic_t rx_q_posted; /* number of posted buffers */
174 u32 rx_q_id; /* queue ID */
175 u8 rx_q_created; /* flag to help cleanup */
176 struct be_ethrq_object rx_q_obj; /* NIC RX queue handle */
177 dma_addr_t rx_q_bus; /* DMA'ble bus address */
178 /*
179 * Pointer to an array of opaque context object for use by OSM driver
180 */
181 void **rx_ctxt;
182 /*
183 * NIC unicast RX completion queue - all unicast ether frame completion
184 * statuses from BE come here.
185 */
186 struct ETH_RX_COMPL_AMAP *rx_cq; /* VA of start of the queue */
187 u32 rx_cq_len; /* # of entries */
188 u32 rx_cq_size;
189 u32 rx_cq_tl; /* Tail of the queue */
190 u32 rx_cq_id; /* queue ID */
191 u8 rx_cq_created; /* flag to help cleanup */
192 struct be_cq_object rx_cq_obj; /* queue handle */
193 dma_addr_t rx_cq_bus; /* DMA'ble bus address */
194 struct be_function_object fn_obj; /* function object */
195 bool fn_obj_created;
196 u32 rx_buf_size; /* Size of the RX buffers */
197
198 struct net_device *netdev;
199 struct be_recv_buffer eth_rx_bufs[256]; /* to pass Rx buffer
200 addresses */
201 struct be_adapter *adapter; /* Pointer to OSM adapter */
202 u32 devno; /* OSM, network dev no. */
203 u32 use_port; /* Current active port */
204 struct be_rx_page_info *rx_page_info; /* Array of Rx buf pages */
205 u32 rx_pg_info_hd; /* Head of queue */
206 int rxbuf_post_fail; /* RxBuff posting fail count */
207	bool rx_pg_shared;	/* Is an allocated page shared as two frags? */
208 struct vlan_group *vlan_grp;
209 u32 num_vlans; /* Number of vlans in BE's filter */
210 u16 vlan_tag[BE_NUM_VLAN_SUPPORTED]; /* vlans currently configured */
211 struct napi_struct napi;
212 struct net_lro_mgr lro_mgr;
213 struct net_lro_desc lro_desc[BE_MAX_LRO_DESCRIPTORS];
214};
215
216#define NET_FH(np) (&(np)->fn_obj)
217
218/*
219 * BE driver statistics.
220 */
221struct be_drvr_stat {
222 u32 bes_tx_reqs; /* number of TX requests initiated */
223 u32 bes_tx_fails; /* number of TX requests that failed */
224 u32 bes_fwd_reqs; /* number of send reqs through forwarding i/f */
225 u32 bes_tx_wrbs; /* number of tx WRBs used */
226
227 u32 bes_ints; /* number of interrupts */
228 u32 bes_polls; /* number of times NAPI called poll function */
229	u32 bes_events;		/* total event entries processed */
230 u32 bes_tx_events; /* number of tx completion events */
231 u32 bes_rx_events; /* number of ucast rx completion events */
232 u32 bes_tx_compl; /* number of tx completion entries processed */
233 u32 bes_rx_compl; /* number of rx completion entries
234 processed */
235 u32 bes_ethrx_post_fail; /* number of ethrx buffer alloc
236 failures */
237 /*
238 * number of non ether type II frames dropped where
239 * frame len > length field of Mac Hdr
240 */
241 u32 bes_802_3_dropped_frames;
242 /*
243 * number of non ether type II frames malformed where
244 * in frame len < length field of Mac Hdr
245 */
246 u32 bes_802_3_malformed_frames;
247 u32 bes_ips; /* interrupts / sec */
248 u32 bes_prev_ints; /* bes_ints at last IPS calculation */
249 u16 bes_eth_tx_rate; /* ETH TX rate - Mb/sec */
250 u16 bes_eth_rx_rate; /* ETH RX rate - Mb/sec */
251	u32 bes_rx_coal;	/* Num pkts coalesced */
252	u32 bes_rx_flush;	/* Num times coalesced pkts were flushed */
253 u32 bes_link_change_physical; /*Num of times physical link changed */
254 u32 bes_link_change_virtual; /*Num of times virtual link changed */
255 u32 bes_rx_misc_pkts; /* Misc pkts received */
256};
257
258/* Maximum interrupt delay (in microseconds) allowed */
259#define MAX_EQD 120
260
261/*
262 * timer to prevent the system shutdown from hanging forever if h/w stops responding
263 */
264struct be_timer_ctxt {
265 atomic_t get_stat_flag;
266 struct timer_list get_stats_timer;
267 unsigned long get_stat_sem_addr;
268} ;
269
270/* This structure is the main BladeEngine driver context. */
271struct be_adapter {
272 struct net_device *netdevp;
273 struct be_drvr_stat be_stat;
274 struct net_device_stats benet_stats;
275
276 /* PCI BAR mapped addresses */
277 u8 __iomem *csr_va; /* CSR */
278 u8 __iomem *db_va; /* Door Bell */
279 u8 __iomem *pci_va; /* PCI Config */
280
281 struct tasklet_struct sts_handler;
282 struct timer_list cq_timer;
283 spinlock_t int_lock; /* to protect the isr field in adapter */
284
285 struct FWCMD_ETH_GET_STATISTICS *eth_statsp;
286 /*
287	 * Controls, via ethtool, whether the checksum computed by BE
288	 * on Rx pkts is honored.
289	 * If this is true (1), the checksum reported by BE for a
290	 * received pkt is passed to the stack as-is.
291	 * Else the stack will recalculate it.
292 */
293 bool rx_csum;
294 /*
295	 * Controls, via ethtool, whether coalescing of Rx pkts
296	 * is enabled.
297	 * If this is greater than 0 and less than 16 then coalescing
298	 * is enabled; else it is disabled.
299 */
300 u32 max_rx_coal;
301	struct pci_dev *pdev;	/* Pointer to OS's PCI device */
302
303 spinlock_t txq_lock; /* to stop/wake queue based on tx_q_used */
304
305 u32 isr; /* copy of Intr status reg. */
306
307 u32 port0_link_sts; /* Port 0 link status */
308	u32 port1_link_sts;	/* port 1 link status */
309 struct BE_LINK_STATUS *be_link_sts;
310
311 /* pointer to the first netobject of this adapter */
312 struct be_net_object *net_obj;
313
314 /* Flags to indicate what to clean up */
315 bool tasklet_started;
316 bool isr_registered;
317 /*
318 * adaptive interrupt coalescing (AIC) related
319 */
320 bool enable_aic; /* 1 if AIC is enabled */
321 u16 min_eqd; /* minimum EQ delay in usec */
322	u16 max_eqd;		/* maximum EQ delay in usec */
323 u16 cur_eqd; /* current EQ delay in usec */
324 /*
325 * book keeping for interrupt / sec and TX/RX rate calculation
326 */
327 ulong ips_jiffies; /* jiffies at last IPS calc */
328 u32 eth_tx_bytes;
329 ulong eth_tx_jiffies;
330 u32 eth_rx_bytes;
331 ulong eth_rx_jiffies;
332
333 struct semaphore get_eth_stat_sem;
334
335 /* timer ctxt to prevent shutdown hanging due to un-responsive BE */
336 struct be_timer_ctxt timer_ctxt;
337
338#define BE_MAX_MSIX_VECTORS 32
339#define BE_MAX_REQ_MSIX_VECTORS 1 /* only one EQ in Linux driver */
340 struct msix_entry msix_entries[BE_MAX_MSIX_VECTORS];
341 bool msix_enabled;
342 bool dma_64bit_cap; /* the Device DAC capable or not */
343 u8 dev_state; /* The current state of the device */
344 u8 dev_pm_state; /* The State of device before going to suspend */
345};
346
347/*
348 * Every second we look at the ints/sec and adjust eq_delay
349 * between adapter->min_eqd and adapter->max_eqd to keep the ints/sec between
350 * IPS_HI_WM and IPS_LO_WM.
351 */
352#define IPS_HI_WM 18000
353#define IPS_LO_WM 8000
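The comment above states the policy in words; a hedged sketch of the periodic adjustment (the helper name and the step size of 8 usec are assumptions, not part of this header):

	static u16 be_calc_eqd(struct be_adapter *adapter, u32 ips)
	{
		u16 eqd = adapter->cur_eqd;

		if (ips > IPS_HI_WM)
			eqd += 8;	/* more delay -> fewer ints/sec */
		else if (ips < IPS_LO_WM && eqd >= 8)
			eqd -= 8;	/* less delay -> lower latency */

		return clamp(eqd, adapter->min_eqd, adapter->max_eqd);
	}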
354
355
356static inline void index_adv(u32 *index, u32 val, u32 limit)
357{
358 BUG_ON(limit & (limit-1));
359 *index = (*index + val) & (limit - 1);
360}
361
362static inline void index_inc(u32 *index, u32 limit)
363{
364 BUG_ON(limit & (limit-1));
365 *index = (*index + 1) & (limit - 1);
366}
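Because limit must be a power of two (the BUG_ON above enforces it), the mask limit - 1 replaces a modulo; a standalone illustration with assumed values:

	u32 idx = 6;
	index_adv(&idx, 3, 8);	/* (6 + 3) & 7 == 1: wrapped past the end */
	index_inc(&idx, 8);	/* (1 + 1) & 7 == 2 */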
367
368static inline void be_adv_eq_tl(struct be_net_object *pnob)
369{
370 index_inc(&pnob->event_q_tl, pnob->event_q_len);
371}
372
373static inline void be_adv_txq_hd(struct be_net_object *pnob)
374{
375 index_inc(&pnob->tx_q_hd, pnob->tx_q_len);
376}
377
378static inline void be_adv_txq_tl(struct be_net_object *pnob)
379{
380 index_inc(&pnob->tx_q_tl, pnob->tx_q_len);
381}
382
383static inline void be_adv_txcq_tl(struct be_net_object *pnob)
384{
385 index_inc(&pnob->tx_cq_tl, pnob->txcq_len);
386}
387
388static inline void be_adv_rxq_hd(struct be_net_object *pnob)
389{
390 index_inc(&pnob->rx_q_hd, pnob->rx_q_len);
391}
392
393static inline void be_adv_rxcq_tl(struct be_net_object *pnob)
394{
395 index_inc(&pnob->rx_cq_tl, pnob->rx_cq_len);
396}
397
398static inline u32 tx_compl_lastwrb_idx_get(struct be_net_object *pnob)
399{
400 return (pnob->tx_q_tl + *(u32 *)&pnob->tx_ctxt[pnob->tx_q_tl] - 1)
401 & (pnob->tx_q_len - 1);
402}
403
404int benet_init(struct net_device *);
405int be_ethtool_ioctl(struct net_device *, struct ifreq *);
406struct net_device_stats *benet_get_stats(struct net_device *);
407void be_process_intr(unsigned long context);
408irqreturn_t be_int(int irq, void *dev);
409void be_post_eth_rx_buffs(struct be_net_object *);
410void be_get_stat_cb(void *, int, struct MCC_WRB_AMAP *);
411void be_get_stats_timer_handler(unsigned long);
412void be_wait_nic_tx_cmplx_cmpl(struct be_net_object *);
413void be_print_link_info(struct BE_LINK_STATUS *);
414void be_update_link_status(struct be_adapter *);
415void be_init_procfs(struct be_adapter *);
416void be_cleanup_procfs(struct be_adapter *);
417int be_poll(struct napi_struct *, int);
418struct ETH_RX_COMPL_AMAP *be_get_rx_cmpl(struct be_net_object *);
419void be_notify_cmpl(struct be_net_object *, int, int, int);
420void be_enable_intr(struct be_net_object *);
421void be_enable_eq_intr(struct be_net_object *);
422void be_disable_intr(struct be_net_object *);
423void be_disable_eq_intr(struct be_net_object *);
424int be_set_uc_mac_adr(struct be_net_object *, u8, u8, u8,
425 u8 *, mcc_wrb_cqe_callback, void *);
426int be_get_flow_ctl(struct be_function_object *pFnObj, bool *, bool *);
427void process_one_tx_compl(struct be_net_object *pnob, u32 end_idx);
428
429#endif /* _BENET_H_ */
diff --git a/drivers/staging/benet/bestatus.h b/drivers/staging/benet/bestatus.h
deleted file mode 100644
index 59c7a4b62223..000000000000
--- a/drivers/staging/benet/bestatus.h
+++ /dev/null
@@ -1,103 +0,0 @@
1/*
2 * Copyright (C) 2005 - 2008 ServerEngines
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@serverengines.com
12 *
13 * ServerEngines
14 * 209 N. Fair Oaks Ave
15 * Sunnyvale, CA 94085
16 */
17#ifndef _BESTATUS_H_
18#define _BESTATUS_H_
19
20#define BE_SUCCESS (0x00000000L)
21/*
22 * MessageId: BE_PENDING
23 * The BladeEngine Driver call succeeded, and pended operation.
24 */
25#define BE_PENDING (0x20070001L)
26#define BE_STATUS_PENDING (BE_PENDING)
27/*
28 * MessageId: BE_NOT_OK
29 * An error occurred.
30 */
31#define BE_NOT_OK (0xE0070002L)
32/*
33 * MessageId: BE_STATUS_SYSTEM_RESOURCES
34 * Insufficient host system resources exist to complete the API.
35 */
36#define BE_STATUS_SYSTEM_RESOURCES (0xE0070003L)
37/*
38 * MessageId: BE_STATUS_CHIP_RESOURCES
39 * Insufficient chip resources exist to complete the API.
40 */
41#define BE_STATUS_CHIP_RESOURCES (0xE0070004L)
42/*
43 * MessageId: BE_STATUS_NO_RESOURCE
44 * Insufficient resources to complete request.
45 */
46#define BE_STATUS_NO_RESOURCE (0xE0070005L)
47/*
48 * MessageId: BE_STATUS_BUSY
49 * Resource is currently busy.
50 */
51#define BE_STATUS_BUSY (0xE0070006L)
52/*
53 * MessageId: BE_STATUS_INVALID_PARAMETER
54 * Invalid Parameter in request.
55 */
56#define BE_STATUS_INVALID_PARAMETER (0xE0000007L)
57/*
58 * MessageId: BE_STATUS_NOT_SUPPORTED
59 * Requested operation is not supported.
60 */
61#define BE_STATUS_NOT_SUPPORTED (0xE000000DL)
62
63/*
64 * ***************************************************************************
65 * E T H E R N E T S T A T U S
66 * ***************************************************************************
67 */
68
69/*
70 * MessageId: BE_ETH_TX_ERROR
71 * The Ethernet device driver failed to transmit a packet.
72 */
73#define BE_ETH_TX_ERROR (0xE0070101L)
74
75/*
76 * ***************************************************************************
77 * S H A R E D S T A T U S
78 * ***************************************************************************
79 */
80
81/*
82 * MessageId: BE_STATUS_VBD_INVALID_VERSION
83 * The device driver is not compatible with this version of the VBD.
84 */
85#define BE_STATUS_INVALID_VERSION (0xE0070402L)
86/*
87 * MessageId: BE_STATUS_DOMAIN_DENIED
88 * The operation failed to complete due to insufficient access
89 * rights for the requesting domain.
90 */
91#define BE_STATUS_DOMAIN_DENIED (0xE0070403L)
92/*
93 * MessageId: BE_STATUS_TCP_NOT_STARTED
94 * The embedded TCP/IP stack has not been started.
95 */
96#define BE_STATUS_TCP_NOT_STARTED (0xE0070409L)
97/*
98 * MessageId: BE_STATUS_NO_MCC_WRB
99 * No free MCC WRB are available for posting the request.
100 * No free MCC WRBs are available for posting the request.
101#define BE_STATUS_NO_MCC_WRB (0xE0070414L)
102
103#endif /* _BESTATUS_H_ */
diff --git a/drivers/staging/benet/cev.h b/drivers/staging/benet/cev.h
deleted file mode 100644
index 30996920a544..000000000000
--- a/drivers/staging/benet/cev.h
+++ /dev/null
@@ -1,243 +0,0 @@
1/*
2 * Copyright (C) 2005 - 2008 ServerEngines
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@serverengines.com
12 *
13 * ServerEngines
14 * 209 N. Fair Oaks Ave
15 * Sunnyvale, CA 94085
16 */
17/*
18 * Autogenerated by srcgen version: 0127
19 */
20#ifndef __cev_amap_h__
21#define __cev_amap_h__
22#include "ep.h"
23
24/*
25 * Host Interrupt Status Register 0. The first of four application
26 * interrupt status registers. This register contains the interrupts
27 * for Event Queues EQ0 through EQ31.
28 */
29struct BE_CEV_ISR0_CSR_AMAP {
30 u8 interrupt0; /* DWORD 0 */
31 u8 interrupt1; /* DWORD 0 */
32 u8 interrupt2; /* DWORD 0 */
33 u8 interrupt3; /* DWORD 0 */
34 u8 interrupt4; /* DWORD 0 */
35 u8 interrupt5; /* DWORD 0 */
36 u8 interrupt6; /* DWORD 0 */
37 u8 interrupt7; /* DWORD 0 */
38 u8 interrupt8; /* DWORD 0 */
39 u8 interrupt9; /* DWORD 0 */
40 u8 interrupt10; /* DWORD 0 */
41 u8 interrupt11; /* DWORD 0 */
42 u8 interrupt12; /* DWORD 0 */
43 u8 interrupt13; /* DWORD 0 */
44 u8 interrupt14; /* DWORD 0 */
45 u8 interrupt15; /* DWORD 0 */
46 u8 interrupt16; /* DWORD 0 */
47 u8 interrupt17; /* DWORD 0 */
48 u8 interrupt18; /* DWORD 0 */
49 u8 interrupt19; /* DWORD 0 */
50 u8 interrupt20; /* DWORD 0 */
51 u8 interrupt21; /* DWORD 0 */
52 u8 interrupt22; /* DWORD 0 */
53 u8 interrupt23; /* DWORD 0 */
54 u8 interrupt24; /* DWORD 0 */
55 u8 interrupt25; /* DWORD 0 */
56 u8 interrupt26; /* DWORD 0 */
57 u8 interrupt27; /* DWORD 0 */
58 u8 interrupt28; /* DWORD 0 */
59 u8 interrupt29; /* DWORD 0 */
60 u8 interrupt30; /* DWORD 0 */
61 u8 interrupt31; /* DWORD 0 */
62} __packed;
63struct CEV_ISR0_CSR_AMAP {
64 u32 dw[1];
65};
66
67/*
68 * Host Interrupt Status Register 1. The second of four application
69 * interrupt status registers. This register contains the interrupts
70 * for Event Queues EQ32 through EQ63.
71 */
72struct BE_CEV_ISR1_CSR_AMAP {
73 u8 interrupt32; /* DWORD 0 */
74 u8 interrupt33; /* DWORD 0 */
75 u8 interrupt34; /* DWORD 0 */
76 u8 interrupt35; /* DWORD 0 */
77 u8 interrupt36; /* DWORD 0 */
78 u8 interrupt37; /* DWORD 0 */
79 u8 interrupt38; /* DWORD 0 */
80 u8 interrupt39; /* DWORD 0 */
81 u8 interrupt40; /* DWORD 0 */
82 u8 interrupt41; /* DWORD 0 */
83 u8 interrupt42; /* DWORD 0 */
84 u8 interrupt43; /* DWORD 0 */
85 u8 interrupt44; /* DWORD 0 */
86 u8 interrupt45; /* DWORD 0 */
87 u8 interrupt46; /* DWORD 0 */
88 u8 interrupt47; /* DWORD 0 */
89 u8 interrupt48; /* DWORD 0 */
90 u8 interrupt49; /* DWORD 0 */
91 u8 interrupt50; /* DWORD 0 */
92 u8 interrupt51; /* DWORD 0 */
93 u8 interrupt52; /* DWORD 0 */
94 u8 interrupt53; /* DWORD 0 */
95 u8 interrupt54; /* DWORD 0 */
96 u8 interrupt55; /* DWORD 0 */
97 u8 interrupt56; /* DWORD 0 */
98 u8 interrupt57; /* DWORD 0 */
99 u8 interrupt58; /* DWORD 0 */
100 u8 interrupt59; /* DWORD 0 */
101 u8 interrupt60; /* DWORD 0 */
102 u8 interrupt61; /* DWORD 0 */
103 u8 interrupt62; /* DWORD 0 */
104 u8 interrupt63; /* DWORD 0 */
105} __packed;
106struct CEV_ISR1_CSR_AMAP {
107 u32 dw[1];
108};
109/*
110 * Host Interrupt Status Register 2. The third of four application
111 * interrupt status registers. This register contains the interrupts
112 * for Event Queues EQ64 through EQ95.
113 */
114struct BE_CEV_ISR2_CSR_AMAP {
115 u8 interrupt64; /* DWORD 0 */
116 u8 interrupt65; /* DWORD 0 */
117 u8 interrupt66; /* DWORD 0 */
118 u8 interrupt67; /* DWORD 0 */
119 u8 interrupt68; /* DWORD 0 */
120 u8 interrupt69; /* DWORD 0 */
121 u8 interrupt70; /* DWORD 0 */
122 u8 interrupt71; /* DWORD 0 */
123 u8 interrupt72; /* DWORD 0 */
124 u8 interrupt73; /* DWORD 0 */
125 u8 interrupt74; /* DWORD 0 */
126 u8 interrupt75; /* DWORD 0 */
127 u8 interrupt76; /* DWORD 0 */
128 u8 interrupt77; /* DWORD 0 */
129 u8 interrupt78; /* DWORD 0 */
130 u8 interrupt79; /* DWORD 0 */
131 u8 interrupt80; /* DWORD 0 */
132 u8 interrupt81; /* DWORD 0 */
133 u8 interrupt82; /* DWORD 0 */
134 u8 interrupt83; /* DWORD 0 */
135 u8 interrupt84; /* DWORD 0 */
136 u8 interrupt85; /* DWORD 0 */
137 u8 interrupt86; /* DWORD 0 */
138 u8 interrupt87; /* DWORD 0 */
139 u8 interrupt88; /* DWORD 0 */
140 u8 interrupt89; /* DWORD 0 */
141 u8 interrupt90; /* DWORD 0 */
142 u8 interrupt91; /* DWORD 0 */
143 u8 interrupt92; /* DWORD 0 */
144 u8 interrupt93; /* DWORD 0 */
145 u8 interrupt94; /* DWORD 0 */
146 u8 interrupt95; /* DWORD 0 */
147} __packed;
148struct CEV_ISR2_CSR_AMAP {
149 u32 dw[1];
150};
151
152/*
153 * Host Interrupt Status Register 3. The fourth of four application
154 * interrupt status registers. This register contains the interrupts
155 * for Event Queues EQ96 through EQ127.
156 */
157struct BE_CEV_ISR3_CSR_AMAP {
158 u8 interrupt96; /* DWORD 0 */
159 u8 interrupt97; /* DWORD 0 */
160 u8 interrupt98; /* DWORD 0 */
161 u8 interrupt99; /* DWORD 0 */
162 u8 interrupt100; /* DWORD 0 */
163 u8 interrupt101; /* DWORD 0 */
164 u8 interrupt102; /* DWORD 0 */
165 u8 interrupt103; /* DWORD 0 */
166 u8 interrupt104; /* DWORD 0 */
167 u8 interrupt105; /* DWORD 0 */
168 u8 interrupt106; /* DWORD 0 */
169 u8 interrupt107; /* DWORD 0 */
170 u8 interrupt108; /* DWORD 0 */
171 u8 interrupt109; /* DWORD 0 */
172 u8 interrupt110; /* DWORD 0 */
173 u8 interrupt111; /* DWORD 0 */
174 u8 interrupt112; /* DWORD 0 */
175 u8 interrupt113; /* DWORD 0 */
176 u8 interrupt114; /* DWORD 0 */
177 u8 interrupt115; /* DWORD 0 */
178 u8 interrupt116; /* DWORD 0 */
179 u8 interrupt117; /* DWORD 0 */
180 u8 interrupt118; /* DWORD 0 */
181 u8 interrupt119; /* DWORD 0 */
182 u8 interrupt120; /* DWORD 0 */
183 u8 interrupt121; /* DWORD 0 */
184 u8 interrupt122; /* DWORD 0 */
185 u8 interrupt123; /* DWORD 0 */
186 u8 interrupt124; /* DWORD 0 */
187 u8 interrupt125; /* DWORD 0 */
188 u8 interrupt126; /* DWORD 0 */
189 u8 interrupt127; /* DWORD 0 */
190} __packed;
191struct CEV_ISR3_CSR_AMAP {
192 u32 dw[1];
193};
194
195/* Completions and Events block Registers. */
196struct BE_CEV_CSRMAP_AMAP {
197 u8 rsvd0[32]; /* DWORD 0 */
198 u8 rsvd1[32]; /* DWORD 1 */
199 u8 rsvd2[32]; /* DWORD 2 */
200 u8 rsvd3[32]; /* DWORD 3 */
201 struct BE_CEV_ISR0_CSR_AMAP isr0;
202 struct BE_CEV_ISR1_CSR_AMAP isr1;
203 struct BE_CEV_ISR2_CSR_AMAP isr2;
204 struct BE_CEV_ISR3_CSR_AMAP isr3;
205 u8 rsvd4[32]; /* DWORD 8 */
206 u8 rsvd5[32]; /* DWORD 9 */
207 u8 rsvd6[32]; /* DWORD 10 */
208 u8 rsvd7[32]; /* DWORD 11 */
209 u8 rsvd8[32]; /* DWORD 12 */
210 u8 rsvd9[32]; /* DWORD 13 */
211 u8 rsvd10[32]; /* DWORD 14 */
212 u8 rsvd11[32]; /* DWORD 15 */
213 u8 rsvd12[32]; /* DWORD 16 */
214 u8 rsvd13[32]; /* DWORD 17 */
215 u8 rsvd14[32]; /* DWORD 18 */
216 u8 rsvd15[32]; /* DWORD 19 */
217 u8 rsvd16[32]; /* DWORD 20 */
218 u8 rsvd17[32]; /* DWORD 21 */
219 u8 rsvd18[32]; /* DWORD 22 */
220 u8 rsvd19[32]; /* DWORD 23 */
221 u8 rsvd20[32]; /* DWORD 24 */
222 u8 rsvd21[32]; /* DWORD 25 */
223 u8 rsvd22[32]; /* DWORD 26 */
224 u8 rsvd23[32]; /* DWORD 27 */
225 u8 rsvd24[32]; /* DWORD 28 */
226 u8 rsvd25[32]; /* DWORD 29 */
227 u8 rsvd26[32]; /* DWORD 30 */
228 u8 rsvd27[32]; /* DWORD 31 */
229 u8 rsvd28[32]; /* DWORD 32 */
230 u8 rsvd29[32]; /* DWORD 33 */
231 u8 rsvd30[192]; /* DWORD 34 */
232 u8 rsvd31[192]; /* DWORD 40 */
233 u8 rsvd32[160]; /* DWORD 46 */
234 u8 rsvd33[160]; /* DWORD 51 */
235 u8 rsvd34[160]; /* DWORD 56 */
236 u8 rsvd35[96]; /* DWORD 61 */
237 u8 rsvd36[192][32]; /* DWORD 64 */
238} __packed;
239struct CEV_CSRMAP_AMAP {
240 u32 dw[256];
241};
242
243#endif /* __cev_amap_h__ */
diff --git a/drivers/staging/benet/cq.c b/drivers/staging/benet/cq.c
deleted file mode 100644
index 650458645433..000000000000
--- a/drivers/staging/benet/cq.c
+++ /dev/null
@@ -1,211 +0,0 @@
1/*
2 * Copyright (C) 2005 - 2008 ServerEngines
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@serverengines.com
12 *
13 * ServerEngines
14 * 209 N. Fair Oaks Ave
15 * Sunnyvale, CA 94085
16 */
17#include "hwlib.h"
18#include "bestatus.h"
19
20/*
21 * Completion Queue Objects
22 */
23/*
24 *============================================================================
25 * P U B L I C R O U T I N E S
26 *============================================================================
27 */
28
29/*
30 This routine creates a completion queue based on the client completion
31 queue configuration information.
32
33
34 FunctionObject - Handle to a function object
35 CqBaseVa - Base VA for the CQ ring
36 NumEntries - CEV_CQ_CNT_* values
37 solEventEnable - 0 = All CQEs can generate Events if CQ is eventable
38 1 = only CQEs with solicited bit set are eventable
39 eventable - Eventable CQ, generates interrupts.
40 nodelay - 1 = Force interrupt, relevant if CQ eventable.
41 Interrupt is asserted immediately after EQE
42 write is confirmed, regardless of EQ Timer
43 or watermark settings.
44 wme - Enable watermark based coalescing
45 wmThresh - High watermark (CQ fullness at which an event
46 or interrupt should be asserted). These are the
47 CEV_WATERMARK encoded values.
48 EqObject - EQ Handle to assign to this CQ
49 ppCqObject - Internal CQ Handle returned.
50
51 Returns BE_SUCCESS if successful, otherwise a useful error code is
52 returned.
53
54 IRQL < DISPATCH_LEVEL
55
56*/
57int be_cq_create(struct be_function_object *pfob,
58 struct ring_desc *rd, u32 length, bool solicited_eventable,
59 bool no_delay, u32 wm_thresh,
60 struct be_eq_object *eq_object, struct be_cq_object *cq_object)
61{
62 int status = BE_SUCCESS;
63 u32 num_entries_encoding;
64 u32 num_entries = length / sizeof(struct MCC_CQ_ENTRY_AMAP);
65 struct FWCMD_COMMON_CQ_CREATE *fwcmd = NULL;
66 struct MCC_WRB_AMAP *wrb = NULL;
67 u32 n;
68 unsigned long irql;
69
70 ASSERT(rd);
71 ASSERT(cq_object);
72 ASSERT(length % sizeof(struct MCC_CQ_ENTRY_AMAP) == 0);
73
74 switch (num_entries) {
75 case 256:
76 num_entries_encoding = CEV_CQ_CNT_256;
77 break;
78 case 512:
79 num_entries_encoding = CEV_CQ_CNT_512;
80 break;
81 case 1024:
82 num_entries_encoding = CEV_CQ_CNT_1024;
83 break;
84 default:
85 ASSERT(0);
86 return BE_STATUS_INVALID_PARAMETER;
87 }
88
89 /*
90	 * All CQ entries are the same size. Use the iSCSI version
91 * as a test for the proper rd length.
92 */
93 memset(cq_object, 0, sizeof(*cq_object));
94
95 atomic_set(&cq_object->ref_count, 0);
96 cq_object->parent_function = pfob;
97 cq_object->eq_object = eq_object;
98 cq_object->num_entries = num_entries;
99 /* save for MCC cq processing */
100 cq_object->va = rd->va;
101
102 /* map into UT. */
103 length = num_entries * sizeof(struct MCC_CQ_ENTRY_AMAP);
104
105 spin_lock_irqsave(&pfob->post_lock, irql);
106
107 wrb = be_function_peek_mcc_wrb(pfob);
108 if (!wrb) {
109 ASSERT(wrb);
110		TRACE(DL_ERR, "No free MCC WRBs in create CQ.");
111 status = BE_STATUS_NO_MCC_WRB;
112 goto Error;
113 }
114 /* Prepares an embedded fwcmd, including request/response sizes. */
115 fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_CQ_CREATE);
116
117 fwcmd->params.request.num_pages = PAGES_SPANNED(OFFSET_IN_PAGE(rd->va),
118 length);
119
120 AMAP_SET_BITS_PTR(CQ_CONTEXT, valid, &fwcmd->params.request.context, 1);
121 n = pfob->pci_function_number;
122 AMAP_SET_BITS_PTR(CQ_CONTEXT, Func, &fwcmd->params.request.context, n);
123
124 n = (eq_object != NULL);
125 AMAP_SET_BITS_PTR(CQ_CONTEXT, Eventable,
126 &fwcmd->params.request.context, n);
127 AMAP_SET_BITS_PTR(CQ_CONTEXT, Armed, &fwcmd->params.request.context, 1);
128
129 n = eq_object ? eq_object->eq_id : 0;
130 AMAP_SET_BITS_PTR(CQ_CONTEXT, EQID, &fwcmd->params.request.context, n);
131 AMAP_SET_BITS_PTR(CQ_CONTEXT, Count,
132 &fwcmd->params.request.context, num_entries_encoding);
133
134 n = 0; /* Protection Domain is always 0 in Linux driver */
135 AMAP_SET_BITS_PTR(CQ_CONTEXT, PD, &fwcmd->params.request.context, n);
136 AMAP_SET_BITS_PTR(CQ_CONTEXT, NoDelay,
137 &fwcmd->params.request.context, no_delay);
138 AMAP_SET_BITS_PTR(CQ_CONTEXT, SolEvent,
139 &fwcmd->params.request.context, solicited_eventable);
140
141 n = (wm_thresh != 0xFFFFFFFF);
142 AMAP_SET_BITS_PTR(CQ_CONTEXT, WME, &fwcmd->params.request.context, n);
143
144 n = (n ? wm_thresh : 0);
145 AMAP_SET_BITS_PTR(CQ_CONTEXT, Watermark,
146 &fwcmd->params.request.context, n);
147 /* Create a page list for the FWCMD. */
148 be_rd_to_pa_list(rd, fwcmd->params.request.pages,
149 ARRAY_SIZE(fwcmd->params.request.pages));
150
151 /* Post the f/w command */
152 status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL,
153 NULL, NULL, fwcmd, NULL);
154 if (status != BE_SUCCESS) {
155 TRACE(DL_ERR, "MCC to create CQ failed.");
156 goto Error;
157 }
158 /* Remember the CQ id. */
159 cq_object->cq_id = fwcmd->params.response.cq_id;
160
161 /* insert this cq into eq_object reference */
162 if (eq_object) {
163 atomic_inc(&eq_object->ref_count);
164 list_add_tail(&cq_object->cqlist_for_eq,
165 &eq_object->cq_list_head);
166 }
167
168Error:
169 spin_unlock_irqrestore(&pfob->post_lock, irql);
170
171 if (pfob->pend_queue_driving && pfob->mcc) {
172 pfob->pend_queue_driving = 0;
173 be_drive_mcc_wrb_queue(pfob->mcc);
174 }
175 return status;
176}
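A hedged caller-side sketch of this routine; the ring_desc 'rd', the EQ object 'eq', and 'pfob' are assumed to have been set up elsewhere:

	struct be_cq_object cq;
	int status;

	/* 1024 entries; a wm_thresh of 0xFFFFFFFF disables watermarks. */
	status = be_cq_create(pfob, &rd,
			      1024 * sizeof(struct MCC_CQ_ENTRY_AMAP),
			      false,		/* all CQEs can generate events */
			      false,		/* obey EQ timer/watermark */
			      0xFFFFFFFF,	/* no watermark coalescing */
			      &eq, &cq);
	if (status != BE_SUCCESS)
		TRACE(DL_ERR, "CQ create failed.");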
177
178/*
179
180 Dereferences the given object. Once the object's reference count drops to
181 zero, the object is destroyed and all resources that are held by this object
182 are released. The on-chip context is also destroyed along with the queue
183 ID, and any mappings made into the UT.
184
185 cq_object - CQ handle returned from cq_object_create.
186
187 returns the current reference count on the object
188
189 IRQL: IRQL < DISPATCH_LEVEL
190*/
191int be_cq_destroy(struct be_cq_object *cq_object)
192{
193 int status = 0;
194
195 /* Nothing should reference this CQ at this point. */
196 ASSERT(atomic_read(&cq_object->ref_count) == 0);
197
198 /* Send fwcmd to destroy the CQ. */
199 status = be_function_ring_destroy(cq_object->parent_function,
200 cq_object->cq_id, FWCMD_RING_TYPE_CQ,
201 NULL, NULL, NULL, NULL);
202 ASSERT(status == 0);
203
204 /* Remove reference if this is an eventable CQ. */
205 if (cq_object->eq_object) {
206 atomic_dec(&cq_object->eq_object->ref_count);
207 list_del(&cq_object->cqlist_for_eq);
208 }
209 return BE_SUCCESS;
210}
211
diff --git a/drivers/staging/benet/descriptors.h b/drivers/staging/benet/descriptors.h
deleted file mode 100644
index 8da438c407d2..000000000000
--- a/drivers/staging/benet/descriptors.h
+++ /dev/null
@@ -1,71 +0,0 @@
1/*
2 * Copyright (C) 2005 - 2008 ServerEngines
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@serverengines.com
12 *
13 * ServerEngines
14 * 209 N. Fair Oaks Ave
15 * Sunnyvale, CA 94085
16 */
17/*
18 * Autogenerated by srcgen version: 0127
19 */
20#ifndef __descriptors_amap_h__
21#define __descriptors_amap_h__
22
23/*
24 * --- IPC_NODE_ID_ENUM ---
25 * IPC processor id values
26 */
27#define TPOST_NODE_ID (0) /* TPOST ID */
28#define TPRE_NODE_ID (1) /* TPRE ID */
29#define TXULP0_NODE_ID (2) /* TXULP0 ID */
30#define TXULP1_NODE_ID (3) /* TXULP1 ID */
31#define TXULP2_NODE_ID (4) /* TXULP2 ID */
32#define RXULP0_NODE_ID (5) /* RXULP0 ID */
33#define RXULP1_NODE_ID (6) /* RXULP1 ID */
34#define RXULP2_NODE_ID (7) /* RXULP2 ID */
35#define MPU_NODE_ID (15) /* MPU ID */
36
37/*
38 * --- MAC_ID_ENUM ---
39 * Meaning of the mac_id field in rxpp_eth_d
40 */
41#define PORT0_HOST_MAC0 (0) /* PD 0, Port 0, host networking, MAC 0. */
42#define PORT0_HOST_MAC1 (1) /* PD 0, Port 0, host networking, MAC 1. */
43#define PORT0_STORAGE_MAC0 (2) /* PD 0, Port 0, host storage, MAC 0. */
44#define PORT0_STORAGE_MAC1 (3) /* PD 0, Port 0, host storage, MAC 1. */
45#define PORT1_HOST_MAC0 (4) /* PD 0, Port 1 host networking, MAC 0. */
46#define PORT1_HOST_MAC1 (5) /* PD 0, Port 1 host networking, MAC 1. */
47#define PORT1_STORAGE_MAC0 (6) /* PD 0, Port 1 host storage, MAC 0. */
48#define PORT1_STORAGE_MAC1 (7) /* PD 0, Port 1 host storage, MAC 1. */
49#define FIRST_VM_MAC (8) /* PD 1 MAC. Protection domains have IDs */
50 /* from 0x8-0x26, one per PD. */
51#define LAST_VM_MAC (38) /* PD 31 MAC. */
52#define MGMT_MAC (39) /* Management port MAC. */
53#define MARBLE_MAC0 (59) /* Used for flushing function 0 receive */
54 /*
55 * queues before re-using a torn-down
56 * receive ring. the DA =
57 * 00-00-00-00-00-00, and the MSB of the
58 * SA = 00
59 */
60#define MARBLE_MAC1 (60) /* Used for flushing function 1 receive */
61 /*
62 * queues before re-using a torn-down
63 * receive ring. the DA =
64 * 00-00-00-00-00-00, and the MSB of the
65 * SA != 00
66 */
67#define NULL_MAC (61) /* Promiscuous mode, indicates no match */
68#define MCAST_MAC (62) /* Multicast match. */
69#define BCAST_MATCH (63) /* Broadcast match. */
70
71#endif /* __descriptors_amap_h__ */
diff --git a/drivers/staging/benet/doorbells.h b/drivers/staging/benet/doorbells.h
deleted file mode 100644
index 550cc4d5d6f7..000000000000
--- a/drivers/staging/benet/doorbells.h
+++ /dev/null
@@ -1,179 +0,0 @@
1/*
2 * Copyright (C) 2005 - 2008 ServerEngines
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@serverengines.com
12 *
13 * ServerEngines
14 * 209 N. Fair Oaks Ave
15 * Sunnyvale, CA 94085
16 */
17/*
18 * Autogenerated by srcgen version: 0127
19 */
20#ifndef __doorbells_amap_h__
21#define __doorbells_amap_h__
22
23/* The TX/RDMA send queue doorbell. */
24struct BE_SQ_DB_AMAP {
25 u8 cid[11]; /* DWORD 0 */
26 u8 rsvd0[5]; /* DWORD 0 */
27 u8 numPosted[14]; /* DWORD 0 */
28 u8 rsvd1[2]; /* DWORD 0 */
29} __packed;
30struct SQ_DB_AMAP {
31 u32 dw[1];
32};
33
34/* The receive queue doorbell. */
35struct BE_RQ_DB_AMAP {
36 u8 rq[10]; /* DWORD 0 */
37 u8 rsvd0[13]; /* DWORD 0 */
38 u8 Invalidate; /* DWORD 0 */
39 u8 numPosted[8]; /* DWORD 0 */
40} __packed;
41struct RQ_DB_AMAP {
42 u32 dw[1];
43};
44
45/*
46 * The CQ/EQ doorbell. Software MUST set reserved fields in this
47 * descriptor to zero, otherwise (CEV) hardware will not execute the
48 * doorbell (flagging a bad_db_qid error instead).
49 */
50struct BE_CQ_DB_AMAP {
51 u8 qid[10]; /* DWORD 0 */
52 u8 rsvd0[4]; /* DWORD 0 */
53 u8 rearm; /* DWORD 0 */
54 u8 event; /* DWORD 0 */
55 u8 num_popped[13]; /* DWORD 0 */
56 u8 rsvd1[3]; /* DWORD 0 */
57} __packed;
58struct CQ_DB_AMAP {
59 u32 dw[1];
60};
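As a hedged illustration of composing this doorbell word from the AMAP layout above (the variable names are assumptions):

	/*
	 * Ack 'popped' completions and re-arm the CQ: qid occupies
	 * bits 0-9, rearm is bit 14, num_popped bits 16-28; reserved
	 * bits stay zero, as the comment above requires.
	 */
	u32 db = (cq_id & 0x3FF) | (1 << 14) | ((popped & 0x1FFF) << 16);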
61
62struct BE_TPM_RQ_DB_AMAP {
63 u8 qid[10]; /* DWORD 0 */
64 u8 rsvd0[6]; /* DWORD 0 */
65 u8 numPosted[11]; /* DWORD 0 */
66 u8 mss_cnt[5]; /* DWORD 0 */
67} __packed;
68struct TPM_RQ_DB_AMAP {
69 u32 dw[1];
70};
71
72/*
73 * Post WRB Queue Doorbell Register used by the host Storage stack
74 * to notify the controller of a posted Work Request Block
75 */
76struct BE_WRB_POST_DB_AMAP {
77 u8 wrb_cid[10]; /* DWORD 0 */
78 u8 rsvd0[6]; /* DWORD 0 */
79 u8 wrb_index[8]; /* DWORD 0 */
80 u8 numberPosted[8]; /* DWORD 0 */
81} __packed;
82struct WRB_POST_DB_AMAP {
83 u32 dw[1];
84};
85
86/*
87 * Update Default PDU Queue Doorbell Register used to communicate
88 * to the controller that the driver has stopped processing the queue
89 * and where in the queue it stopped; this is
90 * a CQ Entry Type. Used by the storage driver.
91 */
92struct BE_DEFAULT_PDU_DB_AMAP {
93 u8 qid[10]; /* DWORD 0 */
94 u8 rsvd0[4]; /* DWORD 0 */
95 u8 rearm; /* DWORD 0 */
96 u8 event; /* DWORD 0 */
97 u8 cqproc[14]; /* DWORD 0 */
98 u8 rsvd1[2]; /* DWORD 0 */
99} __packed;
100struct DEFAULT_PDU_DB_AMAP {
101 u32 dw[1];
102};
103
104/* Management Command and Controller default fragment ring */
105struct BE_MCC_DB_AMAP {
106 u8 rid[11]; /* DWORD 0 */
107 u8 rsvd0[5]; /* DWORD 0 */
108 u8 numPosted[14]; /* DWORD 0 */
109 u8 rsvd1[2]; /* DWORD 0 */
110} __packed;
111struct MCC_DB_AMAP {
112 u32 dw[1];
113};
114
115/*
116 * Used for bootstrapping the Host interface. This register is
117 * used for driver communication with the MPU when no MCC Rings exist.
118 * The software must write this register twice to post any MCC
119 * command. First, it writes the register with hi=1 and the upper bits of
120 * the physical address for the MCC_MAILBOX structure. Software must poll
121 * the ready bit until this is acknowledged. Then, software writes the
122 * register with hi=0 with the lower bits in the address. It must
123 * poll the ready bit until the MCC command is complete. Upon completion,
124 * the MCC_MAILBOX will contain a valid completion queue entry.
125 */
126struct BE_MPU_MAILBOX_DB_AMAP {
127 u8 ready; /* DWORD 0 */
128 u8 hi; /* DWORD 0 */
129 u8 address[30]; /* DWORD 0 */
130} __packed;
131struct MPU_MAILBOX_DB_AMAP {
132 u32 dw[1];
133};
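A hedged sketch of the two-phase post described above; the 'db' pointer and 'mbox_pa' address are assumptions, and the exact packing of the 30-bit address field is simplified for illustration:

	static void mpu_mailbox_post(void __iomem *db, u64 mbox_pa)
	{
		/* Phase 1: hi = 1 (bit 1) plus the upper address bits. */
		iowrite32(((u32)(mbox_pa >> 32) << 2) | (1 << 1), db);
		while (!(ioread32(db) & 1))	/* poll ready (bit 0) */
			cpu_relax();

		/* Phase 2: hi = 0 plus the lower address bits. */
		iowrite32((u32)mbox_pa & ~0x3u, db);
		while (!(ioread32(db) & 1))	/* poll until complete */
			cpu_relax();
	}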
134
135/*
136 * This is the protection domain doorbell register map. Note that
137 * while this map shows doorbells for all Blade Engine supported
138 * protocols, not all of these may be valid in a given function or
139 * protection domain. It is the responsibility of the application
140 * accessing the doorbells to know which are valid. Each doorbell
141 * occupies 32 bytes of space, but unless otherwise specified,
142 * only the first 4 bytes should be written. There are 32 instances
143 * of these doorbells for the host and 31 virtual machines respectively.
144 * The host and VMs will only map the doorbell pages belonging to their
145 * own protection domain. They will not be able to touch the doorbells for
146 * another VM. The doorbells are the only registers directly accessible
147 * by a virtual machine. Similarly, there are 511 additional
148 * doorbells for RDMA protection domains. PD 0 for RDMA shares
149 * the same physical protection domain doorbell page as ETH/iSCSI.
150 *
151 */
152struct BE_PROTECTION_DOMAIN_DBMAP_AMAP {
153 u8 rsvd0[512]; /* DWORD 0 */
154 struct BE_SQ_DB_AMAP rdma_sq_db;
155 u8 rsvd1[7][32]; /* DWORD 17 */
156 struct BE_WRB_POST_DB_AMAP iscsi_wrb_post_db;
157 u8 rsvd2[7][32]; /* DWORD 25 */
158 struct BE_SQ_DB_AMAP etx_sq_db;
159 u8 rsvd3[7][32]; /* DWORD 33 */
160 struct BE_RQ_DB_AMAP rdma_rq_db;
161 u8 rsvd4[7][32]; /* DWORD 41 */
162 struct BE_DEFAULT_PDU_DB_AMAP iscsi_default_pdu_db;
163 u8 rsvd5[7][32]; /* DWORD 49 */
164 struct BE_TPM_RQ_DB_AMAP tpm_rq_db;
165 u8 rsvd6[7][32]; /* DWORD 57 */
166 struct BE_RQ_DB_AMAP erx_rq_db;
167 u8 rsvd7[7][32]; /* DWORD 65 */
168 struct BE_CQ_DB_AMAP cq_db;
169 u8 rsvd8[7][32]; /* DWORD 73 */
170 struct BE_MCC_DB_AMAP mpu_mcc_db;
171 u8 rsvd9[7][32]; /* DWORD 81 */
172 struct BE_MPU_MAILBOX_DB_AMAP mcc_bootstrap_db;
173 u8 rsvd10[935][32]; /* DWORD 89 */
174} __packed;
175struct PROTECTION_DOMAIN_DBMAP_AMAP {
176 u32 dw[1024];
177};
178
179#endif /* __doorbells_amap_h__ */
diff --git a/drivers/staging/benet/ep.h b/drivers/staging/benet/ep.h
deleted file mode 100644
index 72fcf64a9ffb..000000000000
--- a/drivers/staging/benet/ep.h
+++ /dev/null
@@ -1,66 +0,0 @@
1/*
2 * Copyright (C) 2005 - 2008 ServerEngines
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@serverengines.com
12 *
13 * ServerEngines
14 * 209 N. Fair Oaks Ave
15 * Sunnyvale, CA 94085
16 */
17/*
18 * Autogenerated by srcgen version: 0127
19 */
20#ifndef __ep_amap_h__
21#define __ep_amap_h__
22
23/* General Control and Status Register. */
24struct BE_EP_CONTROL_CSR_AMAP {
25 u8 m0_RxPbuf; /* DWORD 0 */
26 u8 m1_RxPbuf; /* DWORD 0 */
27 u8 m2_RxPbuf; /* DWORD 0 */
28 u8 ff_en; /* DWORD 0 */
29 u8 rsvd0[27]; /* DWORD 0 */
30 u8 CPU_reset; /* DWORD 0 */
31} __packed;
32struct EP_CONTROL_CSR_AMAP {
33 u32 dw[1];
34};
35
36/* Semaphore Register. */
37struct BE_EP_SEMAPHORE_CSR_AMAP {
38 u8 value[32]; /* DWORD 0 */
39} __packed;
40struct EP_SEMAPHORE_CSR_AMAP {
41 u32 dw[1];
42};
43
44/* Embedded Processor Specific Registers. */
45struct BE_EP_CSRMAP_AMAP {
46 struct BE_EP_CONTROL_CSR_AMAP ep_control;
47 u8 rsvd0[32]; /* DWORD 1 */
48 u8 rsvd1[32]; /* DWORD 2 */
49 u8 rsvd2[32]; /* DWORD 3 */
50 u8 rsvd3[32]; /* DWORD 4 */
51 u8 rsvd4[32]; /* DWORD 5 */
52 u8 rsvd5[8][128]; /* DWORD 6 */
53 u8 rsvd6[32]; /* DWORD 38 */
54 u8 rsvd7[32]; /* DWORD 39 */
55 u8 rsvd8[32]; /* DWORD 40 */
56 u8 rsvd9[32]; /* DWORD 41 */
57 u8 rsvd10[32]; /* DWORD 42 */
58 struct BE_EP_SEMAPHORE_CSR_AMAP ep_semaphore;
59 u8 rsvd11[32]; /* DWORD 44 */
60 u8 rsvd12[19][32]; /* DWORD 45 */
61} __packed;
62struct EP_CSRMAP_AMAP {
63 u32 dw[64];
64};
65
66#endif /* __ep_amap_h__ */
diff --git a/drivers/staging/benet/eq.c b/drivers/staging/benet/eq.c
deleted file mode 100644
index db92ccd8fed8..000000000000
--- a/drivers/staging/benet/eq.c
+++ /dev/null
@@ -1,299 +0,0 @@
1/*
2 * Copyright (C) 2005 - 2008 ServerEngines
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@serverengines.com
12 *
13 * ServerEngines
14 * 209 N. Fair Oaks Ave
15 * Sunnyvale, CA 94085
16 */
17#include "hwlib.h"
18#include "bestatus.h"
19/*
20 This routine creates an event queue based on the client completion
21 queue configuration information.
22
23 FunctionObject - Handle to a function object
24 EqBaseVa - Base VA for the EQ ring
25 SizeEncoding - The encoded size for the EQ entries. This value is
26 either CEV_EQ_SIZE_4 or CEV_EQ_SIZE_16
27 NumEntries - CEV_CQ_CNT_* values.
28 Watermark - Enables watermark based coalescing. This parameter
29 must be of the type CEV_WMARK_* if watermarks
30 are enabled. If watermarks are to be disabled
31 this value should be -1.
32 TimerDelay - If a timer delay is enabled this value should be the
33 time of the delay in 8 microsecond units. If
34 delays are not used this parameter should be
35 set to -1.
36 ppEqObject - Internal EQ Handle returned.
37
38 Returns BE_SUCCESS if successful, otherwise a useful error code
39 is returned.
40
41 IRQL < DISPATCH_LEVEL
42*/
43int
44be_eq_create(struct be_function_object *pfob,
45 struct ring_desc *rd, u32 eqe_size, u32 num_entries,
46 u32 watermark, /* CEV_WMARK_* or -1 */
47 u32 timer_delay, /* in 8us units, or -1 */
48 struct be_eq_object *eq_object)
49{
50 int status = BE_SUCCESS;
51 u32 num_entries_encoding, eqe_size_encoding, length;
52 struct FWCMD_COMMON_EQ_CREATE *fwcmd = NULL;
53 struct MCC_WRB_AMAP *wrb = NULL;
54 u32 n;
55 unsigned long irql;
56
57 ASSERT(rd);
58 ASSERT(eq_object);
59
60 switch (num_entries) {
61 case 256:
62 num_entries_encoding = CEV_EQ_CNT_256;
63 break;
64 case 512:
65 num_entries_encoding = CEV_EQ_CNT_512;
66 break;
67 case 1024:
68 num_entries_encoding = CEV_EQ_CNT_1024;
69 break;
70 case 2048:
71 num_entries_encoding = CEV_EQ_CNT_2048;
72 break;
73 case 4096:
74 num_entries_encoding = CEV_EQ_CNT_4096;
75 break;
76 default:
77 ASSERT(0);
78 return BE_STATUS_INVALID_PARAMETER;
79 }
80
81 switch (eqe_size) {
82 case 4:
83 eqe_size_encoding = CEV_EQ_SIZE_4;
84 break;
85 case 16:
86 eqe_size_encoding = CEV_EQ_SIZE_16;
87 break;
88 default:
89 ASSERT(0);
90 return BE_STATUS_INVALID_PARAMETER;
91 }
92
93 if ((eqe_size == 4 && num_entries < 1024) ||
94 (eqe_size == 16 && num_entries == 4096)) {
95 TRACE(DL_ERR, "Bad EQ size. eqe_size:%d num_entries:%d",
96 eqe_size, num_entries);
97 ASSERT(0);
98 return BE_STATUS_INVALID_PARAMETER;
99 }
100
101 memset(eq_object, 0, sizeof(*eq_object));
102
103 atomic_set(&eq_object->ref_count, 0);
104 eq_object->parent_function = pfob;
105 eq_object->eq_id = 0xFFFFFFFF;
106
107 INIT_LIST_HEAD(&eq_object->cq_list_head);
108
109 length = num_entries * eqe_size;
110
111 spin_lock_irqsave(&pfob->post_lock, irql);
112
113 wrb = be_function_peek_mcc_wrb(pfob);
114 if (!wrb) {
115 ASSERT(wrb);
116 TRACE(DL_ERR, "No free MCC WRBs in create EQ.");
117 status = BE_STATUS_NO_MCC_WRB;
118 goto Error;
119 }
120 /* Prepares an embedded fwcmd, including request/response sizes. */
121 fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_EQ_CREATE);
122
123 fwcmd->params.request.num_pages = PAGES_SPANNED(OFFSET_IN_PAGE(rd->va),
124 length);
125 n = pfob->pci_function_number;
126 AMAP_SET_BITS_PTR(EQ_CONTEXT, Func, &fwcmd->params.request.context, n);
127
128 AMAP_SET_BITS_PTR(EQ_CONTEXT, valid, &fwcmd->params.request.context, 1);
129
130 AMAP_SET_BITS_PTR(EQ_CONTEXT, Size,
131 &fwcmd->params.request.context, eqe_size_encoding);
132
133 n = 0; /* Protection Domain is always 0 in Linux driver */
134 AMAP_SET_BITS_PTR(EQ_CONTEXT, PD, &fwcmd->params.request.context, n);
135
136 /* Let the caller ARM the EQ with the doorbell. */
137 AMAP_SET_BITS_PTR(EQ_CONTEXT, Armed, &fwcmd->params.request.context, 0);
138
139 AMAP_SET_BITS_PTR(EQ_CONTEXT, Count, &fwcmd->params.request.context,
140 num_entries_encoding);
141
142 n = pfob->pci_function_number * 32;
143 AMAP_SET_BITS_PTR(EQ_CONTEXT, EventVect,
144 &fwcmd->params.request.context, n);
145 if (watermark != -1) {
146 AMAP_SET_BITS_PTR(EQ_CONTEXT, WME,
147 &fwcmd->params.request.context, 1);
148 AMAP_SET_BITS_PTR(EQ_CONTEXT, Watermark,
149 &fwcmd->params.request.context, watermark);
150 ASSERT(watermark <= CEV_WMARK_240);
151 } else
152 AMAP_SET_BITS_PTR(EQ_CONTEXT, WME,
153 &fwcmd->params.request.context, 0);
154 if (timer_delay != -1) {
155 AMAP_SET_BITS_PTR(EQ_CONTEXT, TMR,
156 &fwcmd->params.request.context, 1);
157
158 ASSERT(timer_delay <= 250); /* max value according to EAS */
159 timer_delay = min(timer_delay, (u32)250);
160
161 AMAP_SET_BITS_PTR(EQ_CONTEXT, Delay,
162 &fwcmd->params.request.context, timer_delay);
163 } else {
164 AMAP_SET_BITS_PTR(EQ_CONTEXT, TMR,
165 &fwcmd->params.request.context, 0);
166 }
167 /* Create a page list for the FWCMD. */
168 be_rd_to_pa_list(rd, fwcmd->params.request.pages,
169 ARRAY_SIZE(fwcmd->params.request.pages));
170
171 status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL,
172 NULL, NULL, fwcmd, NULL);
173 if (status != BE_SUCCESS) {
174 TRACE(DL_ERR, "MCC to create EQ failed.");
175 goto Error;
176 }
177 /* Get the EQ id. The MPU allocates the IDs. */
178 eq_object->eq_id = fwcmd->params.response.eq_id;
179
180Error:
181 spin_unlock_irqrestore(&pfob->post_lock, irql);
182
183 if (pfob->pend_queue_driving && pfob->mcc) {
184 pfob->pend_queue_driving = 0;
185 be_drive_mcc_wrb_queue(pfob->mcc);
186 }
187 return status;
188}
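A hedged caller-side sketch; the ring setup is assumed done elsewhere, and the size/count pair is chosen to satisfy the checks above:

	struct be_eq_object eq;
	int status;

	/* 1024 4-byte EQEs, watermarks off, timer delay 8 x 8us = 64us. */
	status = be_eq_create(pfob, &rd, 4, 1024,
			      (u32)-1,	/* watermark: disabled */
			      8,	/* timer delay in 8us units */
			      &eq);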
189
190/*
191 Dereferences the given object. Once the object's reference count drops to
192 zero, the object is destroyed and all resources that are held by this
193 object are released. The on-chip context is also destroyed along with
194 the queue ID, and any mappings made into the UT.
195
196 eq_object - EQ handle returned from eq_object_create.
197
198 Returns BE_SUCCESS if successful, otherwise a useful error code
199 is returned.
200
201 IRQL: IRQL < DISPATCH_LEVEL
202*/
203int be_eq_destroy(struct be_eq_object *eq_object)
204{
205 int status = 0;
206
207 ASSERT(atomic_read(&eq_object->ref_count) == 0);
208 /* no CQs should reference this EQ now */
209 ASSERT(list_empty(&eq_object->cq_list_head));
210
211 /* Send fwcmd to destroy the EQ. */
212 status = be_function_ring_destroy(eq_object->parent_function,
213 eq_object->eq_id, FWCMD_RING_TYPE_EQ,
214 NULL, NULL, NULL, NULL);
215 ASSERT(status == 0);
216
217 return BE_SUCCESS;
218}
219/*
220 *---------------------------------------------------------------------------
221 * Function: be_eq_modify_delay
222 * Changes the EQ delay for a group of EQs.
223 * num_eq - The number of EQs in the eq_array to adjust.
224 * This also is the number of delay values in
225 * the eq_delay_array.
226 * eq_array - Array of struct be_eq_object pointers to adjust.
227 * eq_delay_array - Array of "num_eq" timer delays in units
228 * of microseconds. The be_eq_query_delay_range
229 * fwcmd returns the resolution and range of
230 * legal EQ delays.
231 * cb -
232 * cb_context -
233 * q_ctxt - Optional. Pointer to a previously allocated
234 * struct. If the MCC WRB ring is full, this
235 * structure is used to queue the operation. It
236 * will be posted to the MCC ring when space
237 * becomes available. All queued commands will
238 * be posted to the ring in the order they are
239 * received. It is always valid to pass a pointer to
240 *			a generic be_generic_q_ctxt. However,
241 * the specific context structs
242 * are generally smaller than the generic struct.
243 * return pend_status - BE_SUCCESS (0) on success.
244 *			BE_PENDING (positive value) if the FWCMD
245 * completion is pending. Negative error code on failure.
246 *-------------------------------------------------------------------------
247 */
248int
249be_eq_modify_delay(struct be_function_object *pfob,
250 u32 num_eq, struct be_eq_object **eq_array,
251 u32 *eq_delay_array, mcc_wrb_cqe_callback cb,
252 void *cb_context, struct be_eq_modify_delay_q_ctxt *q_ctxt)
253{
254 struct FWCMD_COMMON_MODIFY_EQ_DELAY *fwcmd = NULL;
255 struct MCC_WRB_AMAP *wrb = NULL;
256 int status = 0;
257 struct be_generic_q_ctxt *gen_ctxt = NULL;
258 u32 i;
259 unsigned long irql;
260
261 spin_lock_irqsave(&pfob->post_lock, irql);
262
263 wrb = be_function_peek_mcc_wrb(pfob);
264 if (!wrb) {
265 if (q_ctxt && cb) {
266 wrb = (struct MCC_WRB_AMAP *) &q_ctxt->wrb_header;
267 gen_ctxt = (struct be_generic_q_ctxt *) q_ctxt;
268 gen_ctxt->context.bytes = sizeof(*q_ctxt);
269 } else {
270 status = BE_STATUS_NO_MCC_WRB;
271 goto Error;
272 }
273 }
274 /* Prepares an embedded fwcmd, including request/response sizes. */
275 fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_MODIFY_EQ_DELAY);
276
277 ASSERT(num_eq > 0);
278 ASSERT(num_eq <= ARRAY_SIZE(fwcmd->params.request.delay));
279 fwcmd->params.request.num_eq = num_eq;
280 for (i = 0; i < num_eq; i++) {
281 fwcmd->params.request.delay[i].eq_id = eq_array[i]->eq_id;
282 fwcmd->params.request.delay[i].delay_in_microseconds =
283 eq_delay_array[i];
284 }
285
286 /* Post the f/w command */
287 status = be_function_post_mcc_wrb(pfob, wrb, gen_ctxt,
288 cb, cb_context, NULL, NULL, fwcmd, NULL);
289
290Error:
291 spin_unlock_irqrestore(&pfob->post_lock, irql);
292
293 if (pfob->pend_queue_driving && pfob->mcc) {
294 pfob->pend_queue_driving = 0;
295 be_drive_mcc_wrb_queue(pfob->mcc);
296 }
297 return status;
298}
299
diff --git a/drivers/staging/benet/eth.c b/drivers/staging/benet/eth.c
deleted file mode 100644
index f641b6260d07..000000000000
--- a/drivers/staging/benet/eth.c
+++ /dev/null
@@ -1,1273 +0,0 @@
1/*
2 * Copyright (C) 2005 - 2008 ServerEngines
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@serverengines.com
12 *
13 * ServerEngines
14 * 209 N. Fair Oaks Ave
15 * Sunnyvale, CA 94085
16 */
17#include <linux/if_ether.h>
18#include "hwlib.h"
19#include "bestatus.h"
20
21/*
22 *---------------------------------------------------------
23 * Function: be_eth_sq_create_ex
24 * Creates an ethernet send ring - extended version with
25 * additional parameters.
26 * pfob -
27 * rd - ring address
28 * length_in_bytes -
29 * type - The type of ring to create.
30 * ulp - The requested ULP number for the ring.
31 * This should be zero based, i.e. 0,1,2. This must
32 * be valid NIC ULP based on the firmware config.
33 * All doorbells for this ring must be sent to
34 * this ULP. The first network ring allocated for
35 *		each ULP is higher performance than subsequent rings.
36 * cq_object - cq object for completions
37 * ex_parameters - Additional parameters (that may increase in
38 * future revisions). These parameters are only used
39 * for certain ring types -- see
40 * struct be_eth_sq_parameters for details.
41 * eth_sq -
42 * return status - BE_SUCCESS (0) on success. Negative error code on failure.
43 *---------------------------------------------------------
44 */
45int
46be_eth_sq_create_ex(struct be_function_object *pfob, struct ring_desc *rd,
47 u32 length, u32 type, u32 ulp, struct be_cq_object *cq_object,
48 struct be_eth_sq_parameters *ex_parameters,
49 struct be_ethsq_object *eth_sq)
50{
51 struct FWCMD_COMMON_ETH_TX_CREATE *fwcmd = NULL;
52 struct MCC_WRB_AMAP *wrb = NULL;
53 int status = 0;
54 u32 n;
55 unsigned long irql;
56
57 ASSERT(rd);
58 ASSERT(eth_sq);
59 ASSERT(ex_parameters);
60
61 spin_lock_irqsave(&pfob->post_lock, irql);
62
63 memset(eth_sq, 0, sizeof(*eth_sq));
64
65 eth_sq->parent_function = pfob;
66 eth_sq->bid = 0xFFFFFFFF;
67 eth_sq->cq_object = cq_object;
68
69 /* Translate hwlib interface to arm interface. */
70 switch (type) {
71 case BE_ETH_TX_RING_TYPE_FORWARDING:
72 type = ETH_TX_RING_TYPE_FORWARDING;
73 break;
74 case BE_ETH_TX_RING_TYPE_STANDARD:
75 type = ETH_TX_RING_TYPE_STANDARD;
76 break;
77 case BE_ETH_TX_RING_TYPE_BOUND:
78 ASSERT(ex_parameters->port < 2);
79 type = ETH_TX_RING_TYPE_BOUND;
80 break;
81 default:
82 TRACE(DL_ERR, "Invalid eth tx ring type:%d", type);
83 status = BE_NOT_OK;
84 goto Error;
85 }
86
87 wrb = be_function_peek_mcc_wrb(pfob);
88 if (!wrb) {
89 ASSERT(wrb);
90 TRACE(DL_ERR, "No free MCC WRBs in create eth tx ring.");
91 status = BE_STATUS_NO_MCC_WRB;
92 goto Error;
93 }
94 /* NIC must be supported by the current config. */
95 ASSERT(pfob->fw_config.nic_ulp_mask);
96
97 /*
98 * The ulp parameter must select a valid NIC ULP
99 * for the current config.
100 */
101 ASSERT((1 << ulp) & pfob->fw_config.nic_ulp_mask);
102
103 /* Prepares an embedded fwcmd, including request/response sizes. */
104 fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_ETH_TX_CREATE);
105 fwcmd->header.request.port_number = ex_parameters->port;
106
107 AMAP_SET_BITS_PTR(ETX_CONTEXT, pd_id,
108 &fwcmd->params.request.context, 0);
109
110 n = be_ring_length_to_encoding(length, sizeof(struct ETH_WRB_AMAP));
111 AMAP_SET_BITS_PTR(ETX_CONTEXT, tx_ring_size,
112 &fwcmd->params.request.context, n);
113
114 AMAP_SET_BITS_PTR(ETX_CONTEXT, cq_id_send,
115 &fwcmd->params.request.context, cq_object->cq_id);
116
117 n = pfob->pci_function_number;
118 AMAP_SET_BITS_PTR(ETX_CONTEXT, func, &fwcmd->params.request.context, n);
119
120 fwcmd->params.request.type = type;
121 fwcmd->params.request.ulp_num = (1 << ulp);
122 fwcmd->params.request.num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
123 ASSERT(PAGES_SPANNED(rd->va, rd->length) >=
124 fwcmd->params.request.num_pages);
125
126 /* Create a page list for the FWCMD. */
127 be_rd_to_pa_list(rd, fwcmd->params.request.pages,
128 ARRAY_SIZE(fwcmd->params.request.pages));
129
130 status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL,
131 NULL, NULL, fwcmd, NULL);
132 if (status != BE_SUCCESS) {
133 TRACE(DL_ERR, "MCC to create etx queue failed.");
134 goto Error;
135 }
136 /* save the butler ID */
137 eth_sq->bid = fwcmd->params.response.cid;
138
139 /* add a reference to the corresponding CQ */
140 atomic_inc(&cq_object->ref_count);
141
142Error:
143 spin_unlock_irqrestore(&pfob->post_lock, irql);
144
145 if (pfob->pend_queue_driving && pfob->mcc) {
146 pfob->pend_queue_driving = 0;
147 be_drive_mcc_wrb_queue(pfob->mcc);
148 }
149 return status;
150}
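/*
 * Illustrative sketch, not part of the original file: creating a standard
 * send ring with be_eth_sq_create_ex(). It assumes the caller has already
 * created the completion queue and the ring_desc; the helper name is
 * hypothetical.
 */
static inline int example_create_tx_ring(struct be_function_object *pfob,
					 struct ring_desc *tx_rd,
					 struct be_cq_object *tx_cq,
					 struct be_ethsq_object *tx_sq)
{
	struct be_eth_sq_parameters params = { 0 };

	params.port = 0;	/* only consulted for BOUND ring types */

	/* ULP 0 must be a valid NIC ULP in the firmware config. */
	return be_eth_sq_create_ex(pfob, tx_rd, tx_rd->length,
				   BE_ETH_TX_RING_TYPE_STANDARD, 0,
				   tx_cq, &params, tx_sq);
}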
151
152
153/*
154 This routine destroys an ethernet send queue
155
156 EthSq - EthSq Handle returned from EthSqCreate
157
158 This function always returns BE_SUCCESS.
159
160 This function frees memory allocated by EthSqCreate for the EthSq Object.
161
162*/
163int be_eth_sq_destroy(struct be_ethsq_object *eth_sq)
164{
165 int status = 0;
166
167 /* Send fwcmd to destroy the queue. */
168 status = be_function_ring_destroy(eth_sq->parent_function, eth_sq->bid,
169 FWCMD_RING_TYPE_ETH_TX, NULL, NULL, NULL, NULL);
170 ASSERT(status == 0);
171
172 /* Dereference any associated CQs. */
173 atomic_dec(&eth_sq->cq_object->ref_count);
174 return status;
175}
176/*
177 This routine attempts to set the transmit flow control parameters.
178
179 FunctionObject - Handle to a function object
180
181 txfc_enable - transmit flow control enable - true for
182 enable, false for disable
183
184 rxfc_enable - receive flow control enable - true for
185 enable, false for disable
186
187 Returns BE_SUCCESS if successful, otherwise a useful int error
188 code is returned.
189
190 IRQL: < DISPATCH_LEVEL
191
192 This function always fails in non-privileged machine context.
193*/
194int
195be_eth_set_flow_control(struct be_function_object *pfob,
196 bool txfc_enable, bool rxfc_enable)
197{
198 struct FWCMD_COMMON_SET_FLOW_CONTROL *fwcmd = NULL;
199 struct MCC_WRB_AMAP *wrb = NULL;
200 int status = 0;
201 unsigned long irql;
202
203 spin_lock_irqsave(&pfob->post_lock, irql);
204
205 wrb = be_function_peek_mcc_wrb(pfob);
206 if (!wrb) {
207 TRACE(DL_ERR, "MCC wrb peek failed.");
208 status = BE_STATUS_NO_MCC_WRB;
209 goto error;
210 }
211 /* Prepares an embedded fwcmd, including request/response sizes. */
212 fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_SET_FLOW_CONTROL);
213
214 fwcmd->params.request.rx_flow_control = rxfc_enable;
215 fwcmd->params.request.tx_flow_control = txfc_enable;
216
217 /* Post the f/w command */
218 status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL,
219 NULL, NULL, fwcmd, NULL);
220
221 if (status != 0) {
222 TRACE(DL_ERR, "set flow control fwcmd failed.");
223 goto error;
224 }
225
226error:
227 spin_unlock_irqrestore(&pfob->post_lock, irql);
228
229 if (pfob->pend_queue_driving && pfob->mcc) {
230 pfob->pend_queue_driving = 0;
231 be_drive_mcc_wrb_queue(pfob->mcc);
232 }
233 return status;
234}
235
236/*
237 This routine attempts to get the transmit flow control parameters.
238
239 pfob - Handle to a function object
240
241 txfc_enable - transmit flow control enable - true for
242 enable, false for disable
243
244 rxfc_enable - receive flow control enable - true for enable,
245 false for disable
246
247 Returns BE_SUCCESS if successful, otherwise a useful int error code
248 is returned.
249
250 IRQL: < DISPATCH_LEVEL
251
252 This function always fails in non-privileged machine context.
253*/
254int
255be_eth_get_flow_control(struct be_function_object *pfob,
256 bool *txfc_enable, bool *rxfc_enable)
257{
258 struct FWCMD_COMMON_GET_FLOW_CONTROL *fwcmd = NULL;
259 struct MCC_WRB_AMAP *wrb = NULL;
260 int status = 0;
261 unsigned long irql;
262
263 spin_lock_irqsave(&pfob->post_lock, irql);
264
265 wrb = be_function_peek_mcc_wrb(pfob);
266 if (!wrb) {
267 TRACE(DL_ERR, "MCC wrb peek failed.");
268 status = BE_STATUS_NO_MCC_WRB;
269 goto error;
270 }
271 /* Prepares an embedded fwcmd, including request/response sizes. */
272 fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_GET_FLOW_CONTROL);
273
274 /* Post the f/w command */
275 status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL,
276 NULL, NULL, fwcmd, NULL);
277
278 if (status != 0) {
279 TRACE(DL_ERR, "get flow control fwcmd failed.");
280 goto error;
281 }
282
283 *txfc_enable = fwcmd->params.response.tx_flow_control;
284 *rxfc_enable = fwcmd->params.response.rx_flow_control;
285
286error:
287 spin_unlock_irqrestore(&pfob->post_lock, irql);
288
289 if (pfob->pend_queue_driving && pfob->mcc) {
290 pfob->pend_queue_driving = 0;
291 be_drive_mcc_wrb_queue(pfob->mcc);
292 }
293 return status;
294}
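/*
 * Illustrative sketch, not part of the original file: pairing the get/set
 * flow control FWCMDs above to turn off TX pause while preserving the
 * current RX pause setting. The helper name is hypothetical.
 */
static inline int example_disable_tx_pause(struct be_function_object *pfob)
{
	bool tx_on, rx_on;
	int status;

	status = be_eth_get_flow_control(pfob, &tx_on, &rx_on);
	if (status != BE_SUCCESS)
		return status;

	return be_eth_set_flow_control(pfob, false, rx_on);
}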
295
296/*
297 *---------------------------------------------------------
298 * Function: be_eth_set_qos
299 * This function sets the ethernet transmit Quality of Service (QoS)
300 * characteristics of BladeEngine for the domain. All ethernet
301 * transmit rings of the domain will evenly share the bandwidth.
302 * The exception to sharing is the host primary (super) ethernet
303 * transmit ring as well as the host ethernet forwarding ring
304 * for missed offload data.
305 * pfob -
306 * max_bps - the maximum bits per second in units of
307 * 10 Mbps (valid 0-100)
308 * max_pps - the maximum packets per second in units
309 * of 1 Kpps (0 indicates no limit)
310 * return status - BE_SUCCESS (0) on success. Negative error code on failure.
311 *---------------------------------------------------------
312 */
313int
314be_eth_set_qos(struct be_function_object *pfob, u32 max_bps, u32 max_pps)
315{
316 struct FWCMD_COMMON_SET_QOS *fwcmd = NULL;
317 struct MCC_WRB_AMAP *wrb = NULL;
318 int status = 0;
319 unsigned long irql;
320
321 spin_lock_irqsave(&pfob->post_lock, irql);
322
323 wrb = be_function_peek_mcc_wrb(pfob);
324 if (!wrb) {
325 TRACE(DL_ERR, "MCC wrb peek failed.");
326 status = BE_STATUS_NO_MCC_WRB;
327 goto error;
328 }
329 /* Prepares an embedded fwcmd, including request/response sizes. */
330 fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_SET_QOS);
331
332 /* Set fields in fwcmd */
333 fwcmd->params.request.max_bits_per_second_NIC = max_bps;
334 fwcmd->params.request.max_packets_per_second_NIC = max_pps;
335 fwcmd->params.request.valid_flags = QOS_BITS_NIC | QOS_PKTS_NIC;
336
337 /* Post the f/w command */
338 status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL,
339 NULL, NULL, fwcmd, NULL);
340
341 if (status != 0)
342 TRACE(DL_ERR, "network set qos fwcmd failed.");
343
344error:
345 spin_unlock_irqrestore(&pfob->post_lock, irql);
346 if (pfob->pend_queue_driving && pfob->mcc) {
347 pfob->pend_queue_driving = 0;
348 be_drive_mcc_wrb_queue(pfob->mcc);
349 }
350 return status;
351}
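/*
 * Illustrative sketch, not part of the original file: capping the domain at
 * 1 Gbps with no packet-rate limit. Per the comment block above, max_bps is
 * in units of 10 Mbps (so 100 == 1 Gbps) and max_pps of 0 means unlimited.
 */
static inline int example_cap_at_1gbps(struct be_function_object *pfob)
{
	return be_eth_set_qos(pfob, 100 /* 100 x 10 Mbps */, 0 /* no limit */);
}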
352
353/*
354 *---------------------------------------------------------
355 * Function: be_eth_get_qos
356 * This function retrieves the ethernet transmit Quality of Service (QoS)
357 * characteristics for the domain.
358 * max_bps - the maximum bits per second in units of
359 * 10 Mbps (valid 0-100)
360 * max_pps - the maximum packets per second in units of
361 * 1 Kpps (0 indicates no limit)
362 * return status - BE_SUCCESS (0) on success. Negative error code on failure.
363 *---------------------------------------------------------
364 */
365int
366be_eth_get_qos(struct be_function_object *pfob, u32 *max_bps, u32 *max_pps)
367{
368 struct FWCMD_COMMON_GET_QOS *fwcmd = NULL;
369 struct MCC_WRB_AMAP *wrb = NULL;
370 int status = 0;
371 unsigned long irql;
372
373 spin_lock_irqsave(&pfob->post_lock, irql);
374
375 wrb = be_function_peek_mcc_wrb(pfob);
376 if (!wrb) {
377 TRACE(DL_ERR, "MCC wrb peek failed.");
378 status = BE_STATUS_NO_MCC_WRB;
379 goto error;
380 }
381 /* Prepares an embedded fwcmd, including request/response sizes. */
382 fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_GET_QOS);
383
384 /* Post the f/w command */
385 status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL,
386 NULL, NULL, fwcmd, NULL);
387
388 if (status != 0) {
389 TRACE(DL_ERR, "network get qos fwcmd failed.");
390 goto error;
391 }
392
393 *max_bps = fwcmd->params.response.max_bits_per_second_NIC;
394 *max_pps = fwcmd->params.response.max_packets_per_second_NIC;
395
396error:
397 spin_unlock_irqrestore(&pfob->post_lock, irql);
398 if (pfob->pend_queue_driving && pfob->mcc) {
399 pfob->pend_queue_driving = 0;
400 be_drive_mcc_wrb_queue(pfob->mcc);
401 }
402 return status;
403}
404
405/*
406 *---------------------------------------------------------
407 * Function: be_eth_set_frame_size
408 * This function sets the ethernet maximum frame size. The previous
409 * values are returned.
410 * pfob -
411 * tx_frame_size - maximum transmit frame size in bytes
412 * rx_frame_size - maximum receive frame size in bytes
413 * return status - BE_SUCCESS (0) on success. Negative error code on failure.
414 *---------------------------------------------------------
415 */
416int
417be_eth_set_frame_size(struct be_function_object *pfob,
418 u32 *tx_frame_size, u32 *rx_frame_size)
419{
420 struct FWCMD_COMMON_SET_FRAME_SIZE *fwcmd = NULL;
421 struct MCC_WRB_AMAP *wrb = NULL;
422 int status = 0;
423 unsigned long irql;
424
425 spin_lock_irqsave(&pfob->post_lock, irql);
426
427 wrb = be_function_peek_mcc_wrb(pfob);
428 if (!wrb) {
429 TRACE(DL_ERR, "MCC wrb peek failed.");
430 status = BE_STATUS_NO_MCC_WRB;
431 goto error;
432 }
433 /* Prepares an embedded fwcmd, including request/response sizes. */
434 fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_SET_FRAME_SIZE);
435 fwcmd->params.request.max_tx_frame_size = *tx_frame_size;
436 fwcmd->params.request.max_rx_frame_size = *rx_frame_size;
437
438 /* Post the f/w command */
439 status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL,
440 NULL, NULL, fwcmd, NULL);
441
442 if (status != 0) {
443 TRACE(DL_ERR, "network set frame size fwcmd failed.");
444 goto error;
445 }
446
447 *tx_frame_size = fwcmd->params.response.chip_max_tx_frame_size;
448 *rx_frame_size = fwcmd->params.response.chip_max_rx_frame_size;
449
450error:
451 spin_unlock_irqrestore(&pfob->post_lock, irql);
452 if (pfob->pend_queue_driving && pfob->mcc) {
453 pfob->pend_queue_driving = 0;
454 be_drive_mcc_wrb_queue(pfob->mcc);
455 }
456 return status;
457}
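/*
 * Illustrative sketch, not part of the original file: requesting jumbo
 * frames. Note the in/out convention of be_eth_set_frame_size(): requested
 * sizes go in, and the sizes reported by firmware are written back to the
 * same variables.
 */
static inline int example_enable_jumbo_frames(struct be_function_object *pfob)
{
	u32 tx_size = 9000, rx_size = 9000;
	int status;

	status = be_eth_set_frame_size(pfob, &tx_size, &rx_size);
	if (status == BE_SUCCESS)
		TRACE(DL_INFO, "frame sizes: tx:%d rx:%d", tx_size, rx_size);
	return status;
}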
458
459
460/*
461 This routine creates a Ethernet receive ring.
462
463 pfob - handle to a function object
464 rq_base_va - base VA for the default receive ring. this must be
465 exactly 8K in length and contiguous physical memory.
466 cq_object - handle to a previously created CQ to be associated
467 with the RQ.
468 pp_eth_rq - pointer to an opaque handle where an eth
469 receive object is returned.
470 Returns BE_SUCCESS if successful, otherwise a useful
471 int error code is returned.
472
473 IRQL: < DISPATCH_LEVEL
474 this function allocates a struct be_ethrq_object *object.
475 there must be no more than 1 of these per function object, unless the
476 function object supports RSS (is networking and on the host).
477 the rq_base_va must point to a buffer of exactly 8K.
478 the erx::host_cqid (or host_stor_cqid) register and erx::ring_page registers
479 will be updated as appropriate on return
480*/
481int
482be_eth_rq_create(struct be_function_object *pfob,
483 struct ring_desc *rd, struct be_cq_object *cq_object,
484 struct be_cq_object *bcmc_cq_object,
485 struct be_ethrq_object *eth_rq)
486{
487 int status = 0;
488 struct MCC_WRB_AMAP *wrb = NULL;
489 struct FWCMD_COMMON_ETH_RX_CREATE *fwcmd = NULL;
490 unsigned long irql;
491
492 /* MPU will set the */
493 ASSERT(rd);
494 ASSERT(eth_rq);
495
496 spin_lock_irqsave(&pfob->post_lock, irql);
497
498 eth_rq->parent_function = pfob;
499 eth_rq->cq_object = cq_object;
500
501 wrb = be_function_peek_mcc_wrb(pfob);
502 if (!wrb) {
503 TRACE(DL_ERR, "MCC wrb peek failed.");
504 status = BE_STATUS_NO_MCC_WRB;
505 goto Error;
506 }
507 /* Prepares an embedded fwcmd, including request/response sizes. */
508 fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_ETH_RX_CREATE);
509
510 fwcmd->params.request.num_pages = 2; /* required length */
511 fwcmd->params.request.cq_id = cq_object->cq_id;
512
513 if (bcmc_cq_object)
514 fwcmd->params.request.bcmc_cq_id = bcmc_cq_object->cq_id;
515 else
516 fwcmd->params.request.bcmc_cq_id = 0xFFFF;
517
518 /* Create a page list for the FWCMD. */
519 be_rd_to_pa_list(rd, fwcmd->params.request.pages,
520 ARRAY_SIZE(fwcmd->params.request.pages));
521
522 /* Post the f/w command */
523 status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL,
524 NULL, NULL, fwcmd, NULL);
525 if (status != BE_SUCCESS) {
526 TRACE(DL_ERR, "fwcmd to map eth rxq frags failed.");
527 goto Error;
528 }
529 /* Save the ring ID for cleanup. */
530 eth_rq->rid = fwcmd->params.response.id;
531
532 atomic_inc(&cq_object->ref_count);
533
534Error:
535 spin_unlock_irqrestore(&pfob->post_lock, irql);
536
537 if (pfob->pend_queue_driving && pfob->mcc) {
538 pfob->pend_queue_driving = 0;
539 be_drive_mcc_wrb_queue(pfob->mcc);
540 }
541 return status;
542}
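/*
 * Illustrative sketch, not part of the original file: creating the default
 * receive ring. rx_rd must describe the 8K (two page) buffer required by
 * the comment block above; passing NULL for the bcmc CQ makes the FWCMD use
 * the 0xFFFF "no bcmc CQ" encoding.
 */
static inline int example_create_rx_ring(struct be_function_object *pfob,
					 struct ring_desc *rx_rd,
					 struct be_cq_object *rx_cq,
					 struct be_ethrq_object *rx_rq)
{
	return be_eth_rq_create(pfob, rx_rd, rx_cq, NULL, rx_rq);
}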
543
544/*
545 This routine destroys an Ethernet receive queue
546
547 eth_rq - ethernet receive queue handle returned from eth_rq_create
548
549 Returns BE_SUCCESS on success and an appropriate int on failure.
550
551 This function frees resources allocated by EthRqCreate.
552 The erx::host_cqid (or host_stor_cqid) register and erx::ring_page
553 registers will be updated as appropriate on return
554 IRQL: < DISPATCH_LEVEL
555*/
556
557static void be_eth_rq_destroy_internal_cb(void *context, int status,
558 struct MCC_WRB_AMAP *wrb)
559{
560 struct be_ethrq_object *eth_rq = (struct be_ethrq_object *) context;
561
562 if (status != BE_SUCCESS) {
563 TRACE(DL_ERR, "Destroy eth rq failed in internal callback.\n");
564 } else {
565 /* Dereference any CQs associated with this queue. */
566 atomic_dec(&eth_rq->cq_object->ref_count);
567 }
568
569 return;
570}
571
572int be_eth_rq_destroy(struct be_ethrq_object *eth_rq)
573{
574 int status = BE_SUCCESS;
575
576 /* Send fwcmd to destroy the RQ. */
577 status = be_function_ring_destroy(eth_rq->parent_function,
578 eth_rq->rid, FWCMD_RING_TYPE_ETH_RX, NULL, NULL,
579 be_eth_rq_destroy_internal_cb, eth_rq);
580
581 return status;
582}
583
584/*
585 *---------------------------------------------------------------------------
586 * Function: be_eth_rq_destroy_options
587 * Destroys an ethernet receive ring with finer granularity options
588 * than the standard be_eth_rq_destroy() API function.
589 * eth_rq -
590 * flush - Set to 1 to flush the ring, set to 0 to bypass the flush
591 * cb - Callback function on completion
592 * cb_context - Callback context
593 * return status - BE_SUCCESS (0) on success. Negative error code on failure.
594 *----------------------------------------------------------------------------
595 */
596int
597be_eth_rq_destroy_options(struct be_ethrq_object *eth_rq, bool flush,
598 mcc_wrb_cqe_callback cb, void *cb_context)
599{
600 struct FWCMD_COMMON_RING_DESTROY *fwcmd = NULL;
601 struct MCC_WRB_AMAP *wrb = NULL;
602 int status = BE_SUCCESS;
603 struct be_function_object *pfob = NULL;
604 unsigned long irql;
605
606 pfob = eth_rq->parent_function;
607
608 spin_lock_irqsave(&pfob->post_lock, irql);
609
610 TRACE(DL_INFO, "Destroy eth_rq ring id:%d, flush:%d", eth_rq->rid,
611 flush);
612
613 wrb = be_function_peek_mcc_wrb(pfob);
614 if (!wrb) {
615 ASSERT(wrb);
616 TRACE(DL_ERR, "No free MCC WRBs in destroy eth_rq ring.");
617 status = BE_STATUS_NO_MCC_WRB;
618 goto Error;
619 }
620 /* Prepares an embedded fwcmd, including request/response sizes. */
621 fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_RING_DESTROY);
622
623 fwcmd->params.request.id = eth_rq->rid;
624 fwcmd->params.request.ring_type = FWCMD_RING_TYPE_ETH_RX;
625 fwcmd->params.request.bypass_flush = (flush ? 0 : 1);
626
627 /* Post the f/w command */
628 status = be_function_post_mcc_wrb(pfob, wrb, NULL, cb, cb_context,
629 be_eth_rq_destroy_internal_cb, eth_rq, fwcmd, NULL);
630
631 if (status != BE_SUCCESS && status != BE_PENDING) {
632 TRACE(DL_ERR, "eth_rq ring destroy failed. id:%d, flush:%d",
633 eth_rq->rid, flush);
634 goto Error;
635 }
636
637Error:
638 spin_unlock_irqrestore(&pfob->post_lock, irql);
639
640 if (pfob->pend_queue_driving && pfob->mcc) {
641 pfob->pend_queue_driving = 0;
642 be_drive_mcc_wrb_queue(pfob->mcc);
643 }
644 return status;
645}
646
647/*
648 This routine queries the frag size for erx.
649
650 pfob - handle to a function object
651
652 frag_size_bytes - erx frag size in bytes that is/was set.
653
654 Returns BE_SUCCESS if successful, otherwise a useful int error
655 code is returned.
656
657 IRQL: < DISPATCH_LEVEL
658
659*/
660int
661be_eth_rq_get_frag_size(struct be_function_object *pfob, u32 *frag_size_bytes)
662{
663 struct FWCMD_ETH_GET_RX_FRAG_SIZE *fwcmd = NULL;
664 struct MCC_WRB_AMAP *wrb = NULL;
665 int status = 0;
666 unsigned long irql;
667
668 ASSERT(frag_size_bytes);
669
670 spin_lock_irqsave(&pfob->post_lock, irql);
671
672 wrb = be_function_peek_mcc_wrb(pfob);
673 if (!wrb) {
674 TRACE(DL_ERR, "MCC wrb peek failed.");
675 return BE_STATUS_NO_MCC_WRB;
676 }
677 /* Prepares an embedded fwcmd, including request/response sizes. */
678 fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, ETH_GET_RX_FRAG_SIZE);
679
680 /* Post the f/w command */
681 status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL,
682 NULL, NULL, fwcmd, NULL);
683
684 if (status != 0) {
685 TRACE(DL_ERR, "get frag size fwcmd failed.");
686 goto error;
687 }
688
689 *frag_size_bytes = 1 << fwcmd->params.response.actual_fragsize_log2;
690
691error:
692 spin_unlock_irqrestore(&pfob->post_lock, irql);
693
694 if (pfob->pend_queue_driving && pfob->mcc) {
695 pfob->pend_queue_driving = 0;
696 be_drive_mcc_wrb_queue(pfob->mcc);
697 }
698 return status;
699}
700
701/*
702 This routine attempts to set the frag size for erx. If the frag size is
703 already set, the attempt fails and the current frag size is returned.
704
705 pfob - Handle to a function object
706
707 frag_size - Requested ERX frag size in bytes.
708
709 frag_size_bytes - Pointer to location where the current frag
710 size is to be returned
711
712 Returns BE_SUCCESS if successful, otherwise a useful int error
713 code is returned.
714
715 IRQL: < DISPATCH_LEVEL
716
717 This function always fails in non-privileged machine context.
718*/
719int
720be_eth_rq_set_frag_size(struct be_function_object *pfob,
721 u32 frag_size, u32 *frag_size_bytes)
722{
723 struct FWCMD_ETH_SET_RX_FRAG_SIZE *fwcmd = NULL;
724 struct MCC_WRB_AMAP *wrb = NULL;
725 int status = 0;
726 unsigned long irql;
727
728 ASSERT(frag_size_bytes);
729
730 spin_lock_irqsave(&pfob->post_lock, irql);
731
732 wrb = be_function_peek_mcc_wrb(pfob);
733 if (!wrb) {
734 TRACE(DL_ERR, "MCC wrb peek failed.");
735 status = BE_STATUS_NO_MCC_WRB;
736 goto error;
737 }
738 /* Prepares an embedded fwcmd, including request/response sizes. */
739 fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, ETH_SET_RX_FRAG_SIZE);
740
741 ASSERT(frag_size >= 128 && frag_size <= 16 * 1024);
742
743 /* This is the log2 of the fragsize. This is not the exact
744 * ERX encoding. */
745 fwcmd->params.request.new_fragsize_log2 = __ilog2_u32(frag_size);
746
747 /* Post the f/w command */
748 status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL,
749 NULL, NULL, fwcmd, NULL);
750
751 if (status != 0) {
752 TRACE(DL_ERR, "set frag size fwcmd failed.");
753 goto error;
754 }
755
756 *frag_size_bytes = 1 << fwcmd->params.response.actual_fragsize_log2;
757error:
758 spin_unlock_irqrestore(&pfob->post_lock, irql);
759
760 if (pfob->pend_queue_driving && pfob->mcc) {
761 pfob->pend_queue_driving = 0;
762 be_drive_mcc_wrb_queue(pfob->mcc);
763 }
764 return status;
765}
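/*
 * Illustrative sketch, not part of the original file: requesting a 2KB ERX
 * fragment size and falling back to a query if another function already
 * programmed it. Sizes must lie in the 128 to 16K range asserted above.
 */
static inline int example_request_2k_frags(struct be_function_object *pfob)
{
	u32 actual = 0;
	int status;

	status = be_eth_rq_set_frag_size(pfob, 2048, &actual);
	if (status != BE_SUCCESS)
		status = be_eth_rq_get_frag_size(pfob, &actual);
	return status;
}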
766
767
768/*
769 This routine gets or sets a mac address for a domain
770 given the port and mac.
771
772 FunctionObject - Function object handle.
773 port1 - Set to TRUE if this function will set/get the Port 1
774 address. Only the host may set this to TRUE.
775 mac1 - Set to TRUE if this function will set/get the
776 MAC 1 address. Only the host may set this to TRUE.
777 write - Set to TRUE if this function should write the mac address.
778 mac_address - Buffer of the mac address to read or write.
779
780 Returns BE_SUCCESS if successful, otherwise a useful int is returned.
781
782 IRQL: < DISPATCH_LEVEL
783*/
784int be_rxf_mac_address_read_write(struct be_function_object *pfob,
785 bool port1, /* VM must always set this to false */
786 bool mac1, /* VM must always set this to false */
787 bool mgmt, bool write,
788 bool permanent, u8 *mac_address,
789 mcc_wrb_cqe_callback cb, /* optional */
790 void *cb_context) /* optional */
791{
792 int status = BE_SUCCESS;
793 union {
794 struct FWCMD_COMMON_NTWK_MAC_QUERY *query;
795 struct FWCMD_COMMON_NTWK_MAC_SET *set;
796 } fwcmd = {NULL};
797 struct MCC_WRB_AMAP *wrb = NULL;
798 u32 type = 0;
799 unsigned long irql;
800 struct be_mcc_wrb_response_copy rc;
801
802 spin_lock_irqsave(&pfob->post_lock, irql);
803
804 ASSERT(mac_address);
805
806 ASSERT(port1 == false);
807 ASSERT(mac1 == false);
808
809 wrb = be_function_peek_mcc_wrb(pfob);
810 if (!wrb) {
811 TRACE(DL_ERR, "MCC wrb peek failed.");
812 status = BE_STATUS_NO_MCC_WRB;
813 goto Error;
814 }
815
816 if (mgmt) {
817 type = MAC_ADDRESS_TYPE_MANAGEMENT;
818 } else {
819 if (pfob->type == BE_FUNCTION_TYPE_NETWORK)
820 type = MAC_ADDRESS_TYPE_NETWORK;
821 else
822 type = MAC_ADDRESS_TYPE_STORAGE;
823 }
824
825 if (write) {
826 /* Prepares an embedded fwcmd, including
827 * request/response sizes.
828 */
829 fwcmd.set = BE_PREPARE_EMBEDDED_FWCMD(pfob,
830 wrb, COMMON_NTWK_MAC_SET);
831
832 fwcmd.set->params.request.invalidate = 0;
833 fwcmd.set->params.request.mac1 = (mac1 ? 1 : 0);
834 fwcmd.set->params.request.port = (port1 ? 1 : 0);
835 fwcmd.set->params.request.type = type;
836
837 /* Copy the mac address to set. */
838 fwcmd.set->params.request.mac.SizeOfStructure =
839 sizeof(fwcmd.set->params.request.mac);
840 memcpy(fwcmd.set->params.request.mac.MACAddress,
841 mac_address, ETH_ALEN);
842
843 /* Post the f/w command */
844 status = be_function_post_mcc_wrb(pfob, wrb, NULL,
845 cb, cb_context, NULL, NULL, fwcmd.set, NULL);
846
847 } else {
848
849 /*
850 * Prepares an embedded fwcmd, including
851 * request/response sizes.
852 */
853 fwcmd.query = BE_PREPARE_EMBEDDED_FWCMD(pfob,
854 wrb, COMMON_NTWK_MAC_QUERY);
855
856 fwcmd.query->params.request.mac1 = (mac1 ? 1 : 0);
857 fwcmd.query->params.request.port = (port1 ? 1 : 0);
858 fwcmd.query->params.request.type = type;
859 fwcmd.query->params.request.permanent = permanent;
860
861 rc.length = FIELD_SIZEOF(struct FWCMD_COMMON_NTWK_MAC_QUERY,
862 params.response.mac.MACAddress);
863 rc.fwcmd_offset = offsetof(struct FWCMD_COMMON_NTWK_MAC_QUERY,
864 params.response.mac.MACAddress);
865 rc.va = mac_address;
866 /* Post the f/w command (with a copy for the response) */
867 status = be_function_post_mcc_wrb(pfob, wrb, NULL, cb,
868 cb_context, NULL, NULL, fwcmd.query, &rc);
869 }
870
871 if (status < 0) {
872 TRACE(DL_ERR, "mac set/query failed.");
873 goto Error;
874 }
875
876Error:
877 spin_unlock_irqrestore(&pfob->post_lock, irql);
878 if (pfob->pend_queue_driving && pfob->mcc) {
879 pfob->pend_queue_driving = 0;
880 be_drive_mcc_wrb_queue(pfob->mcc);
881 }
882 return status;
883}
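/*
 * Illustrative sketch, not part of the original file: a synchronous query of
 * the permanent network MAC address for this function. All flags that a VM
 * may not set are false, matching the ASSERTs above.
 */
static inline int example_read_perm_mac(struct be_function_object *pfob,
					u8 mac[ETH_ALEN])
{
	return be_rxf_mac_address_read_write(pfob,
			false /* port1 */, false /* mac1 */, false /* mgmt */,
			false /* write */, true /* permanent */,
			mac, NULL, NULL);
}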
884
885/*
886 This routine configures the multicast address filter.
887
888 pfob - Function object handle.
889 mac_table - Array of multicast MAC addresses to program.
890
891 Returns BE_SUCCESS if successful, otherwise a useful int is returned.
892
893 IRQL: < DISPATCH_LEVEL
894*/
895
896int be_rxf_multicast_config(struct be_function_object *pfob,
897 bool promiscuous, u32 num, u8 *mac_table,
898 mcc_wrb_cqe_callback cb, /* optional */
899 void *cb_context,
900 struct be_multicast_q_ctxt *q_ctxt)
901{
902 int status = BE_SUCCESS;
903 struct FWCMD_COMMON_NTWK_MULTICAST_SET *fwcmd = NULL;
904 struct MCC_WRB_AMAP *wrb = NULL;
905 struct be_generic_q_ctxt *generic_ctxt = NULL;
906 unsigned long irql;
907
908 ASSERT(num <= ARRAY_SIZE(fwcmd->params.request.mac));
909
910 if (num > ARRAY_SIZE(fwcmd->params.request.mac)) {
911 TRACE(DL_ERR, "Too many multicast addresses. BE supports %d.",
912 (int) ARRAY_SIZE(fwcmd->params.request.mac));
913 return BE_NOT_OK;
914 }
915
916 spin_lock_irqsave(&pfob->post_lock, irql);
917
918 wrb = be_function_peek_mcc_wrb(pfob);
919 if (!wrb) {
920 if (q_ctxt && cb) {
921 wrb = (struct MCC_WRB_AMAP *) &q_ctxt->wrb_header;
922 generic_ctxt = (struct be_generic_q_ctxt *) q_ctxt;
923 generic_ctxt->context.bytes = sizeof(*q_ctxt);
924 } else {
925 status = BE_STATUS_NO_MCC_WRB;
926 goto Error;
927 }
928 }
929 /* Prepares an embedded fwcmd, including request/response sizes. */
930 fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_NTWK_MULTICAST_SET);
931
932 fwcmd->params.request.promiscuous = promiscuous;
933 if (!promiscuous) {
934 fwcmd->params.request.num_mac = num;
935 if (num > 0) {
936 ASSERT(mac_table);
937 memcpy(fwcmd->params.request.mac,
938 mac_table, ETH_ALEN * num);
939 }
940 }
941
942 /* Post the f/w command */
943 status = be_function_post_mcc_wrb(pfob, wrb, generic_ctxt,
944 cb, cb_context, NULL, NULL, fwcmd, NULL);
945 if (status < 0) {
946 TRACE(DL_ERR, "multicast fwcmd failed.");
947 goto Error;
948 }
949
950Error:
951 spin_unlock_irqrestore(&pfob->post_lock, irql);
952 if (pfob->pend_queue_driving && pfob->mcc) {
953 pfob->pend_queue_driving = 0;
954 be_drive_mcc_wrb_queue(pfob->mcc);
955 }
956 return status;
957}
958
959/*
960 This routine configures the VLAN tag filter in the RXF table.
961
962 pfob - Function object handle.
963 num - Number of VLAN tags in the array.
964 vlan_tag_array - VLAN tags to program.
965
966 Returns BE_SUCCESS if successful, otherwise a useful int is returned.
967
968 IRQL: < DISPATCH_LEVEL
969*/
970int be_rxf_vlan_config(struct be_function_object *pfob,
971 bool promiscuous, u32 num, u16 *vlan_tag_array,
972 mcc_wrb_cqe_callback cb, /* optional */
973 void *cb_context,
974 struct be_vlan_q_ctxt *q_ctxt) /* optional */
975{
976 int status = BE_SUCCESS;
977 struct FWCMD_COMMON_NTWK_VLAN_CONFIG *fwcmd = NULL;
978 struct MCC_WRB_AMAP *wrb = NULL;
979 struct be_generic_q_ctxt *generic_ctxt = NULL;
980 unsigned long irql;
981
982 if (num > ARRAY_SIZE(fwcmd->params.request.vlan_tag)) {
983 TRACE(DL_ERR, "Too many VLAN tags.");
984 return BE_NOT_OK;
985 }
986
987 spin_lock_irqsave(&pfob->post_lock, irql);
988
989 wrb = be_function_peek_mcc_wrb(pfob);
990 if (!wrb) {
991 if (q_ctxt && cb) {
992 wrb = (struct MCC_WRB_AMAP *) &q_ctxt->wrb_header;
993 generic_ctxt = (struct be_generic_q_ctxt *) q_ctxt;
994 generic_ctxt->context.bytes = sizeof(*q_ctxt);
995 } else {
996 status = BE_STATUS_NO_MCC_WRB;
997 goto Error;
998 }
999 }
1000 /* Prepares an embedded fwcmd, including request/response sizes. */
1001 fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_NTWK_VLAN_CONFIG);
1002
1003 fwcmd->params.request.promiscuous = promiscuous;
1004 if (!promiscuous) {
1005 fwcmd->params.request.num_vlan = num;
1006
1007 if (num > 0) {
1008 ASSERT(vlan_tag_array);
1009 memcpy(fwcmd->params.request.vlan_tag, vlan_tag_array,
1010 num * sizeof(vlan_tag_array[0]));
1011 }
1012 }
1013
1014 /* Post the f/w command */
1015 status = be_function_post_mcc_wrb(pfob, wrb, generic_ctxt,
1016 cb, cb_context, NULL, NULL, fwcmd, NULL);
1017 if (status < 0) {
1018 TRACE(DL_ERR, "vlan fwcmd failed.");
1019 goto Error;
1020 }
1021
1022Error:
1023 spin_unlock_irqrestore(&pfob->post_lock, irql);
1024 if (pfob->pend_queue_driving && pfob->mcc) {
1025 pfob->pend_queue_driving = 0;
1026 be_drive_mcc_wrb_queue(pfob->mcc);
1027 }
1028 return status;
1029}
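/*
 * Illustrative sketch, not part of the original file: programming two VLAN
 * filters with promiscuous mode off. With no callback or queuing context,
 * the call fails with BE_STATUS_NO_MCC_WRB instead of queuing when the WRB
 * ring is full.
 */
static inline int example_set_two_vlans(struct be_function_object *pfob)
{
	u16 tags[2] = { 100, 200 };

	return be_rxf_vlan_config(pfob, false, ARRAY_SIZE(tags), tags,
				  NULL, NULL, NULL);
}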
1030
1031
1032int be_rxf_link_status(struct be_function_object *pfob,
1033 struct BE_LINK_STATUS *link_status,
1034 mcc_wrb_cqe_callback cb,
1035 void *cb_context,
1036 struct be_link_status_q_ctxt *q_ctxt)
1037{
1038 struct FWCMD_COMMON_NTWK_LINK_STATUS_QUERY *fwcmd = NULL;
1039 struct MCC_WRB_AMAP *wrb = NULL;
1040 int status = 0;
1041 struct be_generic_q_ctxt *generic_ctxt = NULL;
1042 unsigned long irql;
1043 struct be_mcc_wrb_response_copy rc;
1044
1045 ASSERT(link_status);
1046
1047 spin_lock_irqsave(&pfob->post_lock, irql);
1048
1049 wrb = be_function_peek_mcc_wrb(pfob);
1050
1051 if (!wrb) {
1052 if (q_ctxt && cb) {
1053 wrb = (struct MCC_WRB_AMAP *) &q_ctxt->wrb_header;
1054 generic_ctxt = (struct be_generic_q_ctxt *) q_ctxt;
1055 generic_ctxt->context.bytes = sizeof(*q_ctxt);
1056 } else {
1057 status = BE_STATUS_NO_MCC_WRB;
1058 goto Error;
1059 }
1060 }
1061 /* Prepares an embedded fwcmd, including request/response sizes. */
1062 fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb,
1063 COMMON_NTWK_LINK_STATUS_QUERY);
1064
1065 rc.length = FIELD_SIZEOF(struct FWCMD_COMMON_NTWK_LINK_STATUS_QUERY,
1066 params.response);
1067 rc.fwcmd_offset = offsetof(struct FWCMD_COMMON_NTWK_LINK_STATUS_QUERY,
1068 params.response);
1069 rc.va = link_status;
1070 /* Post or queue the f/w command */
1071 status = be_function_post_mcc_wrb(pfob, wrb, generic_ctxt,
1072 cb, cb_context, NULL, NULL, fwcmd, &rc);
1073
1074 if (status < 0) {
1075 TRACE(DL_ERR, "link status fwcmd failed.");
1076 goto Error;
1077 }
1078
1079Error:
1080 spin_unlock_irqrestore(&pfob->post_lock, irql);
1081 if (pfob->pend_queue_driving && pfob->mcc) {
1082 pfob->pend_queue_driving = 0;
1083 be_drive_mcc_wrb_queue(pfob->mcc);
1084 }
1085 return status;
1086}
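/*
 * Illustrative sketch, not part of the original file: a synchronous link
 * query. With a NULL callback the response is copied into *link via the
 * be_mcc_wrb_response_copy descriptor set up above.
 */
static inline int example_query_link(struct be_function_object *pfob,
				     struct BE_LINK_STATUS *link)
{
	return be_rxf_link_status(pfob, link, NULL, NULL, NULL);
}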
1087
1088int
1089be_rxf_query_eth_statistics(struct be_function_object *pfob,
1090 struct FWCMD_ETH_GET_STATISTICS *va_for_fwcmd,
1091 u64 pa_for_fwcmd, mcc_wrb_cqe_callback cb,
1092 void *cb_context,
1093 struct be_nonembedded_q_ctxt *q_ctxt)
1094{
1095 struct MCC_WRB_AMAP *wrb = NULL;
1096 int status = 0;
1097 struct be_generic_q_ctxt *generic_ctxt = NULL;
1098 unsigned long irql;
1099
1100 ASSERT(va_for_fwcmd);
1101 ASSERT(pa_for_fwcmd);
1102
1103 spin_lock_irqsave(&pfob->post_lock, irql);
1104
1105 wrb = be_function_peek_mcc_wrb(pfob);
1106
1107 if (!wrb) {
1108 if (q_ctxt && cb) {
1109 wrb = (struct MCC_WRB_AMAP *) &q_ctxt->wrb_header;
1110 generic_ctxt = (struct be_generic_q_ctxt *) q_ctxt;
1111 generic_ctxt->context.bytes = sizeof(*q_ctxt);
1112 } else {
1113 status = BE_STATUS_NO_MCC_WRB;
1114 goto Error;
1115 }
1116 }
1117
1118 TRACE(DL_INFO, "Query eth stats. fwcmd va:%p pa:0x%08x_%08x",
1119 va_for_fwcmd, upper_32_bits(pa_for_fwcmd), (u32)pa_for_fwcmd);
1120
1121 /* Prepares a non-embedded fwcmd, including request/response sizes. */
1122 va_for_fwcmd = BE_PREPARE_NONEMBEDDED_FWCMD(pfob, wrb,
1123 va_for_fwcmd, pa_for_fwcmd, ETH_GET_STATISTICS);
1124
1125 /* Post the f/w command */
1126 status = be_function_post_mcc_wrb(pfob, wrb, generic_ctxt,
1127 cb, cb_context, NULL, NULL, va_for_fwcmd, NULL);
1128 if (status < 0) {
1129 TRACE(DL_ERR, "eth stats fwcmd failed.");
1130 goto Error;
1131 }
1132
1133Error:
1134 spin_unlock_irqrestore(&pfob->post_lock, irql);
1135 if (pfob->pend_queue_driving && pfob->mcc) {
1136 pfob->pend_queue_driving = 0;
1137 be_drive_mcc_wrb_queue(pfob->mcc);
1138 }
1139 return status;
1140}
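/*
 * Illustrative sketch, not part of the original file: issuing the
 * non-embedded statistics FWCMD from a DMA-coherent buffer. It assumes
 * <linux/dma-mapping.h> and the PCI device's struct device; buffer lifetime
 * handling here is simplified (the sketch only frees when the command did
 * not stay pending).
 */
static inline int example_query_stats(struct be_function_object *pfob,
				      struct device *dev)
{
	struct FWCMD_ETH_GET_STATISTICS *cmd;
	dma_addr_t pa;
	int status;

	cmd = dma_alloc_coherent(dev, sizeof(*cmd), &pa, GFP_KERNEL);
	if (!cmd)
		return BE_STATUS_SYSTEM_RESOURCES;

	status = be_rxf_query_eth_statistics(pfob, cmd, pa, NULL, NULL, NULL);

	if (status != BE_PENDING)
		dma_free_coherent(dev, sizeof(*cmd), cmd, pa);
	return status;
}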
1141
1142int
1143be_rxf_promiscuous(struct be_function_object *pfob,
1144 bool enable_port0, bool enable_port1,
1145 mcc_wrb_cqe_callback cb, void *cb_context,
1146 struct be_promiscuous_q_ctxt *q_ctxt)
1147{
1148 struct FWCMD_ETH_PROMISCUOUS *fwcmd = NULL;
1149 struct MCC_WRB_AMAP *wrb = NULL;
1150 int status = 0;
1151 struct be_generic_q_ctxt *generic_ctxt = NULL;
1152 unsigned long irql;
1153
1154
1155 spin_lock_irqsave(&pfob->post_lock, irql);
1156
1157 wrb = be_function_peek_mcc_wrb(pfob);
1158
1159 if (!wrb) {
1160 if (q_ctxt && cb) {
1161 wrb = (struct MCC_WRB_AMAP *) &q_ctxt->wrb_header;
1162 generic_ctxt = (struct be_generic_q_ctxt *) q_ctxt;
1163 generic_ctxt->context.bytes = sizeof(*q_ctxt);
1164 } else {
1165 status = BE_STATUS_NO_MCC_WRB;
1166 goto Error;
1167 }
1168 }
1169 /* Prepares an embedded fwcmd, including request/response sizes. */
1170 fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, ETH_PROMISCUOUS);
1171
1172 fwcmd->params.request.port0_promiscuous = enable_port0;
1173 fwcmd->params.request.port1_promiscuous = enable_port1;
1174
1175 /* Post the f/w command */
1176 status = be_function_post_mcc_wrb(pfob, wrb, generic_ctxt,
1177 cb, cb_context, NULL, NULL, fwcmd, NULL);
1178
1179 if (status < 0) {
1180 TRACE(DL_ERR, "promiscuous fwcmd failed.");
1181 goto Error;
1182 }
1183
1184Error:
1185 spin_unlock_irqrestore(&pfob->post_lock, irql);
1186 if (pfob->pend_queue_driving && pfob->mcc) {
1187 pfob->pend_queue_driving = 0;
1188 be_drive_mcc_wrb_queue(pfob->mcc);
1189 }
1190 return status;
1191}
1192
1193
1194/*
1195 *-------------------------------------------------------------------------
1196 * Function: be_rxf_filter_config
1197 * Configures BladeEngine ethernet receive filter settings.
1198 * pfob -
1199 * settings - Pointer to the requested filter settings.
1200 * The response from BladeEngine will be placed back
1201 * in this structure.
1202 * cb - optional
1203 * cb_context - optional
1204 * q_ctxt - Optional. Pointer to a previously allocated struct.
1205 * If the MCC WRB ring is full, this structure is
1206 * used to queue the operation. It will be posted
1207 * to the MCC ring when space becomes available. All
1208 * queued commands will be posted to the ring in
1209 * the order they are received. It is always valid
1210 * to pass a pointer to a generic
1211 * be_generic_q_ctxt. However, the specific
1212 * context structs are generally smaller than
1213 * the generic struct.
1214 * return pend_status - BE_SUCCESS (0) on success.
1215 * BE_PENDING (positive value) if the FWCMD
1216 * completion is pending. Negative error code on failure.
1217 *---------------------------------------------------------------------------
1218 */
1219int
1220be_rxf_filter_config(struct be_function_object *pfob,
1221 struct NTWK_RX_FILTER_SETTINGS *settings,
1222 mcc_wrb_cqe_callback cb, void *cb_context,
1223 struct be_rxf_filter_q_ctxt *q_ctxt)
1224{
1225 struct FWCMD_COMMON_NTWK_RX_FILTER *fwcmd = NULL;
1226 struct MCC_WRB_AMAP *wrb = NULL;
1227 int status = 0;
1228 struct be_generic_q_ctxt *generic_ctxt = NULL;
1229 unsigned long irql;
1230 struct be_mcc_wrb_response_copy rc;
1231
1232 ASSERT(settings);
1233
1234 spin_lock_irqsave(&pfob->post_lock, irql);
1235
1236 wrb = be_function_peek_mcc_wrb(pfob);
1237
1238 if (!wrb) {
1239 if (q_ctxt && cb) {
1240 wrb = (struct MCC_WRB_AMAP *) &q_ctxt->wrb_header;
1241 generic_ctxt = (struct be_generic_q_ctxt *) q_ctxt;
1242 generic_ctxt->context.bytes = sizeof(*q_ctxt);
1243 } else {
1244 status = BE_STATUS_NO_MCC_WRB;
1245 goto Error;
1246 }
1247 }
1248 /* Prepares an embedded fwcmd, including request/response sizes. */
1249 fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_NTWK_RX_FILTER);
1250 memcpy(&fwcmd->params.request, settings, sizeof(*settings));
1251
1252 rc.length = FIELD_SIZEOF(struct FWCMD_COMMON_NTWK_RX_FILTER,
1253 params.response);
1254 rc.fwcmd_offset = offsetof(struct FWCMD_COMMON_NTWK_RX_FILTER,
1255 params.response);
1256 rc.va = settings;
1257 /* Post or queue the f/w command */
1258 status = be_function_post_mcc_wrb(pfob, wrb, generic_ctxt,
1259 cb, cb_context, NULL, NULL, fwcmd, &rc);
1260
1261 if (status < 0) {
1262 TRACE(DL_ERR, "RXF/ERX filter config fwcmd failed.");
1263 goto Error;
1264 }
1265
1266Error:
1267 spin_unlock_irqrestore(&pfob->post_lock, irql);
1268 if (pfob->pend_queue_driving && pfob->mcc) {
1269 pfob->pend_queue_driving = 0;
1270 be_drive_mcc_wrb_queue(pfob->mcc);
1271 }
1272 return status;
1273}
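/*
 * Illustrative sketch, not part of the original file: applying a filter
 * update synchronously. NTWK_RX_FILTER_SETTINGS doubles as request and
 * response, so on completion *settings reflects what firmware actually
 * applied.
 */
static inline int example_apply_rx_filter(struct be_function_object *pfob,
					  struct NTWK_RX_FILTER_SETTINGS *settings)
{
	return be_rxf_filter_config(pfob, settings, NULL, NULL, NULL);
}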
diff --git a/drivers/staging/benet/etx_context.h b/drivers/staging/benet/etx_context.h
deleted file mode 100644
index 554fbe5d127b..000000000000
--- a/drivers/staging/benet/etx_context.h
+++ /dev/null
@@ -1,55 +0,0 @@
1/*
2 * Copyright (C) 2005 - 2008 ServerEngines
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@serverengines.com
12 *
13 * ServerEngines
14 * 209 N. Fair Oaks Ave
15 * Sunnyvale, CA 94085
16 */
17/*
18 * Autogenerated by srcgen version: 0127
19 */
20#ifndef __etx_context_amap_h__
21#define __etx_context_amap_h__
22
23/* ETX ring context structure. */
24struct BE_ETX_CONTEXT_AMAP {
25 u8 tx_cidx[11]; /* DWORD 0 */
26 u8 rsvd0[5]; /* DWORD 0 */
27 u8 rsvd1[16]; /* DWORD 0 */
28 u8 tx_pidx[11]; /* DWORD 1 */
29 u8 rsvd2; /* DWORD 1 */
30 u8 tx_ring_size[4]; /* DWORD 1 */
31 u8 pd_id[5]; /* DWORD 1 */
32 u8 pd_id_not_valid; /* DWORD 1 */
33 u8 cq_id_send[10]; /* DWORD 1 */
34 u8 rsvd3[32]; /* DWORD 2 */
35 u8 rsvd4[32]; /* DWORD 3 */
36 u8 cur_bytes[32]; /* DWORD 4 */
37 u8 max_bytes[32]; /* DWORD 5 */
38 u8 time_stamp[32]; /* DWORD 6 */
39 u8 rsvd5[11]; /* DWORD 7 */
40 u8 func; /* DWORD 7 */
41 u8 rsvd6[20]; /* DWORD 7 */
42 u8 cur_txd_count[32]; /* DWORD 8 */
43 u8 max_txd_count[32]; /* DWORD 9 */
44 u8 rsvd7[32]; /* DWORD 10 */
45 u8 rsvd8[32]; /* DWORD 11 */
46 u8 rsvd9[32]; /* DWORD 12 */
47 u8 rsvd10[32]; /* DWORD 13 */
48 u8 rsvd11[32]; /* DWORD 14 */
49 u8 rsvd12[32]; /* DWORD 15 */
50} __packed;
51struct ETX_CONTEXT_AMAP {
52 u32 dw[16];
53};
54
55#endif /* __etx_context_amap_h__ */
diff --git a/drivers/staging/benet/funcobj.c b/drivers/staging/benet/funcobj.c
deleted file mode 100644
index 0f57eb58daef..000000000000
--- a/drivers/staging/benet/funcobj.c
+++ /dev/null
@@ -1,565 +0,0 @@
1/*
2 * Copyright (C) 2005 - 2008 ServerEngines
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@serverengines.com
12 *
13 * ServerEngines
14 * 209 N. Fair Oaks Ave
15 * Sunnyvale, CA 94085
16 */
17#include "hwlib.h"
18#include "bestatus.h"
19
20
21int
22be_function_internal_query_firmware_config(struct be_function_object *pfob,
23 struct BE_FIRMWARE_CONFIG *config)
24{
25 struct FWCMD_COMMON_FIRMWARE_CONFIG *fwcmd = NULL;
26 struct MCC_WRB_AMAP *wrb = NULL;
27 int status = 0;
28 unsigned long irql;
29 struct be_mcc_wrb_response_copy rc;
30
31 spin_lock_irqsave(&pfob->post_lock, irql);
32
33 wrb = be_function_peek_mcc_wrb(pfob);
34 if (!wrb) {
35 TRACE(DL_ERR, "MCC wrb peek failed.");
36 status = BE_STATUS_NO_MCC_WRB;
37 goto error;
38 }
39 /* Prepares an embedded fwcmd, including request/response sizes. */
40 fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_FIRMWARE_CONFIG);
41
42 rc.length = FIELD_SIZEOF(struct FWCMD_COMMON_FIRMWARE_CONFIG,
43 params.response);
44 rc.fwcmd_offset = offsetof(struct FWCMD_COMMON_FIRMWARE_CONFIG,
45 params.response);
46 rc.va = config;
47
48 /* Post the f/w command */
49 status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL,
50 NULL, NULL, NULL, fwcmd, &rc);
51error:
52 spin_unlock_irqrestore(&pfob->post_lock, irql);
53 if (pfob->pend_queue_driving && pfob->mcc) {
54 pfob->pend_queue_driving = 0;
55 be_drive_mcc_wrb_queue(pfob->mcc);
56 }
57 return status;
58}
59
60/*
61 This allocates and initializes a function object based on the information
62 provided by upper layer drivers.
63
64 Returns BE_SUCCESS on success and an appropriate int on failure.
65
66 A function object represents a single BladeEngine (logical) PCI function.
67 That is, a function object represents either the networking side
68 or the iSCSI side of BladeEngine.
69
70 This routine will also detect and create an appropriate PD object for the
71 PCI function as needed.
72*/
73int
74be_function_object_create(u8 __iomem *csr_va, u8 __iomem *db_va,
75 u8 __iomem *pci_va, u32 function_type,
76 struct ring_desc *mailbox, struct be_function_object *pfob)
77{
78 int status;
79
80 ASSERT(pfob); /* not a magic assert */
81 ASSERT(function_type <= 2);
82
83 TRACE(DL_INFO, "Create function object. type:%s object:0x%p",
84 (function_type == BE_FUNCTION_TYPE_ISCSI ? "iSCSI" :
85 (function_type == BE_FUNCTION_TYPE_NETWORK ? "Network" :
86 "Arm")), pfob);
87
88 memset(pfob, 0, sizeof(*pfob));
89
90 pfob->type = function_type;
91 pfob->csr_va = csr_va;
92 pfob->db_va = db_va;
93 pfob->pci_va = pci_va;
94
95 spin_lock_init(&pfob->cq_lock);
96 spin_lock_init(&pfob->post_lock);
97 spin_lock_init(&pfob->mcc_context_lock);
98
99
100 pfob->pci_function_number = 1;
101
102
103 pfob->emulate = false;
104 TRACE(DL_NOTE, "Non-emulation mode");
105 status = be_drive_POST(pfob);
106 if (status != BE_SUCCESS) {
107 TRACE(DL_ERR, "BladeEngine POST failed.");
108 goto error;
109 }
110
111 /* Initialize the mailbox */
112 status = be_mpu_init_mailbox(pfob, mailbox);
113 if (status != BE_SUCCESS) {
114 TRACE(DL_ERR, "Failed to initialize mailbox.");
115 goto error;
116 }
117 /*
118 * Cache the firmware config for ASSERTs in hwlib and later
119 * driver queries.
120 */
121 status = be_function_internal_query_firmware_config(pfob,
122 &pfob->fw_config);
123 if (status != BE_SUCCESS) {
124 TRACE(DL_ERR, "Failed to query firmware config.");
125 goto error;
126 }
127
128error:
129 if (status != BE_SUCCESS) {
130 /* No cleanup necessary */
131 TRACE(DL_ERR, "Failed to create function.");
132 memset(pfob, 0, sizeof(*pfob));
133 }
134 return status;
135}
136
137/*
138 This routine drops the reference count on a given function object. Once
139 the reference count falls to zero, the function object is destroyed and all
140 resources held are freed.
141
142 FunctionObject - The function object to drop the reference to.
143*/
144int be_function_object_destroy(struct be_function_object *pfob)
145{
146 TRACE(DL_INFO, "Destroy pfob. Object:0x%p",
147 pfob);
148
149
150 ASSERT(pfob->mcc == NULL);
151
152 return BE_SUCCESS;
153}
154
155int be_function_cleanup(struct be_function_object *pfob)
156{
157 int status = 0;
158 u32 isr;
159 u32 host_intr;
160 struct PCICFG_HOST_TIMER_INT_CTRL_CSR_AMAP ctrl;
161
162
163 if (pfob->type == BE_FUNCTION_TYPE_NETWORK) {
164 status = be_rxf_multicast_config(pfob, false, 0,
165 NULL, NULL, NULL, NULL);
166 ASSERT(status == BE_SUCCESS);
167 }
168 /* VLAN */
169 status = be_rxf_vlan_config(pfob, false, 0, NULL, NULL, NULL, NULL);
170 ASSERT(status == BE_SUCCESS);
171 /*
172 * MCC Queue -- Switches to mailbox mode. May want to destroy
173 * all but the MCC CQ before this call if polling CQ is much better
174 * performance than polling mailbox register.
175 */
176 if (pfob->mcc)
177 status = be_mcc_ring_destroy(pfob->mcc);
178 /*
179 * If interrupts are disabled, clear any CEV interrupt assertions that
180 * fired after we stopped processing EQs.
181 */
182 ctrl.dw[0] = PCICFG1_READ(pfob, host_timer_int_ctrl);
183 host_intr = AMAP_GET_BITS_PTR(PCICFG_HOST_TIMER_INT_CTRL_CSR,
184 hostintr, ctrl.dw);
185 if (!host_intr)
186 if (pfob->type == BE_FUNCTION_TYPE_NETWORK)
187 isr = CSR_READ(pfob, cev.isr1);
188 else
189 isr = CSR_READ(pfob, cev.isr0);
190 else
191 /* This should never happen... */
192 TRACE(DL_ERR, "function_cleanup called with interrupt enabled");
193 /* Function object destroy */
194 status = be_function_object_destroy(pfob);
195 ASSERT(status == BE_SUCCESS);
196
197 return status;
198}
199
200
201void *
202be_function_prepare_embedded_fwcmd(struct be_function_object *pfob,
203 struct MCC_WRB_AMAP *wrb, u32 payld_len, u32 request_length,
204 u32 response_length, u32 opcode, u32 subsystem)
205{
206 struct FWCMD_REQUEST_HEADER *header = NULL;
207 u32 n;
208
209 ASSERT(wrb);
210
211 n = offsetof(struct BE_MCC_WRB_AMAP, payload)/8;
212 AMAP_SET_BITS_PTR(MCC_WRB, embedded, wrb, 1);
213 AMAP_SET_BITS_PTR(MCC_WRB, payload_length, wrb, min(payld_len, n));
214 header = (struct FWCMD_REQUEST_HEADER *)((u8 *)wrb + n);
215
216 header->timeout = 0;
217 header->domain = 0;
218 header->request_length = max(request_length, response_length);
219 header->opcode = opcode;
220 header->subsystem = subsystem;
221
222 return header;
223}
224
225void *
226be_function_prepare_nonembedded_fwcmd(struct be_function_object *pfob,
227 struct MCC_WRB_AMAP *wrb,
228 void *fwcmd_va, u64 fwcmd_pa,
229 u32 payld_len,
230 u32 request_length,
231 u32 response_length,
232 u32 opcode, u32 subsystem)
233{
234 struct FWCMD_REQUEST_HEADER *header = NULL;
235 u32 n;
236 struct MCC_WRB_PAYLOAD_AMAP *plp;
237
238 ASSERT(wrb);
239 ASSERT(fwcmd_va);
240
241 header = (struct FWCMD_REQUEST_HEADER *) fwcmd_va;
242
243 AMAP_SET_BITS_PTR(MCC_WRB, embedded, wrb, 0);
244 AMAP_SET_BITS_PTR(MCC_WRB, payload_length, wrb, payld_len);
245
246 /*
247 * Assume one fragment. The caller may override the SGL by
248 * rewriting the 0th length and adding more entries. They
249 * will also need to update the sge_count.
250 */
251 AMAP_SET_BITS_PTR(MCC_WRB, sge_count, wrb, 1);
252
253 n = offsetof(struct BE_MCC_WRB_AMAP, payload)/8;
254 plp = (struct MCC_WRB_PAYLOAD_AMAP *)((u8 *)wrb + n);
255 AMAP_SET_BITS_PTR(MCC_WRB_PAYLOAD, sgl[0].length, plp, payld_len);
256 AMAP_SET_BITS_PTR(MCC_WRB_PAYLOAD, sgl[0].pa_lo, plp, (u32)fwcmd_pa);
257 AMAP_SET_BITS_PTR(MCC_WRB_PAYLOAD, sgl[0].pa_hi, plp,
258 upper_32_bits(fwcmd_pa));
259
260 header->timeout = 0;
261 header->domain = 0;
262 header->request_length = max(request_length, response_length);
263 header->opcode = opcode;
264 header->subsystem = subsystem;
265
266 return header;
267}
268
269struct MCC_WRB_AMAP *
270be_function_peek_mcc_wrb(struct be_function_object *pfob)
271{
272 struct MCC_WRB_AMAP *wrb = NULL;
273 u32 offset;
274
275 if (pfob->mcc)
276 wrb = _be_mpu_peek_ring_wrb(pfob->mcc, false);
277 else {
278 offset = offsetof(struct BE_MCC_MAILBOX_AMAP, wrb)/8;
279 wrb = (struct MCC_WRB_AMAP *) ((u8 *) pfob->mailbox.va +
280 offset);
281 }
282
283 if (wrb)
284 memset(wrb, 0, sizeof(struct MCC_WRB_AMAP));
285
286 return wrb;
287}
288
289#if defined(BE_DEBUG)
290void be_function_debug_print_wrb(struct be_function_object *pfob,
291 struct MCC_WRB_AMAP *wrb, void *optional_fwcmd_va,
292 struct be_mcc_wrb_context *wrb_context)
293{
294
295 struct FWCMD_REQUEST_HEADER *header = NULL;
296 u8 embedded;
297 u32 n;
298
299 embedded = AMAP_GET_BITS_PTR(MCC_WRB, embedded, wrb);
300
301 if (embedded) {
302 n = offsetof(struct BE_MCC_WRB_AMAP, payload)/8;
303 header = (struct FWCMD_REQUEST_HEADER *)((u8 *)wrb + n);
304 } else {
305 header = (struct FWCMD_REQUEST_HEADER *) optional_fwcmd_va;
306 }
307
308 /* Save the completed count before posting for a debug assert. */
309
310 if (header) {
311 wrb_context->opcode = header->opcode;
312 wrb_context->subsystem = header->subsystem;
313
314 } else {
315 wrb_context->opcode = 0;
316 wrb_context->subsystem = 0;
317 }
318}
319#else
320#define be_function_debug_print_wrb(a_, b_, c_, d_)
321#endif
322
323int
324be_function_post_mcc_wrb(struct be_function_object *pfob,
325 struct MCC_WRB_AMAP *wrb,
326 struct be_generic_q_ctxt *q_ctxt,
327 mcc_wrb_cqe_callback cb, void *cb_context,
328 mcc_wrb_cqe_callback internal_cb,
329 void *internal_cb_context, void *optional_fwcmd_va,
330 struct be_mcc_wrb_response_copy *rc)
331{
332 int status;
333 struct be_mcc_wrb_context *wrb_context = NULL;
334 u64 *p;
335
336 if (q_ctxt) {
337 /* Initialize context. */
338 q_ctxt->context.internal_cb = internal_cb;
339 q_ctxt->context.internal_cb_context = internal_cb_context;
340 q_ctxt->context.cb = cb;
341 q_ctxt->context.cb_context = cb_context;
342 if (rc) {
343 q_ctxt->context.copy.length = rc->length;
344 q_ctxt->context.copy.fwcmd_offset = rc->fwcmd_offset;
345 q_ctxt->context.copy.va = rc->va;
346 } else
347 q_ctxt->context.copy.length = 0;
348
349 q_ctxt->context.optional_fwcmd_va = optional_fwcmd_va;
350
351 /* Queue this request */
352 status = be_function_queue_mcc_wrb(pfob, q_ctxt);
353
354 goto Error;
355 }
356 /*
357 * Allocate a WRB context struct to hold the callback pointers,
358 * status, etc. This is required if commands complete out of order.
359 */
360 wrb_context = _be_mcc_allocate_wrb_context(pfob);
361 if (!wrb_context) {
362 TRACE(DL_WARN, "Failed to allocate MCC WRB context.");
363 status = BE_STATUS_SYSTEM_RESOURCES;
364 goto Error;
365 }
366 /* Initialize context. */
367 memset(wrb_context, 0, sizeof(*wrb_context));
368 wrb_context->internal_cb = internal_cb;
369 wrb_context->internal_cb_context = internal_cb_context;
370 wrb_context->cb = cb;
371 wrb_context->cb_context = cb_context;
372 if (rc) {
373 wrb_context->copy.length = rc->length;
374 wrb_context->copy.fwcmd_offset = rc->fwcmd_offset;
375 wrb_context->copy.va = rc->va;
376 } else
377 wrb_context->copy.length = 0;
378 wrb_context->wrb = wrb;
379
380 /*
381 * Copy the context pointer into the WRB opaque tag field.
382 * Verify assumption of 64-bit tag with a compile time assert.
383 */
384 p = (u64 *) ((u8 *)wrb + offsetof(struct BE_MCC_WRB_AMAP, tag)/8);
385 *p = (u64)(size_t)wrb_context;
386
387 /* Print info about this FWCMD for debug builds. */
388 be_function_debug_print_wrb(pfob, wrb, optional_fwcmd_va, wrb_context);
389
390 /*
391 * issue the WRB to the MPU as appropriate
392 */
393 if (pfob->mcc) {
394 /*
395 * we're in WRB mode, pass to the mcc layer
396 */
397 status = _be_mpu_post_wrb_ring(pfob->mcc, wrb, wrb_context);
398 } else {
399 /*
400 * we're in mailbox mode
401 */
402 status = _be_mpu_post_wrb_mailbox(pfob, wrb, wrb_context);
403
404 /* mailbox mode always completes synchronously */
405 ASSERT(status != BE_STATUS_PENDING);
406 }
407
408Error:
409
410 return status;
411}
412
413int
414be_function_ring_destroy(struct be_function_object *pfob,
415 u32 id, u32 ring_type, mcc_wrb_cqe_callback cb,
416 void *cb_context, mcc_wrb_cqe_callback internal_cb,
417 void *internal_cb_context)
418{
419
420 struct FWCMD_COMMON_RING_DESTROY *fwcmd = NULL;
421 struct MCC_WRB_AMAP *wrb = NULL;
422 int status = 0;
423 unsigned long irql;
424
425 spin_lock_irqsave(&pfob->post_lock, irql);
426
427 TRACE(DL_INFO, "Destroy ring id:%d type:%d", id, ring_type);
428
429 wrb = be_function_peek_mcc_wrb(pfob);
430 if (!wrb) {
431 ASSERT(wrb);
432 TRACE(DL_ERR, "No free MCC WRBs in destroy ring.");
433 status = BE_STATUS_NO_MCC_WRB;
434 goto Error;
435 }
436 /* Prepares an embedded fwcmd, including request/response sizes. */
437 fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_RING_DESTROY);
438
439 fwcmd->params.request.id = id;
440 fwcmd->params.request.ring_type = ring_type;
441
442 /* Post the f/w command */
443 status = be_function_post_mcc_wrb(pfob, wrb, NULL, cb, cb_context,
444 internal_cb, internal_cb_context, fwcmd, NULL);
445 if (status != BE_SUCCESS && status != BE_PENDING) {
446 TRACE(DL_ERR, "Ring destroy fwcmd failed. id:%d ring_type:%d",
447 id, ring_type);
448 goto Error;
449 }
450
451Error:
452 spin_unlock_irqrestore(&pfob->post_lock, irql);
453 if (pfob->pend_queue_driving && pfob->mcc) {
454 pfob->pend_queue_driving = 0;
455 be_drive_mcc_wrb_queue(pfob->mcc);
456 }
457 return status;
458}
459
460void
461be_rd_to_pa_list(struct ring_desc *rd, struct PHYS_ADDR *pa_list, u32 max_num)
462{
463 u32 num_pages = PAGES_SPANNED(rd->va, rd->length);
464 u32 i = 0;
465 u64 pa = rd->pa;
466 __le64 lepa;
467
468 ASSERT(pa_list);
469 ASSERT(pa);
470
471 for (i = 0; i < min(num_pages, max_num); i++) {
472 lepa = cpu_to_le64(pa);
473 pa_list[i].lo = (u32)lepa;
474 pa_list[i].hi = upper_32_bits(lepa);
475 pa += PAGE_SIZE;
476 }
477}
478
479
480
481/*-----------------------------------------------------------------------------
482 * Function: be_function_get_fw_version
483 * Retrieves the firmware version on the adapter. If the callback is
484 * NULL this call executes synchronously. If the callback is not NULL,
485 * the returned status will be BE_PENDING if the command was issued
486 * successfully.
487 * pfob -
488 * fwv - Pointer to response buffer if callback is NULL.
489 * cb - Callback function invoked when the FWCMD completes.
490 * cb_context - Passed to the callback function.
491 * return pend_status - BE_SUCCESS (0) on success.
492 * BE_PENDING (positive value) if the FWCMD
493 * completion is pending. Negative error code on failure.
494 *---------------------------------------------------------------------------
495 */
496int
497be_function_get_fw_version(struct be_function_object *pfob,
498 struct FWCMD_COMMON_GET_FW_VERSION_RESPONSE_PAYLOAD *fwv,
499 mcc_wrb_cqe_callback cb, void *cb_context)
500{
501 int status = BE_SUCCESS;
502 struct MCC_WRB_AMAP *wrb = NULL;
503 struct FWCMD_COMMON_GET_FW_VERSION *fwcmd = NULL;
504 unsigned long irql;
505 struct be_mcc_wrb_response_copy rc;
506
507 spin_lock_irqsave(&pfob->post_lock, irql);
508
509 wrb = be_function_peek_mcc_wrb(pfob);
510 if (!wrb) {
511 TRACE(DL_ERR, "MCC wrb peek failed.");
512 status = BE_STATUS_NO_MCC_WRB;
513 goto Error;
514 }
515
516 if (!cb && !fwv) {
517 TRACE(DL_ERR, "callback and response buffer NULL!");
518 status = BE_NOT_OK;
519 goto Error;
520 }
521 /* Prepares an embedded fwcmd, including request/response sizes. */
522 fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_GET_FW_VERSION);
523
524 rc.length = FIELD_SIZEOF(struct FWCMD_COMMON_GET_FW_VERSION,
525 params.response);
526 rc.fwcmd_offset = offsetof(struct FWCMD_COMMON_GET_FW_VERSION,
527 params.response);
528 rc.va = fwv;
529
530 /* Post the f/w command */
531 status = be_function_post_mcc_wrb(pfob, wrb, NULL, cb,
532 cb_context, NULL, NULL, fwcmd, &rc);
533
534Error:
535 spin_unlock_irqrestore(&pfob->post_lock, irql);
536 if (pfob->pend_queue_driving && pfob->mcc) {
537 pfob->pend_queue_driving = 0;
538 be_drive_mcc_wrb_queue(pfob->mcc);
539 }
540 return status;
541}
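/*
 * Illustrative sketch, not part of the original file: a synchronous
 * firmware-version query. With a NULL callback the response payload is
 * copied into fwv through the be_mcc_wrb_response_copy descriptor above.
 */
static inline int example_get_fw_version(struct be_function_object *pfob)
{
	struct FWCMD_COMMON_GET_FW_VERSION_RESPONSE_PAYLOAD fwv;
	int status;

	status = be_function_get_fw_version(pfob, &fwv, NULL, NULL);
	/* On success, fwv now holds the firmware version information. */
	return status;
}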
542
543int
544be_function_queue_mcc_wrb(struct be_function_object *pfob,
545 struct be_generic_q_ctxt *q_ctxt)
546{
547 int status;
548
549 ASSERT(q_ctxt);
550
551 /*
552 * issue the WRB to the MPU as appropriate
553 */
554 if (pfob->mcc) {
555
556 /* We're in ring mode. Queue this item. */
557 pfob->mcc->backlog_length++;
558 list_add_tail(&q_ctxt->context.list, &pfob->mcc->backlog);
559 status = BE_PENDING;
560 } else {
561 status = BE_NOT_OK;
562 }
563 return status;
564}
565
diff --git a/drivers/staging/benet/fwcmd_common.h b/drivers/staging/benet/fwcmd_common.h
deleted file mode 100644
index 406e0d6fa985..000000000000
--- a/drivers/staging/benet/fwcmd_common.h
+++ /dev/null
@@ -1,222 +0,0 @@
1/*
2 * Copyright (C) 2005 - 2008 ServerEngines
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@serverengines.com
12 *
13 * ServerEngines
14 * 209 N. Fair Oaks Ave
15 * Sunnyvale, CA 94085
16 */
17/*
18 * Autogenerated by srcgen version: 0127
19 */
20#ifndef __fwcmd_common_amap_h__
21#define __fwcmd_common_amap_h__
22#include "host_struct.h"
23
24/* --- PHY_LINK_DUPLEX_ENUM --- */
25#define PHY_LINK_DUPLEX_NONE (0)
26#define PHY_LINK_DUPLEX_HALF (1)
27#define PHY_LINK_DUPLEX_FULL (2)
28
29/* --- PHY_LINK_SPEED_ENUM --- */
30#define PHY_LINK_SPEED_ZERO (0) /* No link. */
31#define PHY_LINK_SPEED_10MBPS (1) /* 10 Mbps */
32#define PHY_LINK_SPEED_100MBPS (2) /* 100 Mbps */
33#define PHY_LINK_SPEED_1GBPS (3) /* 1 Gbps */
34#define PHY_LINK_SPEED_10GBPS (4) /* 10 Gbps */
35
36/* --- PHY_LINK_FAULT_ENUM --- */
37#define PHY_LINK_FAULT_NONE (0) /* No fault status
38 available or detected */
39#define PHY_LINK_FAULT_LOCAL (1) /* Local fault detected */
40#define PHY_LINK_FAULT_REMOTE (2) /* Remote fault detected */
41
42/* --- BE_ULP_MASK --- */
43#define BE_ULP0_MASK (1)
44#define BE_ULP1_MASK (2)
45#define BE_ULP2_MASK (4)
46
47/* --- NTWK_ACTIVE_PORT --- */
48#define NTWK_PORT_A (0) /* Port A is currently active */
49#define NTWK_PORT_B (1) /* Port B is currently active */
50#define NTWK_NO_ACTIVE_PORT (15) /* Both ports have lost link */
51
52/* --- NTWK_LINK_TYPE --- */
53#define NTWK_LINK_TYPE_PHYSICAL (0) /* link up/down event
54 applies to BladeEngine's
55 Physical Ports
56 */
57#define NTWK_LINK_TYPE_VIRTUAL (1) /* Virtual link up/down event
58 reported by BladeExchange.
59 This applies only when the
60 VLD feature is enabled
61 */
62
63/*
64 * --- FWCMD_MAC_TYPE_ENUM ---
65 * This enum defines the types of MAC addresses in the RXF MAC Address Table.
66 */
67#define MAC_ADDRESS_TYPE_STORAGE (0) /* Storage MAC Address */
68#define MAC_ADDRESS_TYPE_NETWORK (1) /* Network MAC Address */
69#define MAC_ADDRESS_TYPE_PD (2) /* Protection Domain MAC Addr */
70#define MAC_ADDRESS_TYPE_MANAGEMENT (3)	/* Management MAC Address */
71
72
73/* --- FWCMD_RING_TYPE_ENUM --- */
74#define FWCMD_RING_TYPE_ETH_RX (1) /* Ring created with */
75 /* FWCMD_COMMON_ETH_RX_CREATE. */
76#define FWCMD_RING_TYPE_ETH_TX (2) /* Ring created with */
77 /* FWCMD_COMMON_ETH_TX_CREATE. */
78#define FWCMD_RING_TYPE_ISCSI_WRBQ (3) /* Ring created with */
79 /* FWCMD_COMMON_ISCSI_WRBQ_CREATE. */
80#define FWCMD_RING_TYPE_ISCSI_DEFQ (4) /* Ring created with */
81 /* FWCMD_COMMON_ISCSI_DEFQ_CREATE. */
82#define FWCMD_RING_TYPE_TPM_WRBQ (5) /* Ring created with */
83 /* FWCMD_COMMON_TPM_WRBQ_CREATE. */
84#define FWCMD_RING_TYPE_TPM_DEFQ (6) /* Ring created with */
85					/* FWCMD_COMMON_TPM_DEFQ_CREATE. */
86#define FWCMD_RING_TYPE_TPM_RQ (7) /* Ring created with */
87 /* FWCMD_COMMON_TPM_RQ_CREATE. */
88#define FWCMD_RING_TYPE_MCC (8) /* Ring created with */
89 /* FWCMD_COMMON_MCC_CREATE. */
90#define FWCMD_RING_TYPE_CQ (9) /* Ring created with */
91 /* FWCMD_COMMON_CQ_CREATE. */
92#define FWCMD_RING_TYPE_EQ (10) /* Ring created with */
93 /* FWCMD_COMMON_EQ_CREATE. */
94#define FWCMD_RING_TYPE_QP (11) /* Ring created with */
95 /* FWCMD_RDMA_QP_CREATE. */
96
97
98/* --- ETH_TX_RING_TYPE_ENUM --- */
99#define ETH_TX_RING_TYPE_FORWARDING (1) /* Ethernet ring for
100 forwarding packets */
101#define ETH_TX_RING_TYPE_STANDARD (2) /* Ethernet ring for sending
102 network packets. */
103#define ETH_TX_RING_TYPE_BOUND (3) /* Ethernet ring bound to the
104 port specified in the command
105 header.port_number field.
106 Rings of this type are
107 NOT subject to the
108 failover logic implemented
109 in the BladeEngine.
110 */
111
112/* --- FWCMD_COMMON_QOS_TYPE_ENUM --- */
113#define QOS_BITS_NIC (1) /* max_bits_per_second_NIC */
114 /* field is valid. */
115#define QOS_PKTS_NIC (2) /* max_packets_per_second_NIC */
116 /* field is valid. */
117#define QOS_IOPS_ISCSI (4) /* max_ios_per_second_iSCSI */
118						/* field is valid. */
119#define QOS_VLAN_TAG (8) /* domain_VLAN_tag field
120 is valid. */
121#define QOS_FABRIC_ID (16) /* fabric_domain_ID field
122 is valid. */
123#define QOS_OEM_PARAMS (32) /* qos_params_oem field
124 is valid. */
125#define QOS_TPUT_ISCSI (64) /* max_bytes_per_second_iSCSI
126 field is valid. */
127
128
129/*
130 * --- FAILOVER_CONFIG_ENUM ---
131 * Failover configuration setting used in FWCMD_COMMON_FORCE_FAILOVER
132 */
133#define FAILOVER_CONFIG_NO_CHANGE (0) /* No change to automatic */
134 /* port failover setting. */
135#define FAILOVER_CONFIG_ON (1) /* Automatic port failover
136 on link down is enabled. */
137#define FAILOVER_CONFIG_OFF (2) /* Automatic port failover
138 on link down is disabled. */
139
140/*
141 * --- FAILOVER_PORT_ENUM ---
142 * Failover port setting used in FWCMD_COMMON_FORCE_FAILOVER
143 */
144#define FAILOVER_PORT_A (0) /* Selects port A. */
145#define FAILOVER_PORT_B (1) /* Selects port B. */
146#define FAILOVER_PORT_NONE (15) /* No port change requested. */
147
148
149/*
150 * --- MGMT_FLASHROM_OPCODE ---
151 * Flash ROM operation code
152 */
153#define MGMT_FLASHROM_OPCODE_FLASH (1) /* Commit downloaded data
154 to Flash ROM */
155#define MGMT_FLASHROM_OPCODE_SAVE (2) /* Save downloaded data to
156 ARM's DDR - do not flash */
157#define MGMT_FLASHROM_OPCODE_CLEAR (3) /* Erase specified component
158 from FlashROM */
159#define MGMT_FLASHROM_OPCODE_REPORT (4) /* Read specified component
160 from Flash ROM */
161#define MGMT_FLASHROM_OPCODE_IMAGE_INFO (5) /* Returns size of a
162 component */
163
164/*
165 * --- MGMT_FLASHROM_OPTYPE ---
166 * Flash ROM operation type
167 */
168#define MGMT_FLASHROM_OPTYPE_CODE_FIRMWARE (0) /* Includes ARM firmware,
169 IPSec (optional) and EP
170 firmware */
171#define MGMT_FLASHROM_OPTYPE_CODE_REDBOOT (1)
172#define MGMT_FLASHROM_OPTYPE_CODE_BIOS (2)
173#define MGMT_FLASHROM_OPTYPE_CODE_PXE_BIOS (3)
174#define MGMT_FLASHROM_OPTYPE_CODE_CTRLS (4)
175#define MGMT_FLASHROM_OPTYPE_CFG_IPSEC (5)
176#define MGMT_FLASHROM_OPTYPE_CFG_INI (6)
177#define MGMT_FLASHROM_OPTYPE_ROM_OFFSET_SPECIFIED (7)
178
179/*
180 * --- FLASHROM_TYPE ---
181 * Flash ROM manufacturers supported in the f/w
182 */
183#define INTEL (0)
184#define SPANSION (1)
185#define MICRON (2)
186
187/* --- DDR_CAS_TYPE --- */
188#define CAS_3 (0)
189#define CAS_4 (1)
190#define CAS_5 (2)
191
192/* --- DDR_SIZE_TYPE --- */
193#define SIZE_256MB (0)
194#define SIZE_512MB (1)
195
196/* --- DDR_MODE_TYPE --- */
197#define DDR_NO_ECC (0)
198#define DDR_ECC (1)
199
200/* --- INTERFACE_10GB_TYPE --- */
201#define CX4_TYPE (0)
202#define XFP_TYPE (1)
203
204/* --- BE_CHIP_MAX_MTU --- */
205#define CHIP_MAX_MTU (9000)
206
207/* --- XAUI_STATE_ENUM --- */
208#define XAUI_STATE_ENABLE (0) /* This MUST be the default
209 value for all requests
210 which set/change
211 equalization parameter. */
212#define XAUI_STATE_DISABLE (255) /* The XAUI for both ports
213 may be disabled for EMI
214 tests. There is no
215 provision for turning off
216 individual ports.
217 */
218/* --- BE_ASIC_REVISION --- */
219#define BE_ASIC_REV_A0 (1)
220#define BE_ASIC_REV_A1 (2)
221
222#endif /* __fwcmd_common_amap_h__ */
diff --git a/drivers/staging/benet/fwcmd_common_bmap.h b/drivers/staging/benet/fwcmd_common_bmap.h
deleted file mode 100644
index a007cf276500..000000000000
--- a/drivers/staging/benet/fwcmd_common_bmap.h
+++ /dev/null
@@ -1,717 +0,0 @@
1/*
2 * Copyright (C) 2005 - 2008 ServerEngines
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@serverengines.com
12 *
13 * ServerEngines
14 * 209 N. Fair Oaks Ave
15 * Sunnyvale, CA 94085
16 */
17/*
18 * Autogenerated by srcgen version: 0127
19 */
20#ifndef __fwcmd_common_bmap_h__
21#define __fwcmd_common_bmap_h__
22#include "fwcmd_types_bmap.h"
23#include "fwcmd_hdr_bmap.h"
24
25#if defined(__BIG_ENDIAN)
26 /* Physical Address. */
27struct PHYS_ADDR {
28 union {
29 struct {
30 u32 lo; /* DWORD 0 */
31 u32 hi; /* DWORD 1 */
32 } __packed; /* unnamed struct */
33 u32 dw[2]; /* dword union */
34 }; /* unnamed union */
35} __packed ;
36
37
38#else
39 /* Physical Address. */
40struct PHYS_ADDR {
41 union {
42 struct {
43 u32 lo; /* DWORD 0 */
44 u32 hi; /* DWORD 1 */
45 } __packed; /* unnamed struct */
46 u32 dw[2]; /* dword union */
47 }; /* unnamed union */
48} __packed ;
49
50struct BE_LINK_STATUS {
51 u8 mac0_duplex;
52 u8 mac0_speed;
53 u8 mac1_duplex;
54 u8 mac1_speed;
55 u8 mgmt_mac_duplex;
56 u8 mgmt_mac_speed;
57 u8 active_port;
58 u8 rsvd0;
59 u8 mac0_fault;
60 u8 mac1_fault;
61 u16 rsvd1;
62} __packed;
63#endif
64
65struct FWCMD_COMMON_ANON_170_REQUEST {
66 u32 rsvd0;
67} __packed;
68
69union LINK_STATUS_QUERY_PARAMS {
70 struct BE_LINK_STATUS response;
71 struct FWCMD_COMMON_ANON_170_REQUEST request;
72} __packed;
73
74/*
75 * Queries the link status for all ports. The valid values below
76 * DO NOT indicate that a particular duplex or speed is supported by
77 * BladeEngine. These enumerations simply list all possible duplexes
78 * and speeds for any port. Consult BladeEngine product documentation
79 * for the supported parameters.
80 */
81struct FWCMD_COMMON_NTWK_LINK_STATUS_QUERY {
82 union FWCMD_HEADER header;
83 union LINK_STATUS_QUERY_PARAMS params;
84} __packed;
85
86struct FWCMD_COMMON_ANON_171_REQUEST {
87 u8 type;
88 u8 port;
89 u8 mac1;
90 u8 permanent;
91} __packed;
92
93struct FWCMD_COMMON_ANON_172_RESPONSE {
94 struct MAC_ADDRESS_FORMAT mac;
95} __packed;
96
97union NTWK_MAC_QUERY_PARAMS {
98 struct FWCMD_COMMON_ANON_171_REQUEST request;
99 struct FWCMD_COMMON_ANON_172_RESPONSE response;
100} __packed;
101
102/* Queries one MAC address. */
103struct FWCMD_COMMON_NTWK_MAC_QUERY {
104 union FWCMD_HEADER header;
105 union NTWK_MAC_QUERY_PARAMS params;
106} __packed;
107
108struct MAC_SET_PARAMS_IN {
109 u8 type;
110 u8 port;
111 u8 mac1;
112 u8 invalidate;
113 struct MAC_ADDRESS_FORMAT mac;
114} __packed;
115
116struct MAC_SET_PARAMS_OUT {
117 u32 rsvd0;
118} __packed;
119
120union MAC_SET_PARAMS {
121 struct MAC_SET_PARAMS_IN request;
122 struct MAC_SET_PARAMS_OUT response;
123} __packed;
124
125/* Sets a MAC address. */
126struct FWCMD_COMMON_NTWK_MAC_SET {
127 union FWCMD_HEADER header;
128 union MAC_SET_PARAMS params;
129} __packed;
130
131/* MAC address list. */
132struct NTWK_MULTICAST_MAC_LIST {
133 u8 byte[6];
134} __packed;
135
136struct FWCMD_COMMON_NTWK_MULTICAST_SET_REQUEST_PAYLOAD {
137 u16 num_mac;
138 u8 promiscuous;
139 u8 rsvd0;
140 struct NTWK_MULTICAST_MAC_LIST mac[32];
141} __packed;
142
143struct FWCMD_COMMON_ANON_174_RESPONSE {
144 u32 rsvd0;
145} __packed;
146
147union FWCMD_COMMON_ANON_173_PARAMS {
148 struct FWCMD_COMMON_NTWK_MULTICAST_SET_REQUEST_PAYLOAD request;
149 struct FWCMD_COMMON_ANON_174_RESPONSE response;
150} __packed;
151
152/*
153 * Sets multicast address hash. The MPU will merge the MAC address lists
154 * from all clients, including the networking and storage functions.
155 * This command may fail if the final merged list of MAC addresses exceeds
156 * 32 entries.
157 */
158struct FWCMD_COMMON_NTWK_MULTICAST_SET {
159 union FWCMD_HEADER header;
160 union FWCMD_COMMON_ANON_173_PARAMS params;
161} __packed;
162
163struct FWCMD_COMMON_NTWK_VLAN_CONFIG_REQUEST_PAYLOAD {
164 u16 num_vlan;
165 u8 promiscuous;
166 u8 rsvd0;
167 u16 vlan_tag[32];
168} __packed;
169
170struct FWCMD_COMMON_ANON_176_RESPONSE {
171 u32 rsvd0;
172} __packed;
173
174union FWCMD_COMMON_ANON_175_PARAMS {
175 struct FWCMD_COMMON_NTWK_VLAN_CONFIG_REQUEST_PAYLOAD request;
176 struct FWCMD_COMMON_ANON_176_RESPONSE response;
177} __packed;
178
179/*
180 * Sets VLAN tag filter. The MPU will merge the VLAN tag list from all
181 * clients, including the networking and storage functions. This command
182 * may fail if the final vlan_tag array (from all functions) is longer
183 * than 32 entries.
184 */
185struct FWCMD_COMMON_NTWK_VLAN_CONFIG {
186 union FWCMD_HEADER header;
187 union FWCMD_COMMON_ANON_175_PARAMS params;
188} __packed;
189
190struct RING_DESTROY_REQUEST {
191 u16 ring_type;
192 u16 id;
193 u8 bypass_flush;
194 u8 rsvd0;
195 u16 rsvd1;
196} __packed;
197
198struct FWCMD_COMMON_ANON_190_RESPONSE {
199 u32 rsvd0;
200} __packed;
201
202union FWCMD_COMMON_ANON_189_PARAMS {
203 struct RING_DESTROY_REQUEST request;
204 struct FWCMD_COMMON_ANON_190_RESPONSE response;
205} __packed;
206/*
207 * Command for destroying any ring. The connection(s) using the ring should
208 * be quiesced before destroying the ring.
209 */
210struct FWCMD_COMMON_RING_DESTROY {
211 union FWCMD_HEADER header;
212 union FWCMD_COMMON_ANON_189_PARAMS params;
213} __packed;
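
A sketch of filling the request, using the FWCMD_RING_TYPE_* codes from fwcmd_common.h (the helper name is hypothetical):

/* Illustrative: prepare a destroy request for an ethernet RX ring.
 * Per the comment above, connections using the ring should be
 * quiesced before the command is issued. */
static void example_fill_ring_destroy(struct RING_DESTROY_REQUEST *req,
				      u16 erx_id)
{
	req->ring_type = FWCMD_RING_TYPE_ETH_RX;
	req->id = erx_id;
	req->bypass_flush = 0;	/* let in-flight traffic flush */
	req->rsvd0 = 0;
	req->rsvd1 = 0;
}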
214
215struct FWCMD_COMMON_ANON_192_REQUEST {
216 u16 num_pages;
217 u16 rsvd0;
218 struct CQ_CONTEXT_AMAP context;
219 struct PHYS_ADDR pages[4];
220} __packed ;
221
222struct FWCMD_COMMON_ANON_193_RESPONSE {
223 u16 cq_id;
224} __packed ;
225
226union FWCMD_COMMON_ANON_191_PARAMS {
227 struct FWCMD_COMMON_ANON_192_REQUEST request;
228 struct FWCMD_COMMON_ANON_193_RESPONSE response;
229} __packed ;
230
231/*
232 * Command for creating a completion queue. A Completion Queue must span
233 * at least 1 page and at most 4 pages. Each completion queue entry
234 * is 16 bytes regardless of CQ entry format. Thus the ring must be
235 * at least 256 entries deep (corresponding to 1 page) and can be at
236 * most 1024 entries deep (corresponding to 4 pages). The number of
237 * pages posted must contain the CQ ring size as encoded in the context.
238 *
239 */
240struct FWCMD_COMMON_CQ_CREATE {
241 union FWCMD_HEADER header;
242 union FWCMD_COMMON_ANON_191_PARAMS params;
243} __packed ;
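
To make the sizing rule concrete (a sketch under the stated assumptions, not driver code): with 16-byte entries and 4KB pages, each page holds 256 entries, so the page count follows directly from the requested depth:

/* Illustrative: pages needed to back a CQ of 'num_entries' 16-byte
 * entries, assuming 4KB pages: 256 entries -> 1 page, 1024 -> 4. */
static inline u16 example_cq_pages(u32 num_entries)
{
	return (u16)((num_entries * 16 + PAGE_SIZE - 1) / PAGE_SIZE);
}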
244
245struct FWCMD_COMMON_ANON_198_REQUEST {
246 u16 num_pages;
247 u16 rsvd0;
248 struct EQ_CONTEXT_AMAP context;
249 struct PHYS_ADDR pages[8];
250} __packed ;
251
252struct FWCMD_COMMON_ANON_199_RESPONSE {
253 u16 eq_id;
254} __packed ;
255
256union FWCMD_COMMON_ANON_197_PARAMS {
257 struct FWCMD_COMMON_ANON_198_REQUEST request;
258 struct FWCMD_COMMON_ANON_199_RESPONSE response;
259} __packed ;
260
261/*
262 * Command for creating an event queue. An Event Queue must span at least
263 * 1 page and at most 8 pages. The number of pages posted must contain
264 * the EQ ring. The ring is defined by the size of the EQ entries (encoded
265 * in the context) and the number of EQ entries (also encoded in the
266 * context).
267 */
268struct FWCMD_COMMON_EQ_CREATE {
269 union FWCMD_HEADER header;
270 union FWCMD_COMMON_ANON_197_PARAMS params;
271} __packed ;
272
273struct FWCMD_COMMON_ANON_201_REQUEST {
274 u16 cq_id;
275 u16 bcmc_cq_id;
276 u16 num_pages;
277 u16 rsvd0;
278 struct PHYS_ADDR pages[2];
279} __packed;
280
281struct FWCMD_COMMON_ANON_202_RESPONSE {
282 u16 id;
283} __packed;
284
285union FWCMD_COMMON_ANON_200_PARAMS {
286 struct FWCMD_COMMON_ANON_201_REQUEST request;
287 struct FWCMD_COMMON_ANON_202_RESPONSE response;
288} __packed;
289
290/*
291 * Command for creating Ethernet receive ring. An ERX ring contains ETH_RX_D
292 * entries (8 bytes each). An ERX ring must be 1024 entries deep
293 * (corresponding to 2 pages).
294 */
295struct FWCMD_COMMON_ETH_RX_CREATE {
296 union FWCMD_HEADER header;
297 union FWCMD_COMMON_ANON_200_PARAMS params;
298} __packed;
299
300struct FWCMD_COMMON_ANON_204_REQUEST {
301 u16 num_pages;
302 u8 ulp_num;
303 u8 type;
304 struct ETX_CONTEXT_AMAP context;
305 struct PHYS_ADDR pages[8];
306} __packed ;
307
308struct FWCMD_COMMON_ANON_205_RESPONSE {
309 u16 cid;
310 u8 ulp_num;
311 u8 rsvd0;
312} __packed ;
313
314union FWCMD_COMMON_ANON_203_PARAMS {
315 struct FWCMD_COMMON_ANON_204_REQUEST request;
316 struct FWCMD_COMMON_ANON_205_RESPONSE response;
317} __packed ;
318
319/*
320 * Command for creating an Ethernet transmit ring. An ETX ring contains
321 * ETH_WRB entries (16 bytes each). An ETX ring must be at least 256
322 * entries deep (corresponding to 1 page) and at most 2k entries deep
323 * (corresponding to 8 pages).
324 */
325struct FWCMD_COMMON_ETH_TX_CREATE {
326 union FWCMD_HEADER header;
327 union FWCMD_COMMON_ANON_203_PARAMS params;
328} __packed ;
329
330struct FWCMD_COMMON_ANON_222_REQUEST {
331 u16 num_pages;
332 u16 rsvd0;
333 struct MCC_RING_CONTEXT_AMAP context;
334 struct PHYS_ADDR pages[8];
335} __packed ;
336
337struct FWCMD_COMMON_ANON_223_RESPONSE {
338 u16 id;
339} __packed ;
340
341union FWCMD_COMMON_ANON_221_PARAMS {
342 struct FWCMD_COMMON_ANON_222_REQUEST request;
343 struct FWCMD_COMMON_ANON_223_RESPONSE response;
344} __packed ;
345
346/*
347 * Command for creating the MCC ring. An MCC ring must be at least 16
348 * entries deep (corresponding to 1 page) and at most 128 entries deep
349 * (corresponding to 8 pages).
350 */
351struct FWCMD_COMMON_MCC_CREATE {
352 union FWCMD_HEADER header;
353 union FWCMD_COMMON_ANON_221_PARAMS params;
354} __packed ;
355
356struct GET_QOS_IN {
357 u32 qos_params_rsvd;
358} __packed;
359
360struct GET_QOS_OUT {
361 u32 max_bits_per_second_NIC;
362 u32 max_packets_per_second_NIC;
363 u32 max_ios_per_second_iSCSI;
364 u32 max_bytes_per_second_iSCSI;
365 u16 domain_VLAN_tag;
366 u16 fabric_domain_ID;
367 u32 qos_params_oem[4];
368} __packed;
369
370union GET_QOS_PARAMS {
371 struct GET_QOS_IN request;
372 struct GET_QOS_OUT response;
373} __packed;
374
375/* QOS/Bandwidth settings per domain. Applicable only in VMs. */
376struct FWCMD_COMMON_GET_QOS {
377 union FWCMD_HEADER header;
378 union GET_QOS_PARAMS params;
379} __packed;
380
381struct SET_QOS_IN {
382 u32 valid_flags;
383 u32 max_bits_per_second_NIC;
384 u32 max_packets_per_second_NIC;
385 u32 max_ios_per_second_iSCSI;
386 u32 max_bytes_per_second_iSCSI;
387 u16 domain_VLAN_tag;
388 u16 fabric_domain_ID;
389 u32 qos_params_oem[4];
390} __packed;
391
392struct SET_QOS_OUT {
393 u32 qos_params_rsvd;
394} __packed;
395
396union SET_QOS_PARAMS {
397 struct SET_QOS_IN request;
398 struct SET_QOS_OUT response;
399} __packed;
400
401/* QOS/Bandwidth settings per domain. Applicable only in VMs. */
402struct FWCMD_COMMON_SET_QOS {
403 union FWCMD_HEADER header;
404 union SET_QOS_PARAMS params;
405} __packed;
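
The valid_flags field appears to take the FWCMD_COMMON_QOS_TYPE_ENUM bits from fwcmd_common.h OR-ed together (an assumption drawn from the field comments there, not stated in this header); a sketch:

/* Illustrative, assuming valid_flags carries the QOS_* bits: cap NIC
 * bandwidth and packet rate in one request; only fields whose flag
 * bits are set should be consulted by firmware. */
static void example_fill_set_qos(struct SET_QOS_IN *req)
{
	req->valid_flags = QOS_BITS_NIC | QOS_PKTS_NIC;
	req->max_bits_per_second_NIC = 1000000000;	/* 1 Gbps cap */
	req->max_packets_per_second_NIC = 500000;
}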
406
407struct SET_FRAME_SIZE_IN {
408 u32 max_tx_frame_size;
409 u32 max_rx_frame_size;
410} __packed;
411
412struct SET_FRAME_SIZE_OUT {
413 u32 chip_max_tx_frame_size;
414 u32 chip_max_rx_frame_size;
415} __packed;
416
417union SET_FRAME_SIZE_PARAMS {
418 struct SET_FRAME_SIZE_IN request;
419 struct SET_FRAME_SIZE_OUT response;
420} __packed;
421
422/* Set frame size command. Only host domain may issue this command. */
423struct FWCMD_COMMON_SET_FRAME_SIZE {
424 union FWCMD_HEADER header;
425 union SET_FRAME_SIZE_PARAMS params;
426} __packed;
427
428struct FORCE_FAILOVER_IN {
429 u32 move_to_port;
430 u32 failover_config;
431} __packed;
432
433struct FWCMD_COMMON_ANON_231_RESPONSE {
434 u32 rsvd0;
435} __packed;
436
437union FWCMD_COMMON_ANON_230_PARAMS {
438 struct FORCE_FAILOVER_IN request;
439 struct FWCMD_COMMON_ANON_231_RESPONSE response;
440} __packed;
441
442/*
443 * Use this command to control failover in BladeEngine. It may be used
444 * to failback to a restored port or to forcibly move traffic from
445 * one port to another. It may also be used to enable or disable the
446 * automatic failover feature. This command can only be issued by domain
447 * 0.
448 */
449struct FWCMD_COMMON_FORCE_FAILOVER {
450 union FWCMD_HEADER header;
451 union FWCMD_COMMON_ANON_230_PARAMS params;
452} __packed;
453
454struct FWCMD_COMMON_ANON_240_REQUEST {
455 u64 context;
456} __packed;
457
458struct FWCMD_COMMON_ANON_241_RESPONSE {
459 u64 context;
460} __packed;
461
462union FWCMD_COMMON_ANON_239_PARAMS {
463 struct FWCMD_COMMON_ANON_240_REQUEST request;
464 struct FWCMD_COMMON_ANON_241_RESPONSE response;
465} __packed;
466
467/*
468 * This command can be used by clients as a no-operation request. Typical
469 * uses for drivers are as a heartbeat mechanism or a deferred-processing
470 * catalyst. The ARM will always complete this command with a good completion.
471 * The 64-bit parameter is not touched by the ARM processor.
472 */
473struct FWCMD_COMMON_NOP {
474 union FWCMD_HEADER header;
475 union FWCMD_COMMON_ANON_239_PARAMS params;
476} __packed;
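
Because the 64-bit parameter is echoed back untouched, it can carry an opaque driver cookie across the round trip; a sketch (helper name hypothetical):

/* Illustrative: stash a driver pointer in the NOP context and recover
 * it from the echoed response after completion. */
static inline void example_nop_set_cookie(struct FWCMD_COMMON_NOP *nop,
					  void *cookie)
{
	nop->params.request.context = (u64)(unsigned long)cookie;
}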
477
478struct NTWK_RX_FILTER_SETTINGS {
479 u8 promiscuous;
480 u8 ip_cksum;
481 u8 tcp_cksum;
482 u8 udp_cksum;
483 u8 pass_err;
484 u8 pass_ckerr;
485 u8 strip_crc;
486 u8 mcast_en;
487 u8 bcast_en;
488 u8 mcast_promiscuous_en;
489 u8 unicast_en;
490 u8 vlan_promiscuous;
491} __packed;
492
493union FWCMD_COMMON_ANON_242_PARAMS {
494 struct NTWK_RX_FILTER_SETTINGS request;
495 struct NTWK_RX_FILTER_SETTINGS response;
496} __packed;
497
498/*
499 * This command is used to modify the ethernet receive filter configuration.
500 * Only domain 0 network function drivers may issue this command. The
501 * applied configuration is returned in the response payload. Note:
502 * Some receive packet filter settings are global on BladeEngine and
503 * can affect both the storage and network function clients that the
505 * BladeEngine hardware and firmware serve. Additionally, depending
505 * on the revision of BladeEngine, some ethernet receive filter settings
506 * are dependent on others. If a dependency exists between settings
507 * for the BladeEngine revision, and the command request settings do
508 * not meet the dependency requirement, the invalid settings will not
510 * be applied despite the command succeeding. For example: a driver may
510 * request to enable broadcast packets, but not enable multicast packets.
511 * On early revisions of BladeEngine, there may be no distinction between
512 * broadcast and multicast filters, so broadcast could not be enabled
514 * without enabling multicast. In this scenario, the command would still
514 * succeed, but the response payload would indicate the previously
515 * configured broadcast and multicast setting.
516 */
517struct FWCMD_COMMON_NTWK_RX_FILTER {
518 union FWCMD_HEADER header;
519 union FWCMD_COMMON_ANON_242_PARAMS params;
520} __packed;
521
522
523struct FWCMD_COMMON_ANON_244_REQUEST {
524 u32 rsvd0;
525} __packed;
526
527struct FWCMD_COMMON_GET_FW_VERSION_RESPONSE_PAYLOAD {
528 u8 firmware_version_string[32];
529 u8 fw_on_flash_version_string[32];
530} __packed;
531
532union FWCMD_COMMON_ANON_243_PARAMS {
533 struct FWCMD_COMMON_ANON_244_REQUEST request;
534 struct FWCMD_COMMON_GET_FW_VERSION_RESPONSE_PAYLOAD response;
535} __packed;
536
537/* This command retrieves the firmware version. */
538struct FWCMD_COMMON_GET_FW_VERSION {
539 union FWCMD_HEADER header;
540 union FWCMD_COMMON_ANON_243_PARAMS params;
541} __packed;
542
543struct FWCMD_COMMON_ANON_246_REQUEST {
544 u16 tx_flow_control;
545 u16 rx_flow_control;
546} __packed;
547
548struct FWCMD_COMMON_ANON_247_RESPONSE {
549 u32 rsvd0;
550} __packed;
551
552union FWCMD_COMMON_ANON_245_PARAMS {
553 struct FWCMD_COMMON_ANON_246_REQUEST request;
554 struct FWCMD_COMMON_ANON_247_RESPONSE response;
555} __packed;
556
557/*
558 * This command is used to program BladeEngine flow control behavior.
559 * Only the host networking driver is allowed to use this command.
560 */
561struct FWCMD_COMMON_SET_FLOW_CONTROL {
562 union FWCMD_HEADER header;
563 union FWCMD_COMMON_ANON_245_PARAMS params;
564} __packed;
565
566struct FWCMD_COMMON_ANON_249_REQUEST {
567 u32 rsvd0;
568} __packed;
569
570struct FWCMD_COMMON_ANON_250_RESPONSE {
571 u16 tx_flow_control;
572 u16 rx_flow_control;
573} __packed;
574
575union FWCMD_COMMON_ANON_248_PARAMS {
576 struct FWCMD_COMMON_ANON_249_REQUEST request;
577 struct FWCMD_COMMON_ANON_250_RESPONSE response;
578} __packed;
579
580/* This command is used to read BladeEngine flow control settings. */
581struct FWCMD_COMMON_GET_FLOW_CONTROL {
582 union FWCMD_HEADER header;
583 union FWCMD_COMMON_ANON_248_PARAMS params;
584} __packed;
585
586struct EQ_DELAY_PARAMS {
587 u32 eq_id;
588 u32 delay_in_microseconds;
589} __packed;
590
591struct FWCMD_COMMON_ANON_257_REQUEST {
592 u32 num_eq;
593 u32 rsvd0;
594 struct EQ_DELAY_PARAMS delay[16];
595} __packed;
596
597struct FWCMD_COMMON_ANON_258_RESPONSE {
598 u32 delay_resolution_in_microseconds;
599 u32 delay_max_in_microseconds;
600} __packed;
601
602union MODIFY_EQ_DELAY_PARAMS {
603 struct FWCMD_COMMON_ANON_257_REQUEST request;
604 struct FWCMD_COMMON_ANON_258_RESPONSE response;
605} __packed;
606
607/* This command changes the EQ delay for a given set of EQs. */
608struct FWCMD_COMMON_MODIFY_EQ_DELAY {
609 union FWCMD_HEADER header;
610 union MODIFY_EQ_DELAY_PARAMS params;
611} __packed;
612
613struct FWCMD_COMMON_ANON_260_REQUEST {
614 u32 rsvd0;
615} __packed;
616
617struct BE_FIRMWARE_CONFIG {
618 u16 be_config_number;
619 u16 asic_revision;
620 u32 nic_ulp_mask;
621 u32 tulp_mask;
622 u32 iscsi_ulp_mask;
623 u32 rdma_ulp_mask;
624 u32 rsvd0[4];
625 u32 eth_tx_id_start;
626 u32 eth_tx_id_count;
627 u32 eth_rx_id_start;
628 u32 eth_rx_id_count;
629 u32 tpm_wrbq_id_start;
630 u32 tpm_wrbq_id_count;
631 u32 tpm_defq_id_start;
632 u32 tpm_defq_id_count;
633 u32 iscsi_wrbq_id_start;
634 u32 iscsi_wrbq_id_count;
635 u32 iscsi_defq_id_start;
636 u32 iscsi_defq_id_count;
637 u32 rdma_qp_id_start;
638 u32 rdma_qp_id_count;
639 u32 rsvd1[8];
640} __packed;
641
642union FWCMD_COMMON_ANON_259_PARAMS {
643 struct FWCMD_COMMON_ANON_260_REQUEST request;
644 struct BE_FIRMWARE_CONFIG response;
645} __packed;
646
647/*
648 * This command queries the current firmware configuration parameters.
649 * The static configuration type is defined by be_config_number. This
650 * differentiates different BladeEngine builds, such as iSCSI Initiator
651 * versus iSCSI Target. For a given static configuration, the Upper
652 * Layer Protocol (ULP) processors may be reconfigured to support different
653 * protocols. Each ULP processor supports one or more protocols. The
654 * masks indicate which processors are configured for each protocol.
655 * For a given static configuration, the number of TCP connections
656 * supported for each protocol may vary. The *_id_start and *_id_count
657 * variables define a linear range of IDs that are available for each
658 * supported protocol. The *_id_count may be used by the driver to allocate
659 * the appropriate number of connection resources. The *_id_start may
660 * be used to map the arbitrary range of IDs to a zero-based range
661 * of indices.
662 */
663struct FWCMD_COMMON_FIRMWARE_CONFIG {
664 union FWCMD_HEADER header;
665 union FWCMD_COMMON_ANON_259_PARAMS params;
666} __packed;
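
The *_id_start/*_id_count convention reduces to simple range arithmetic; a sketch of the zero-based mapping the comment describes (helper name hypothetical):

/* Illustrative: map a firmware-assigned ethernet TX ring ID into a
 * zero-based driver index using the ranges reported in
 * struct BE_FIRMWARE_CONFIG; returns -1 if the ID is out of range. */
static inline int example_eth_tx_index(const struct BE_FIRMWARE_CONFIG *cfg,
				       u32 ring_id)
{
	if (ring_id < cfg->eth_tx_id_start ||
	    ring_id >= cfg->eth_tx_id_start + cfg->eth_tx_id_count)
		return -1;
	return ring_id - cfg->eth_tx_id_start;
}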
667
668struct FWCMD_COMMON_PORT_EQUALIZATION_PARAMS {
669 u32 emph_lev_sel_port0;
670 u32 emph_lev_sel_port1;
671 u8 xaui_vo_sel;
672 u8 xaui_state;
673 u16 rsvd0;
674 u32 xaui_eq_vector;
675} __packed;
676
677struct FWCMD_COMMON_ANON_262_REQUEST {
678 u32 rsvd0;
679} __packed;
680
681union FWCMD_COMMON_ANON_261_PARAMS {
682 struct FWCMD_COMMON_ANON_262_REQUEST request;
683 struct FWCMD_COMMON_PORT_EQUALIZATION_PARAMS response;
684} __packed;
685
686/*
687 * This command can be used to read XAUI equalization parameters. The
688 * ARM firmware applies default equalization parameters during initialization.
689 * These parameters may be customer-specific when derived from the
690 * SEEPROM. See SEEPROM_DATA for equalization specific fields.
691 */
692struct FWCMD_COMMON_GET_PORT_EQUALIZATION {
693 union FWCMD_HEADER header;
694 union FWCMD_COMMON_ANON_261_PARAMS params;
695} __packed;
696
697struct FWCMD_COMMON_ANON_264_RESPONSE {
698 u32 rsvd0;
699} __packed;
700
701union FWCMD_COMMON_ANON_263_PARAMS {
702 struct FWCMD_COMMON_PORT_EQUALIZATION_PARAMS request;
703 struct FWCMD_COMMON_ANON_264_RESPONSE response;
704} __packed;
705
706/*
707 * This command can be used to set XAUI equalization parameters. The ARM
708 * firmware applies default equalization parameters during initialization.
709 * These parameters may be customer-specific when derived from the
710 * SEEPROM. See SEEPROM_DATA for equalization specific fields.
711 */
712struct FWCMD_COMMON_SET_PORT_EQUALIZATION {
713 union FWCMD_HEADER header;
714 union FWCMD_COMMON_ANON_263_PARAMS params;
715} __packed;
716
717#endif /* __fwcmd_common_bmap_h__ */
diff --git a/drivers/staging/benet/fwcmd_eth_bmap.h b/drivers/staging/benet/fwcmd_eth_bmap.h
deleted file mode 100644
index 234b179eace6..000000000000
--- a/drivers/staging/benet/fwcmd_eth_bmap.h
+++ /dev/null
@@ -1,280 +0,0 @@
1/*
2 * Copyright (C) 2005 - 2008 ServerEngines
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@serverengines.com
12 *
13 * ServerEngines
14 * 209 N. Fair Oaks Ave
15 * Sunnyvale, CA 94085
16 */
17/*
18 * Autogenerated by srcgen version: 0127
19 */
20#ifndef __fwcmd_eth_bmap_h__
21#define __fwcmd_eth_bmap_h__
22#include "fwcmd_hdr_bmap.h"
23#include "fwcmd_types_bmap.h"
24
25struct MIB_ETH_STATISTICS_PARAMS_IN {
26 u32 rsvd0;
27} __packed;
28
29struct BE_RXF_STATS {
30 u32 p0recvdtotalbytesLSD; /* DWORD 0 */
31 u32 p0recvdtotalbytesMSD; /* DWORD 1 */
32 u32 p0recvdtotalframes; /* DWORD 2 */
33 u32 p0recvdunicastframes; /* DWORD 3 */
34 u32 p0recvdmulticastframes; /* DWORD 4 */
35 u32 p0recvdbroadcastframes; /* DWORD 5 */
36 u32 p0crcerrors; /* DWORD 6 */
37 u32 p0alignmentsymerrs; /* DWORD 7 */
38 u32 p0pauseframesrecvd; /* DWORD 8 */
39 u32 p0controlframesrecvd; /* DWORD 9 */
40 u32 p0inrangelenerrors; /* DWORD 10 */
41 u32 p0outrangeerrors; /* DWORD 11 */
42 u32 p0frametoolongerrors; /* DWORD 12 */
43 u32 p0droppedaddressmatch; /* DWORD 13 */
44 u32 p0droppedvlanmismatch; /* DWORD 14 */
45 u32 p0ipdroppedtoosmall; /* DWORD 15 */
46 u32 p0ipdroppedtooshort; /* DWORD 16 */
47 u32 p0ipdroppedhdrtoosmall; /* DWORD 17 */
48 u32 p0tcpdroppedlen; /* DWORD 18 */
49 u32 p0droppedrunt; /* DWORD 19 */
50 u32 p0recvd64; /* DWORD 20 */
51 u32 p0recvd65_127; /* DWORD 21 */
52 u32 p0recvd128_256; /* DWORD 22 */
53 u32 p0recvd256_511; /* DWORD 23 */
54 u32 p0recvd512_1023; /* DWORD 24 */
55 u32 p0recvd1518_1522; /* DWORD 25 */
56 u32 p0recvd1522_2047; /* DWORD 26 */
57 u32 p0recvd2048_4095; /* DWORD 27 */
58 u32 p0recvd4096_8191; /* DWORD 28 */
59 u32 p0recvd8192_9216; /* DWORD 29 */
60 u32 p0rcvdipcksmerrs; /* DWORD 30 */
61 u32 p0recvdtcpcksmerrs; /* DWORD 31 */
62 u32 p0recvdudpcksmerrs; /* DWORD 32 */
63 u32 p0recvdnonrsspackets; /* DWORD 33 */
64 u32 p0recvdippackets; /* DWORD 34 */
65 u32 p0recvdchute1packets; /* DWORD 35 */
66 u32 p0recvdchute2packets; /* DWORD 36 */
67 u32 p0recvdchute3packets; /* DWORD 37 */
68 u32 p0recvdipsecpackets; /* DWORD 38 */
69 u32 p0recvdmanagementpackets; /* DWORD 39 */
70 u32 p0xmitbyteslsd; /* DWORD 40 */
71 u32 p0xmitbytesmsd; /* DWORD 41 */
72 u32 p0xmitunicastframes; /* DWORD 42 */
73 u32 p0xmitmulticastframes; /* DWORD 43 */
74 u32 p0xmitbroadcastframes; /* DWORD 44 */
75 u32 p0xmitpauseframes; /* DWORD 45 */
76 u32 p0xmitcontrolframes; /* DWORD 46 */
77 u32 p0xmit64; /* DWORD 47 */
78 u32 p0xmit65_127; /* DWORD 48 */
79 u32 p0xmit128_256; /* DWORD 49 */
80 u32 p0xmit256_511; /* DWORD 50 */
81 u32 p0xmit512_1023; /* DWORD 51 */
82 u32 p0xmit1518_1522; /* DWORD 52 */
83 u32 p0xmit1522_2047; /* DWORD 53 */
84 u32 p0xmit2048_4095; /* DWORD 54 */
85 u32 p0xmit4096_8191; /* DWORD 55 */
86 u32 p0xmit8192_9216; /* DWORD 56 */
87 u32 p0rxfifooverflowdropped; /* DWORD 57 */
88 u32 p0ipseclookupfaileddropped; /* DWORD 58 */
89 u32 p1recvdtotalbytesLSD; /* DWORD 59 */
90 u32 p1recvdtotalbytesMSD; /* DWORD 60 */
91 u32 p1recvdtotalframes; /* DWORD 61 */
92 u32 p1recvdunicastframes; /* DWORD 62 */
93 u32 p1recvdmulticastframes; /* DWORD 63 */
94 u32 p1recvdbroadcastframes; /* DWORD 64 */
95 u32 p1crcerrors; /* DWORD 65 */
96 u32 p1alignmentsymerrs; /* DWORD 66 */
97 u32 p1pauseframesrecvd; /* DWORD 67 */
98 u32 p1controlframesrecvd; /* DWORD 68 */
99 u32 p1inrangelenerrors; /* DWORD 69 */
100 u32 p1outrangeerrors; /* DWORD 70 */
101 u32 p1frametoolongerrors; /* DWORD 71 */
102 u32 p1droppedaddressmatch; /* DWORD 72 */
103 u32 p1droppedvlanmismatch; /* DWORD 73 */
104 u32 p1ipdroppedtoosmall; /* DWORD 74 */
105 u32 p1ipdroppedtooshort; /* DWORD 75 */
106 u32 p1ipdroppedhdrtoosmall; /* DWORD 76 */
107 u32 p1tcpdroppedlen; /* DWORD 77 */
108 u32 p1droppedrunt; /* DWORD 78 */
109 u32 p1recvd64; /* DWORD 79 */
110 u32 p1recvd65_127; /* DWORD 80 */
111 u32 p1recvd128_256; /* DWORD 81 */
112 u32 p1recvd256_511; /* DWORD 82 */
113 u32 p1recvd512_1023; /* DWORD 83 */
114 u32 p1recvd1518_1522; /* DWORD 84 */
115 u32 p1recvd1522_2047; /* DWORD 85 */
116 u32 p1recvd2048_4095; /* DWORD 86 */
117 u32 p1recvd4096_8191; /* DWORD 87 */
118 u32 p1recvd8192_9216; /* DWORD 88 */
119 u32 p1rcvdipcksmerrs; /* DWORD 89 */
120 u32 p1recvdtcpcksmerrs; /* DWORD 90 */
121 u32 p1recvdudpcksmerrs; /* DWORD 91 */
122 u32 p1recvdnonrsspackets; /* DWORD 92 */
123 u32 p1recvdippackets; /* DWORD 93 */
124 u32 p1recvdchute1packets; /* DWORD 94 */
125 u32 p1recvdchute2packets; /* DWORD 95 */
126 u32 p1recvdchute3packets; /* DWORD 96 */
127 u32 p1recvdipsecpackets; /* DWORD 97 */
128 u32 p1recvdmanagementpackets; /* DWORD 98 */
129 u32 p1xmitbyteslsd; /* DWORD 99 */
130 u32 p1xmitbytesmsd; /* DWORD 100 */
131 u32 p1xmitunicastframes; /* DWORD 101 */
132 u32 p1xmitmulticastframes; /* DWORD 102 */
133 u32 p1xmitbroadcastframes; /* DWORD 103 */
134 u32 p1xmitpauseframes; /* DWORD 104 */
135 u32 p1xmitcontrolframes; /* DWORD 105 */
136 u32 p1xmit64; /* DWORD 106 */
137 u32 p1xmit65_127; /* DWORD 107 */
138 u32 p1xmit128_256; /* DWORD 108 */
139 u32 p1xmit256_511; /* DWORD 109 */
140 u32 p1xmit512_1023; /* DWORD 110 */
141 u32 p1xmit1518_1522; /* DWORD 111 */
142 u32 p1xmit1522_2047; /* DWORD 112 */
143 u32 p1xmit2048_4095; /* DWORD 113 */
144 u32 p1xmit4096_8191; /* DWORD 114 */
145 u32 p1xmit8192_9216; /* DWORD 115 */
146 u32 p1rxfifooverflowdropped; /* DWORD 116 */
147 u32 p1ipseclookupfaileddropped; /* DWORD 117 */
148 u32 pxdroppednopbuf; /* DWORD 118 */
149 u32 pxdroppednotxpb; /* DWORD 119 */
150 u32 pxdroppednoipsecbuf; /* DWORD 120 */
151 u32 pxdroppednoerxdescr; /* DWORD 121 */
152 u32 pxdroppednotpredescr; /* DWORD 122 */
153 u32 pxrecvdmanagementportpackets; /* DWORD 123 */
154 u32 pxrecvdmanagementportbytes; /* DWORD 124 */
155 u32 pxrecvdmanagementportpauseframes; /* DWORD 125 */
156 u32 pxrecvdmanagementporterrors; /* DWORD 126 */
157 u32 pxxmitmanagementportpackets; /* DWORD 127 */
158 u32 pxxmitmanagementportbytes; /* DWORD 128 */
159 u32 pxxmitmanagementportpause; /* DWORD 129 */
160 u32 pxxmitmanagementportrxfifooverflow; /* DWORD 130 */
161 u32 pxrecvdipsecipcksmerrs; /* DWORD 131 */
162 u32 pxrecvdtcpsecipcksmerrs; /* DWORD 132 */
163 u32 pxrecvdudpsecipcksmerrs; /* DWORD 133 */
164 u32 pxipsecrunt; /* DWORD 134 */
165 u32 pxipsecaddressmismatchdropped; /* DWORD 135 */
166 u32 pxipsecrxfifooverflowdropped; /* DWORD 136 */
167 u32 pxipsecframestoolong; /* DWORD 137 */
168 u32 pxipsectotalipframes; /* DWORD 138 */
169 u32 pxipseciptoosmall; /* DWORD 139 */
170 u32 pxipseciptooshort; /* DWORD 140 */
171 u32 pxipseciphdrtoosmall; /* DWORD 141 */
172 u32 pxipsectcphdrbad; /* DWORD 142 */
173 u32 pxrecvdipsecchute1; /* DWORD 143 */
174 u32 pxrecvdipsecchute2; /* DWORD 144 */
175 u32 pxrecvdipsecchute3; /* DWORD 145 */
176 u32 pxdropped7frags; /* DWORD 146 */
177 u32 pxdroppedfrags; /* DWORD 147 */
178 u32 pxdroppedinvalidfragring; /* DWORD 148 */
179 u32 pxnumforwardedpackets; /* DWORD 149 */
180} __packed;
181
182union MIB_ETH_STATISTICS_PARAMS {
183 struct MIB_ETH_STATISTICS_PARAMS_IN request;
184 struct BE_RXF_STATS response;
185} __packed;
186
187/*
188 * Query ethernet statistics. All domains may issue this command. The
189 * host domain drivers may optionally reset internal statistic counters
190 * with a query.
191 */
192struct FWCMD_ETH_GET_STATISTICS {
193 union FWCMD_HEADER header;
194 union MIB_ETH_STATISTICS_PARAMS params;
195} __packed;
196
197
198struct FWCMD_ETH_ANON_175_REQUEST {
199 u8 port0_promiscuous;
200 u8 port1_promiscuous;
201 u16 rsvd0;
202} __packed;
203
204struct FWCMD_ETH_ANON_176_RESPONSE {
205 u32 rsvd0;
206} __packed;
207
208union FWCMD_ETH_ANON_174_PARAMS {
209 struct FWCMD_ETH_ANON_175_REQUEST request;
210 struct FWCMD_ETH_ANON_176_RESPONSE response;
211} __packed;
212
213/* Enables/Disables promiscuous ethernet receive mode. */
214struct FWCMD_ETH_PROMISCUOUS {
215 union FWCMD_HEADER header;
216 union FWCMD_ETH_ANON_174_PARAMS params;
217} __packed;
218
219struct FWCMD_ETH_ANON_178_REQUEST {
220 u32 new_fragsize_log2;
221} __packed;
222
223struct FWCMD_ETH_ANON_179_RESPONSE {
224 u32 actual_fragsize_log2;
225} __packed;
226
227union FWCMD_ETH_ANON_177_PARAMS {
228 struct FWCMD_ETH_ANON_178_REQUEST request;
229 struct FWCMD_ETH_ANON_179_RESPONSE response;
230} __packed;
231
232/*
233 * Sets the Ethernet RX fragment size. Only host (domain 0) networking
234 * drivers may issue this command. This call will fail for non-host
235 * protection domains. In this situation the MCC CQ status will indicate
236 * a failure due to insufficient privileges. The response should be
237 * ignored, and the driver should use the FWCMD_ETH_GET_FRAG_SIZE to
238 * query the existing ethernet receive fragment size. It must use this
239 * fragment size for all fragments in the ethernet receive ring. If
240 * the command succeeds, the driver must use the frag size indicated
241 * in the command response since the requested frag size may not be applied
242 * until the next reboot. When the requested fragsize matches the response
243 * fragsize, this indicates the request was applied immediately.
244 */
245struct FWCMD_ETH_SET_RX_FRAG_SIZE {
246 union FWCMD_HEADER header;
247 union FWCMD_ETH_ANON_177_PARAMS params;
248} __packed;
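
The fragment size travels as a log2 value; a sketch of the encoding (ilog2() is the standard kernel helper from <linux/log2.h>; its use here is illustrative, not taken from this driver):

/* Illustrative: a 4096-byte fragment is requested as log2(4096) = 12,
 * and the actual_fragsize_log2 response decodes the same way. 'bytes'
 * is assumed to be a power of two. */
static inline u32 example_fragsize_to_log2(u32 bytes)
{
	return ilog2(bytes);
}

static inline u32 example_log2_to_fragsize(u32 log2val)
{
	return 1U << log2val;
}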
249
250struct FWCMD_ETH_ANON_181_REQUEST {
251 u32 rsvd0;
252} __packed;
253
254struct FWCMD_ETH_ANON_182_RESPONSE {
255 u32 actual_fragsize_log2;
256} __packed;
257
258union FWCMD_ETH_ANON_180_PARAMS {
259 struct FWCMD_ETH_ANON_181_REQUEST request;
260 struct FWCMD_ETH_ANON_182_RESPONSE response;
261} __packed;
262
263/*
264 * Queries the Ethernet RX fragment size. All domains may issue this
265 * command. The driver should call this command to determine the minimum
266 * required fragment size for the ethernet RX ring buffers. Drivers
267 * may choose to use a larger size for each fragment buffer, but BladeEngine
268 * will use up to the configured minimum required fragsize in each ethernet
269 * receive fragment buffer. For example, if the ethernet receive fragment
270 * size is configured to 4kB, and a driver uses 8kB fragments, a 6kB
271 * ethernet packet received by BladeEngine will be split across two
272 * of the driver's receive fragments (4kB in one fragment buffer, and
273 * 2kB in the subsequent fragment buffer).
274 */
275struct FWCMD_ETH_GET_RX_FRAG_SIZE {
276 union FWCMD_HEADER header;
277 union FWCMD_ETH_ANON_180_PARAMS params;
278} __packed;
279
280#endif /* __fwcmd_eth_bmap_h__ */
diff --git a/drivers/staging/benet/fwcmd_hdr_bmap.h b/drivers/staging/benet/fwcmd_hdr_bmap.h
deleted file mode 100644
index 28b45328fe7b..000000000000
--- a/drivers/staging/benet/fwcmd_hdr_bmap.h
+++ /dev/null
@@ -1,54 +0,0 @@
1/*
2 * Copyright (C) 2005 - 2008 ServerEngines
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@serverengines.com
12 *
13 * ServerEngines
14 * 209 N. Fair Oaks Ave
15 * Sunnyvale, CA 94085
16 */
17/*
18 * Autogenerated by srcgen version: 0127
19 */
20#ifndef __fwcmd_hdr_bmap_h__
21#define __fwcmd_hdr_bmap_h__
22
23struct FWCMD_REQUEST_HEADER {
24 u8 opcode;
25 u8 subsystem;
26 u8 port_number;
27 u8 domain;
28 u32 timeout;
29 u32 request_length;
30 u32 rsvd0;
31} __packed;
32
33struct FWCMD_RESPONSE_HEADER {
34 u8 opcode;
35 u8 subsystem;
36 u8 rsvd0;
37 u8 domain;
38 u8 status;
39 u8 additional_status;
40 u16 rsvd1;
41 u32 response_length;
42 u32 actual_response_length;
43} __packed;
44
45/*
46 * The firmware/driver overwrites the input FWCMD_REQUEST_HEADER with
47 * the output FWCMD_RESPONSE_HEADER.
48 */
49union FWCMD_HEADER {
50 struct FWCMD_REQUEST_HEADER request;
51 struct FWCMD_RESPONSE_HEADER response;
52} __packed;
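
Since the response header overlays the request header, a completed command's status is read back through the union's response view; a sketch (helper name hypothetical; MGMT_STATUS_SUCCESS is defined in fwcmd_opcodes.h):

/* Illustrative: check a completed FWCMD's status in place. */
static inline int example_fwcmd_ok(const union FWCMD_HEADER *hdr)
{
	return hdr->response.status == MGMT_STATUS_SUCCESS;
}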
53
54#endif /* __fwcmd_hdr_bmap_h__ */
diff --git a/drivers/staging/benet/fwcmd_mcc.h b/drivers/staging/benet/fwcmd_mcc.h
deleted file mode 100644
index 9eeca878c1fb..000000000000
--- a/drivers/staging/benet/fwcmd_mcc.h
+++ /dev/null
@@ -1,94 +0,0 @@
1/*
2 * Copyright (C) 2005 - 2008 ServerEngines
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@serverengines.com
12 *
13 * ServerEngines
14 * 209 N. Fair Oaks Ave
15 * Sunnyvale, CA 94085
16 */
17/*
18 * Autogenerated by srcgen version: 0127
19 */
20#ifndef __fwcmd_mcc_amap_h__
21#define __fwcmd_mcc_amap_h__
22#include "fwcmd_opcodes.h"
23/*
24 * Where applicable, a WRB may contain a list of scatter-gather elements.
25 * Each element supports a 64-bit address and a 32-bit length field.
26 */
27struct BE_MCC_SGE_AMAP {
28 u8 pa_lo[32]; /* DWORD 0 */
29 u8 pa_hi[32]; /* DWORD 1 */
30 u8 length[32]; /* DWORD 2 */
31} __packed;
32struct MCC_SGE_AMAP {
33 u32 dw[3];
34};
35/*
36 * The design of an MCC_SGE allows up to 19 elements to be embedded
37 * in a WRB, supporting 64KB data transfers (assuming a 4KB page size).
38 */
39struct BE_MCC_WRB_PAYLOAD_AMAP {
40 union {
41 struct BE_MCC_SGE_AMAP sgl[19];
42 u8 embedded[59][32]; /* DWORD 0 */
43 };
44} __packed;
45struct MCC_WRB_PAYLOAD_AMAP {
46 u32 dw[59];
47};
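
Worked out from the structure sizes above (an inference, not from the original comment): the payload area is 59 dwords (236 bytes) and each SGE is 3 dwords (12 bytes), so floor(236/12) = 19 elements fit; an unaligned 64KB transfer spans at most 17 4KB pages, which 19 elements cover with margin.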
48
49/*
50 * This is the structure of the MCC Command WRB for commands
51 * sent to the Management Processing Unit (MPU). See section
52 * for usage in embedded and non-embedded modes.
53 */
54struct BE_MCC_WRB_AMAP {
55 u8 embedded; /* DWORD 0 */
56 u8 rsvd0[2]; /* DWORD 0 */
57 u8 sge_count[5]; /* DWORD 0 */
58 u8 rsvd1[16]; /* DWORD 0 */
59 u8 special[8]; /* DWORD 0 */
60 u8 payload_length[32]; /* DWORD 1 */
61 u8 tag[2][32]; /* DWORD 2 */
62 u8 rsvd2[32]; /* DWORD 4 */
63 struct BE_MCC_WRB_PAYLOAD_AMAP payload;
64} __packed;
65struct MCC_WRB_AMAP {
66 u32 dw[64];
67};
68
69/* This is the structure of the MCC Completion queue entry */
70struct BE_MCC_CQ_ENTRY_AMAP {
71 u8 completion_status[16]; /* DWORD 0 */
72 u8 extended_status[16]; /* DWORD 0 */
73 u8 mcc_tag[2][32]; /* DWORD 1 */
74 u8 rsvd0[27]; /* DWORD 3 */
75 u8 consumed; /* DWORD 3 */
76 u8 completed; /* DWORD 3 */
77 u8 hpi_buffer_completion; /* DWORD 3 */
78 u8 async_event; /* DWORD 3 */
79 u8 valid; /* DWORD 3 */
80} __packed;
81struct MCC_CQ_ENTRY_AMAP {
82 u32 dw[4];
83};
84
85/* Mailbox structures used by the MPU during bootstrap */
86struct BE_MCC_MAILBOX_AMAP {
87 struct BE_MCC_WRB_AMAP wrb;
88 struct BE_MCC_CQ_ENTRY_AMAP cq;
89} __packed;
90struct MCC_MAILBOX_AMAP {
91 u32 dw[68];
92};
93
94#endif /* __fwcmd_mcc_amap_h__ */
diff --git a/drivers/staging/benet/fwcmd_opcodes.h b/drivers/staging/benet/fwcmd_opcodes.h
deleted file mode 100644
index 23d569386b46..000000000000
--- a/drivers/staging/benet/fwcmd_opcodes.h
+++ /dev/null
@@ -1,244 +0,0 @@
1/*
2 * Copyright (C) 2005 - 2008 ServerEngines
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@serverengines.com
12 *
13 * ServerEngines
14 * 209 N. Fair Oaks Ave
15 * Sunnyvale, CA 94085
16 */
17/*
18 * Autogenerated by srcgen version: 0127
19 */
20#ifndef __fwcmd_opcodes_amap_h__
21#define __fwcmd_opcodes_amap_h__
22
23/*
24 * --- FWCMD_SUBSYSTEMS ---
25 * The commands are grouped into the following subsystems. The subsystem
26 * code along with the opcode uniquely identify a particular fwcmd.
27 */
28#define FWCMD_SUBSYSTEM_RSVD (0) /* This subsystem is reserved. It is */
29 /* never used. */
30#define FWCMD_SUBSYSTEM_COMMON (1) /* CMDs in this group are common to
31 * all subsystems. See
32 * COMMON_SUBSYSTEM_OPCODES for opcodes
33 * and Common Host Configuration CMDs
34 * for the FWCMD descriptions.
35 */
36#define FWCMD_SUBSYSTEM_COMMON_ISCSI (2) /* CMDs in this group are */
37 /*
38 * common to Initiator and Target. See
39 * COMMON_ISCSI_SUBSYSTEM_OPCODES and
40 * Common iSCSI Initiator and Target
41 * CMDs for the command descriptions.
42 */
43#define FWCMD_SUBSYSTEM_ETH (3) /* This subsystem is used to
44 execute Ethernet commands. */
45
46#define FWCMD_SUBSYSTEM_TPM (4) /* This subsystem is used
47 to execute TPM commands. */
48#define FWCMD_SUBSYSTEM_PXE_UNDI (5) /* This subsystem is used
49 * to execute PXE
50 * and UNDI specific commands.
51 */
52
53#define FWCMD_SUBSYSTEM_ISCSI_INI (6) /* This subsystem is used to
54 execute ISCSI Initiator
55 specific commands.
56 */
57#define FWCMD_SUBSYSTEM_ISCSI_TGT (7) /* This subsystem is used
58 to execute iSCSI Target
59					specific commands between
60 PTL and ARM firmware.
61 */
62#define FWCMD_SUBSYSTEM_MILI_PTL (8) /* This subsystem is used to
63 execute iSCSI Target specific
64					commands between MILI
65 and PTL. */
66#define FWCMD_SUBSYSTEM_MILI_TMD (9) /* This subsystem is used to
67 execute iSCSI Target specific
68 commands between MILI
69 and TMD. */
70#define FWCMD_SUBSYSTEM_PROXY (11) /* This subsystem is used
71 to execute proxied commands
72 within the host at the
73 explicit request of a
74					non-privileged domain.
75 This 'subsystem' is entirely
76 virtual from the controller
77 and firmware perspective as
78 it is implemented in host
79 drivers.
80 */
81
82/*
83 * --- COMMON_SUBSYSTEM_OPCODES ---
84 * These opcodes are common to both networking and storage PCI
85 * functions. They are used to reserve resources and configure
86 * BladeEngine. These opcodes all use the FWCMD_SUBSYSTEM_COMMON
87 * subsystem code.
88 */
89#define OPCODE_COMMON_NTWK_MAC_QUERY (1)
90#define SUBSYSTEM_COMMON_NTWK_MAC_QUERY (1)
91#define SUBSYSTEM_COMMON_NTWK_MAC_SET (1)
92#define SUBSYSTEM_COMMON_NTWK_MULTICAST_SET (1)
93#define SUBSYSTEM_COMMON_NTWK_VLAN_CONFIG (1)
94#define SUBSYSTEM_COMMON_NTWK_LINK_STATUS_QUERY (1)
95#define SUBSYSTEM_COMMON_READ_FLASHROM (1)
96#define SUBSYSTEM_COMMON_WRITE_FLASHROM (1)
97#define SUBSYSTEM_COMMON_QUERY_MAX_FWCMD_BUFFER_SIZE (1)
98#define SUBSYSTEM_COMMON_ADD_PAGE_TABLES (1)
99#define SUBSYSTEM_COMMON_REMOVE_PAGE_TABLES (1)
100#define SUBSYSTEM_COMMON_RING_DESTROY (1)
101#define SUBSYSTEM_COMMON_CQ_CREATE (1)
102#define SUBSYSTEM_COMMON_EQ_CREATE (1)
103#define SUBSYSTEM_COMMON_ETH_RX_CREATE (1)
104#define SUBSYSTEM_COMMON_ETH_TX_CREATE (1)
105#define SUBSYSTEM_COMMON_ISCSI_DEFQ_CREATE (1)
106#define SUBSYSTEM_COMMON_ISCSI_WRBQ_CREATE (1)
107#define SUBSYSTEM_COMMON_MCC_CREATE (1)
108#define SUBSYSTEM_COMMON_JELL_CONFIG (1)
109#define SUBSYSTEM_COMMON_FORCE_FAILOVER (1)
110#define SUBSYSTEM_COMMON_ADD_TEMPLATE_HEADER_BUFFERS (1)
111#define SUBSYSTEM_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS (1)
112#define SUBSYSTEM_COMMON_POST_ZERO_BUFFER (1)
113#define SUBSYSTEM_COMMON_GET_QOS (1)
114#define SUBSYSTEM_COMMON_SET_QOS (1)
115#define SUBSYSTEM_COMMON_TCP_GET_STATISTICS (1)
116#define SUBSYSTEM_COMMON_SEEPROM_READ (1)
117#define SUBSYSTEM_COMMON_TCP_STATE_QUERY (1)
118#define SUBSYSTEM_COMMON_GET_CNTL_ATTRIBUTES (1)
119#define SUBSYSTEM_COMMON_NOP (1)
120#define SUBSYSTEM_COMMON_NTWK_RX_FILTER (1)
121#define SUBSYSTEM_COMMON_GET_FW_VERSION (1)
122#define SUBSYSTEM_COMMON_SET_FLOW_CONTROL (1)
123#define SUBSYSTEM_COMMON_GET_FLOW_CONTROL (1)
124#define SUBSYSTEM_COMMON_SET_TCP_PARAMETERS (1)
125#define SUBSYSTEM_COMMON_SET_FRAME_SIZE (1)
126#define SUBSYSTEM_COMMON_GET_FAT (1)
127#define SUBSYSTEM_COMMON_MODIFY_EQ_DELAY (1)
128#define SUBSYSTEM_COMMON_FIRMWARE_CONFIG (1)
129#define SUBSYSTEM_COMMON_ENABLE_DISABLE_DOMAINS (1)
130#define SUBSYSTEM_COMMON_GET_DOMAIN_CONFIG (1)
131#define SUBSYSTEM_COMMON_SET_VLD_CONFIG (1)
132#define SUBSYSTEM_COMMON_GET_VLD_CONFIG (1)
133#define SUBSYSTEM_COMMON_GET_PORT_EQUALIZATION (1)
134#define SUBSYSTEM_COMMON_SET_PORT_EQUALIZATION (1)
135#define SUBSYSTEM_COMMON_RED_CONFIG (1)
136#define OPCODE_COMMON_NTWK_MAC_SET (2)
137#define OPCODE_COMMON_NTWK_MULTICAST_SET (3)
138#define OPCODE_COMMON_NTWK_VLAN_CONFIG (4)
139#define OPCODE_COMMON_NTWK_LINK_STATUS_QUERY (5)
140#define OPCODE_COMMON_READ_FLASHROM (6)
141#define OPCODE_COMMON_WRITE_FLASHROM (7)
142#define OPCODE_COMMON_QUERY_MAX_FWCMD_BUFFER_SIZE (8)
143#define OPCODE_COMMON_ADD_PAGE_TABLES (9)
144#define OPCODE_COMMON_REMOVE_PAGE_TABLES (10)
145#define OPCODE_COMMON_RING_DESTROY (11)
146#define OPCODE_COMMON_CQ_CREATE (12)
147#define OPCODE_COMMON_EQ_CREATE (13)
148#define OPCODE_COMMON_ETH_RX_CREATE (14)
149#define OPCODE_COMMON_ETH_TX_CREATE (15)
150#define OPCODE_COMMON_NET_RESERVED0 (16) /* Reserved */
151#define OPCODE_COMMON_NET_RESERVED1 (17) /* Reserved */
152#define OPCODE_COMMON_NET_RESERVED2 (18) /* Reserved */
153#define OPCODE_COMMON_ISCSI_DEFQ_CREATE (19)
154#define OPCODE_COMMON_ISCSI_WRBQ_CREATE (20)
155#define OPCODE_COMMON_MCC_CREATE (21)
156#define OPCODE_COMMON_JELL_CONFIG (22)
157#define OPCODE_COMMON_FORCE_FAILOVER (23)
158#define OPCODE_COMMON_ADD_TEMPLATE_HEADER_BUFFERS (24)
159#define OPCODE_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS (25)
160#define OPCODE_COMMON_POST_ZERO_BUFFER (26)
161#define OPCODE_COMMON_GET_QOS (27)
162#define OPCODE_COMMON_SET_QOS (28)
163#define OPCODE_COMMON_TCP_GET_STATISTICS (29)
164#define OPCODE_COMMON_SEEPROM_READ (30)
165#define OPCODE_COMMON_TCP_STATE_QUERY (31)
166#define OPCODE_COMMON_GET_CNTL_ATTRIBUTES (32)
167#define OPCODE_COMMON_NOP (33)
168#define OPCODE_COMMON_NTWK_RX_FILTER (34)
169#define OPCODE_COMMON_GET_FW_VERSION (35)
170#define OPCODE_COMMON_SET_FLOW_CONTROL (36)
171#define OPCODE_COMMON_GET_FLOW_CONTROL (37)
172#define OPCODE_COMMON_SET_TCP_PARAMETERS (38)
173#define OPCODE_COMMON_SET_FRAME_SIZE (39)
174#define OPCODE_COMMON_GET_FAT (40)
175#define OPCODE_COMMON_MODIFY_EQ_DELAY (41)
176#define OPCODE_COMMON_FIRMWARE_CONFIG (42)
177#define OPCODE_COMMON_ENABLE_DISABLE_DOMAINS (43)
178#define OPCODE_COMMON_GET_DOMAIN_CONFIG (44)
179#define OPCODE_COMMON_SET_VLD_CONFIG (45)
180#define OPCODE_COMMON_GET_VLD_CONFIG (46)
181#define OPCODE_COMMON_GET_PORT_EQUALIZATION (47)
182#define OPCODE_COMMON_SET_PORT_EQUALIZATION (48)
183#define OPCODE_COMMON_RED_CONFIG (49)
184
185
186
187/*
188 * --- ETH_SUBSYSTEM_OPCODES ---
189 * These opcodes are used for configuring the Ethernet interfaces. These
190 * opcodes all use the FWCMD_SUBSYSTEM_ETH subsystem code.
191 */
192#define OPCODE_ETH_RSS_CONFIG (1)
193#define OPCODE_ETH_ACPI_CONFIG (2)
194#define SUBSYSTEM_ETH_RSS_CONFIG (3)
195#define SUBSYSTEM_ETH_ACPI_CONFIG (3)
196#define OPCODE_ETH_PROMISCUOUS (3)
197#define SUBSYSTEM_ETH_PROMISCUOUS (3)
198#define SUBSYSTEM_ETH_GET_STATISTICS (3)
199#define SUBSYSTEM_ETH_GET_RX_FRAG_SIZE (3)
200#define SUBSYSTEM_ETH_SET_RX_FRAG_SIZE (3)
201#define OPCODE_ETH_GET_STATISTICS (4)
202#define OPCODE_ETH_GET_RX_FRAG_SIZE (5)
203#define OPCODE_ETH_SET_RX_FRAG_SIZE (6)
204
205
206
207
208
209/*
210 * --- MCC_STATUS_CODE ---
211 * These are the global status codes used by all subsystems
212 */
213#define MCC_STATUS_SUCCESS (0) /* Indicates a successful
214 completion of the command */
215#define MCC_STATUS_INSUFFICIENT_PRIVILEGES (1) /* The client does not have
216 sufficient privileges to
217 execute the command */
218#define MCC_STATUS_INVALID_PARAMETER (2) /* A parameter in the command
219 was invalid. The extended
220 status contains the index
221 of the parameter */
222#define MCC_STATUS_INSUFFICIENT_RESOURCES (3) /* There are insufficient
223 chip resources to execute
224 the command */
225#define MCC_STATUS_QUEUE_FLUSHING (4) /* The command is completing
226 because the queue was
227 getting flushed */
228#define MCC_STATUS_DMA_FAILED (5) /* The command is completing
229 with a DMA error */
230
231/*
232 * --- MGMT_ERROR_CODES ---
233 * Error Codes returned in the status field of the FWCMD response header
234 */
235#define MGMT_STATUS_SUCCESS (0) /* The FWCMD completed
236 without errors */
237#define MGMT_STATUS_FAILED (1) /* Error status in the Status
238 field of the
239 struct FWCMD_RESPONSE_HEADER */
240#define MGMT_STATUS_ILLEGAL_REQUEST (2) /* Invalid FWCMD opcode */
241#define MGMT_STATUS_ILLEGAL_FIELD (3) /* Invalid parameter in
242 the FWCMD payload */
243
244#endif /* __fwcmd_opcodes_amap_h__ */
diff --git a/drivers/staging/benet/fwcmd_types_bmap.h b/drivers/staging/benet/fwcmd_types_bmap.h
deleted file mode 100644
index 92217aff3a16..000000000000
--- a/drivers/staging/benet/fwcmd_types_bmap.h
+++ /dev/null
@@ -1,29 +0,0 @@
1/*
2 * Copyright (C) 2005 - 2008 ServerEngines
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@serverengines.com
12 *
13 * ServerEngines
14 * 209 N. Fair Oaks Ave
15 * Sunnyvale, CA 94085
16 */
17/*
18 * Autogenerated by srcgen version: 0127
19 */
20#ifndef __fwcmd_types_bmap_h__
21#define __fwcmd_types_bmap_h__
22
23/* MAC address format */
24struct MAC_ADDRESS_FORMAT {
25 u16 SizeOfStructure;
26 u8 MACAddress[6];
27} __packed;
28
29#endif /* __fwcmd_types_bmap_h__ */
diff --git a/drivers/staging/benet/host_struct.h b/drivers/staging/benet/host_struct.h
deleted file mode 100644
index 3de6722b980f..000000000000
--- a/drivers/staging/benet/host_struct.h
+++ /dev/null
@@ -1,182 +0,0 @@
1/*
2 * Copyright (C) 2005 - 2008 ServerEngines
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@serverengines.com
12 *
13 * ServerEngines
14 * 209 N. Fair Oaks Ave
15 * Sunnyvale, CA 94085
16 */
17/*
18 * Autogenerated by srcgen version: 0127
19 */
20#ifndef __host_struct_amap_h__
21#define __host_struct_amap_h__
22#include "be_cm.h"
23#include "be_common.h"
24#include "descriptors.h"
25
26/* --- EQ_COMPLETION_MAJOR_CODE_ENUM --- */
27#define EQ_MAJOR_CODE_COMPLETION (0) /* Completion event on a */
28						/* completion queue. */
29#define EQ_MAJOR_CODE_ETH (1) /* Affiliated Ethernet Event. */
30#define EQ_MAJOR_CODE_RESERVED (2) /* Reserved */
31#define EQ_MAJOR_CODE_RDMA (3) /* Affiliated RDMA Event. */
32#define EQ_MAJOR_CODE_ISCSI (4) /* Affiliated ISCSI Event */
33#define EQ_MAJOR_CODE_UNAFFILIATED (5) /* Unaffiliated Event */
34
35/* --- EQ_COMPLETION_MINOR_CODE_ENUM --- */
36#define EQ_MINOR_CODE_COMPLETION (0) /* Completion event on a */
37 /* completion queue. */
38#define EQ_MINOR_CODE_OTHER (1) /* Other Event (TBD). */
39
40/* Queue Entry Definition for all 4 byte event queue types. */
41struct BE_EQ_ENTRY_AMAP {
42 u8 Valid; /* DWORD 0 */
43 u8 MajorCode[3]; /* DWORD 0 */
44 u8 MinorCode[12]; /* DWORD 0 */
45 u8 ResourceID[16]; /* DWORD 0 */
46} __packed;
47struct EQ_ENTRY_AMAP {
48 u32 dw[1];
49};
50
51/*
52 * --- ETH_EVENT_CODE ---
53 * These codes are returned by the MPU when one of these events has occurred,
54 * and the event is configured to report to an Event Queue when an event
55 * is detected.
56 */
57#define ETH_EQ_LINK_STATUS (0) /* Link status change event */
58 /* detected. */
59#define ETH_EQ_WATERMARK (1) /* watermark event detected. */
60#define ETH_EQ_MAGIC_PKT (2) /* magic pkt event detected. */
61#define ETH_EQ_ACPI_PKT0 (3) /* ACPI interesting packet */
62 /* detected. */
63#define ETH_EQ_ACPI_PKT1 (3) /* ACPI interesting packet */
64 /* detected. */
65#define ETH_EQ_ACPI_PKT2 (3) /* ACPI interesting packet */
66 /* detected. */
67#define ETH_EQ_ACPI_PKT3 (3) /* ACPI interesting packet */
68 /* detected. */
69
70/*
71 * --- ETH_TX_COMPL_STATUS_ENUM ---
72 * Status codes contained in Ethernet TX completion descriptors.
73 */
74#define ETH_COMP_VALID (0)
75#define ETH_COMP_ERROR (1)
76#define ETH_COMP_INVALID (15)
77
78/*
79 * --- ETH_TX_COMPL_PORT_ENUM ---
80 * Port indicator contained in Ethernet TX completion descriptors.
81 */
82#define ETH_COMP_PORT0 (0)
83#define ETH_COMP_PORT1 (1)
84#define ETH_COMP_MGMT (2)
85
86/*
87 * --- ETH_TX_COMPL_CT_ENUM ---
88 * Completion type indicator contained in Ethernet TX completion descriptors.
89 */
90#define ETH_COMP_ETH (0)
91
92/*
93 * Work request block that the driver issues to the chip for
94 * Ethernet transmissions. All control fields must be valid in each WRB for
95 * a message. The controller, as specified by the flags, optionally writes
96 * an entry to the Completion Ring and generates an event.
97 */
98struct BE_ETH_WRB_AMAP {
99 u8 frag_pa_hi[32]; /* DWORD 0 */
100 u8 frag_pa_lo[32]; /* DWORD 1 */
101 u8 complete; /* DWORD 2 */
102 u8 event; /* DWORD 2 */
103 u8 crc; /* DWORD 2 */
104 u8 forward; /* DWORD 2 */
105 u8 ipsec; /* DWORD 2 */
106 u8 mgmt; /* DWORD 2 */
107 u8 ipcs; /* DWORD 2 */
108 u8 udpcs; /* DWORD 2 */
109 u8 tcpcs; /* DWORD 2 */
110 u8 lso; /* DWORD 2 */
111 u8 last; /* DWORD 2 */
112 u8 vlan; /* DWORD 2 */
113 u8 dbg[3]; /* DWORD 2 */
114 u8 hash_val[3]; /* DWORD 2 */
115 u8 lso_mss[14]; /* DWORD 2 */
116 u8 frag_len[16]; /* DWORD 3 */
117 u8 vlan_tag[16]; /* DWORD 3 */
118} __packed;
119struct ETH_WRB_AMAP {
120 u32 dw[4];
121};
122
123/* This is an Ethernet transmit completion descriptor */
124struct BE_ETH_TX_COMPL_AMAP {
125 u8 user_bytes[16]; /* DWORD 0 */
126 u8 nwh_bytes[8]; /* DWORD 0 */
127 u8 lso; /* DWORD 0 */
128 u8 rsvd0[7]; /* DWORD 0 */
129 u8 wrb_index[16]; /* DWORD 1 */
130 u8 ct[2]; /* DWORD 1 */
131 u8 port[2]; /* DWORD 1 */
132 u8 rsvd1[8]; /* DWORD 1 */
133 u8 status[4]; /* DWORD 1 */
134 u8 rsvd2[16]; /* DWORD 2 */
135 u8 ringid[11]; /* DWORD 2 */
136 u8 hash_val[4]; /* DWORD 2 */
137 u8 valid; /* DWORD 2 */
138 u8 rsvd3[32]; /* DWORD 3 */
139} __packed;
140struct ETH_TX_COMPL_AMAP {
141 u32 dw[4];
142};
143
144/* Ethernet Receive Buffer descriptor */
145struct BE_ETH_RX_D_AMAP {
146 u8 fragpa_hi[32]; /* DWORD 0 */
147 u8 fragpa_lo[32]; /* DWORD 1 */
148} __packed;
149struct ETH_RX_D_AMAP {
150 u32 dw[2];
151};
152
153/* This is an Ethernet Receive Completion Descriptor */
154struct BE_ETH_RX_COMPL_AMAP {
155 u8 vlan_tag[16]; /* DWORD 0 */
156 u8 pktsize[14]; /* DWORD 0 */
157 u8 port; /* DWORD 0 */
158 u8 rsvd0; /* DWORD 0 */
159 u8 err; /* DWORD 1 */
160 u8 rsshp; /* DWORD 1 */
161 u8 ipf; /* DWORD 1 */
162 u8 tcpf; /* DWORD 1 */
163 u8 udpf; /* DWORD 1 */
164 u8 ipcksm; /* DWORD 1 */
165 u8 tcpcksm; /* DWORD 1 */
166 u8 udpcksm; /* DWORD 1 */
167 u8 macdst[6]; /* DWORD 1 */
168 u8 vtp; /* DWORD 1 */
169 u8 vtm; /* DWORD 1 */
170 u8 fragndx[10]; /* DWORD 1 */
171 u8 ct[2]; /* DWORD 1 */
172 u8 ipsec; /* DWORD 1 */
173 u8 numfrags[3]; /* DWORD 1 */
174 u8 rsvd1[31]; /* DWORD 2 */
175 u8 valid; /* DWORD 2 */
176 u8 rsshash[32]; /* DWORD 3 */
177} __packed;
178struct ETH_RX_COMPL_AMAP {
179 u32 dw[4];
180};
181
182#endif /* __host_struct_amap_h__ */
diff --git a/drivers/staging/benet/hwlib.h b/drivers/staging/benet/hwlib.h
deleted file mode 100644
index afedf4dc5903..000000000000
--- a/drivers/staging/benet/hwlib.h
+++ /dev/null
@@ -1,830 +0,0 @@
1/*
2 * Copyright (C) 2005 - 2008 ServerEngines
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@serverengines.com
12 *
13 * ServerEngines
14 * 209 N. Fair Oaks Ave
15 * Sunnyvale, CA 94085
16 */
17#ifndef __hwlib_h__
18#define __hwlib_h__
19
20#include <linux/module.h>
21#include <linux/io.h>
22#include <linux/list.h>
23#include <linux/spinlock.h>
24
25#include "regmap.h" /* srcgen array map output */
26
27#include "asyncmesg.h"
28#include "fwcmd_opcodes.h"
29#include "post_codes.h"
30#include "fwcmd_mcc.h"
31
32#include "fwcmd_types_bmap.h"
33#include "fwcmd_common_bmap.h"
34#include "fwcmd_eth_bmap.h"
35#include "bestatus.h"
36/*
37 *
38 * Macros for reading/writing a protection domain or CSR registers
39 * in BladeEngine.
40 */
41#define PD_READ(fo, field) ioread32((fo)->db_va + \
42 offsetof(struct BE_PROTECTION_DOMAIN_DBMAP_AMAP, field)/8)
43
44#define PD_WRITE(fo, field, val) iowrite32(val, (fo)->db_va + \
45 offsetof(struct BE_PROTECTION_DOMAIN_DBMAP_AMAP, field)/8)
46
47#define CSR_READ(fo, field) ioread32((fo)->csr_va + \
48 offsetof(struct BE_BLADE_ENGINE_CSRMAP_AMAP, field)/8)
49
50#define CSR_WRITE(fo, field, val) iowrite32(val, (fo)->csr_va + \
51 offsetof(struct BE_BLADE_ENGINE_CSRMAP_AMAP, field)/8)
52
53#define PCICFG0_READ(fo, field) ioread32((fo)->pci_va + \
54 offsetof(struct BE_PCICFG0_CSRMAP_AMAP, field)/8)
55
56#define PCICFG0_WRITE(fo, field, val) iowrite32(val, (fo)->pci_va + \
57 offsetof(struct BE_PCICFG0_CSRMAP_AMAP, field)/8)
58
59#define PCICFG1_READ(fo, field) ioread32((fo)->pci_va + \
60 offsetof(struct BE_PCICFG1_CSRMAP_AMAP, field)/8)
61
62#define PCICFG1_WRITE(fo, field, val) iowrite32(val, (fo)->pci_va + \
63 offsetof(struct BE_PCICFG1_CSRMAP_AMAP, field)/8)
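
For illustration, a minimal sketch of the read-modify-write pattern these accessors support, using the mcc_bootstrap_db doorbell field that the mailbox code later in this patch actually reads and writes (pfob is assumed to be a fully initialized struct be_function_object with its BARs mapped):

	u32 db;

	/* 32-bit MMIO read of the bootstrap doorbell in the PD BAR. */
	db = PD_READ(pfob, mcc_bootstrap_db);
	/* ... inspect or modify db ... */
	PD_WRITE(pfob, mcc_bootstrap_db, db);	/* write it back */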
64
65#ifdef BE_DEBUG
66#define ASSERT(c) BUG_ON(!(c));
67#else
68#define ASSERT(c)
69#endif
70
71/* debug levels */
72enum BE_DEBUG_LEVELS {
73 DL_ALWAYS = 0, /* cannot be masked */
74 DL_ERR = 0x1, /* errors that should never happen */
75	DL_WARN = 0x2, 		/* something questionable;
76 recoverable errors */
77 DL_NOTE = 0x4, /* infrequent, important debug info */
78 DL_INFO = 0x8, /* debug information */
79 DL_VERBOSE = 0x10, /* detailed info, such as buffer traces */
80 BE_DL_MIN_VALUE = 0x1, /* this is the min value used */
81	BE_DL_MAX_VALUE = 0x80 	/* this is the highest value used */
82} ;
83
84extern unsigned int trace_level;
85
86#define TRACE(lm, fmt, args...) { \
87 if (trace_level & lm) { \
88 printk(KERN_NOTICE "BE: %s:%d \n" fmt, \
89 __FILE__ , __LINE__ , ## args); \
90 } \
91 }
92
93static inline unsigned int be_trace_set_level(unsigned int level)
94{
95 unsigned int old_level = trace_level;
96 trace_level = level;
97 return old_level;
98}
99
100#define be_trace_get_level() trace_level
101/*
102 * Returns number of pages spanned by the size of data
103 * starting at the given address.
104 */
105#define PAGES_SPANNED(_address, _size) \
106 ((u32)((((size_t)(_address) & (PAGE_SIZE - 1)) + \
107 (_size) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))
108/* Byte offset into the page corresponding to given address */
109#define OFFSET_IN_PAGE(_addr_) ((size_t)(_addr_) & (PAGE_SIZE-1))
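
A quick worked example of the two page-arithmetic macros above, assuming 4 KB pages (PAGE_SIZE = 4096, PAGE_SHIFT = 12):

	/* A buffer that starts 0x100 bytes into a page and is 8192
	 * bytes long touches three pages:
	 *   PAGES_SPANNED(0x100, 8192) = (0x100 + 8192 + 4095) >> 12 = 3
	 *   OFFSET_IN_PAGE(0x100)      = 0x100
	 */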
110
111/*
112 * circular subtract.
113 * Returns a - b assuming a circular number system, where a and b are
114 * in the range [0, max-1]. If a == b, zero is returned, so the
115 * highest value possible with this subtraction is max-1.
116 */
117static inline u32 be_subc(u32 a, u32 b, u32 max)
118{
119 ASSERT(a <= max && b <= max);
120 ASSERT(max > 0);
121 return a >= b ? (a - b) : (max - b + a);
122}
123
124static inline u32 be_addc(u32 a, u32 b, u32 max)
125{
126 ASSERT(a < max);
127 ASSERT(max > 0);
128 return (max - a > b) ? (a + b) : (b + a - max);
129}
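
A few worked values for the circular helpers above, using a ring of max = 10:

	/*
	 *   be_subc(7, 2, 10) = 5   (no wrap: 7 - 2)
	 *   be_subc(2, 7, 10) = 5   (wraps:  10 - 7 + 2)
	 *   be_addc(8, 5, 10) = 3   (wraps:   8 + 5 - 10)
	 */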
130
131/* descriptor for a physically contiguous memory used for ring */
132struct ring_desc {
133 u32 length; /* length in bytes */
134 void *va; /* virtual address */
135 u64 pa; /* bus address */
136} ;
137
138/*
139 * This structure stores information about a ring shared between hardware
140 * and software. Each ring is allocated by the driver in the uncached
141 * extension and mapped into BladeEngine's unified table.
142 */
143struct mp_ring {
144 u32 pages; /* queue size in pages */
145 u32 id; /* queue id assigned by beklib */
146 u32 num; /* number of elements in queue */
147 u32 cidx; /* consumer index */
148 u32 pidx; /* producer index -- not used by most rings */
149 u32 itemSize; /* size in bytes of one object */
150
151 void *va; /* The virtual address of the ring.
152 This should be last to allow 32 & 64
153 bit debugger extensions to work. */
154} ;
155
156/*----------- amap bit field get / set macros and functions -----*/
157/*
158 * Structures defined in the map header files (under fw/amap/) with names
159 * in the format BE_<name>_AMAP are pseudo structures with members
160 * of type u8. These structures are templates that are used in
161 * conjunction with the structures with names in the format
162 * <name>_AMAP to calculate the bit masks and bit offsets to get or set
163 * bit fields in structures. The structures <name>_AMAP are arrays
164 * of 32-bit words and have the correct size. The following macros
165 * provide convenient ways to get and set the various members
166 * in the structures without using structures with bit fields.
167 * Always use the AMAP_GET_BITS_PTR and AMAP_SET_BITS_PTR macros
168 * to extract and set the various members.
169 */
170
171/*
172 * Returns a bit mask for the register that is NOT shifted into location.
173 * That means return values always look like: 0x1, 0xFF, 0x7FF, etc...
174 */
175static inline u32 amap_mask(u32 bit_size)
176{
177 return bit_size == 32 ? 0xFFFFFFFF : (1 << bit_size) - 1;
178}
179
180#define AMAP_BIT_MASK(_struct_, field) \
181 amap_mask(AMAP_BIT_SIZE(_struct_, field))
182
183/*
184 * Non-optimized set-bits function: first clears the bits, then assigns them.
185 * This does not require knowledge of the particular DWORD you are setting.
186 * e.g. AMAP_SET_BITS_PTR (struct, field1, &contextMemory, 123);
187 */
188static inline void
189amap_set(void *ptr, u32 dw_offset, u32 mask, u32 offset, u32 value)
190{
191 u32 *dw = (u32 *)ptr;
192 *(dw + dw_offset) &= ~(mask << offset);
193 *(dw + dw_offset) |= (mask & value) << offset;
194}
195
196#define AMAP_SET_BITS_PTR(_struct_, field, _structPtr_, val) \
197 amap_set(_structPtr_, AMAP_WORD_OFFSET(_struct_, field),\
198 AMAP_BIT_MASK(_struct_, field), \
199 AMAP_BIT_OFFSET(_struct_, field), val)
200/*
201 * Non-optimized routine that gets the bits without knowing the correct DWORD.
202 * e.g. fieldValue = AMAP_GET_BITS_PTR (struct, field1, &contextMemory);
203 */
204static inline u32
205amap_get(void *ptr, u32 dw_offset, u32 mask, u32 offset)
206{
207 u32 *dw = (u32 *)ptr;
208 return mask & (*(dw + dw_offset) >> offset);
209}
210#define AMAP_GET_BITS_PTR(_struct_, field, _structPtr_) \
211 amap_get(_structPtr_, AMAP_WORD_OFFSET(_struct_, field), \
212 AMAP_BIT_MASK(_struct_, field), \
213 AMAP_BIT_OFFSET(_struct_, field))
214
215/* Returns 0-31 representing bit offset within a DWORD of a bitfield. */
216#define AMAP_BIT_OFFSET(_struct_, field) \
217 (offsetof(struct BE_ ## _struct_ ## _AMAP, field) % 32)
218
219/* Returns 0-n representing DWORD offset of bitfield within the structure. */
220#define AMAP_WORD_OFFSET(_struct_, field) \
221 (offsetof(struct BE_ ## _struct_ ## _AMAP, field)/32)
222
223/* Returns size of bitfield in bits. */
224#define AMAP_BIT_SIZE(_struct_, field) \
225 sizeof(((struct BE_ ## _struct_ ## _AMAP*)0)->field)
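
To make the template mechanism concrete, here is a hedged sketch using the BE_EQ_ENTRY_AMAP/EQ_ENTRY_AMAP pair defined in host_struct.h earlier in this patch. ResourceID follows 1 + 3 + 12 = 16 one-bit u8 members, so it lands at bit 16 of DWORD 0 with a 16-bit width:

	struct EQ_ENTRY_AMAP eqe = { .dw = { 0 } };
	u32 rid;

	/* AMAP_WORD_OFFSET(EQ_ENTRY, ResourceID) = 16 / 32 = 0
	 * AMAP_BIT_OFFSET(EQ_ENTRY, ResourceID)  = 16 % 32 = 16
	 * AMAP_BIT_MASK(EQ_ENTRY, ResourceID)    = 0xFFFF
	 */
	AMAP_SET_BITS_PTR(EQ_ENTRY, ResourceID, &eqe, 42);
	/* eqe.dw[0] is now 42 << 16 */
	rid = AMAP_GET_BITS_PTR(EQ_ENTRY, ResourceID, &eqe);	/* rid == 42 */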
226
227struct be_mcc_wrb_response_copy {
228 u16 length; /* bytes in response */
229 u16 fwcmd_offset; /* offset within the wrb of the response */
230 void *va; /* user's va to copy response into */
231
232} ;
233typedef void (*mcc_wrb_cqe_callback) (void *context, int status,
234 struct MCC_WRB_AMAP *optional_wrb);
235struct be_mcc_wrb_context {
236
237 mcc_wrb_cqe_callback internal_cb; /* Function to call on
238 completion */
239 void *internal_cb_context; /* Parameter to pass
240 to completion function */
241
242 mcc_wrb_cqe_callback cb; /* Function to call on completion */
243 void *cb_context; /* Parameter to pass to completion function */
244
245 int *users_final_status; /* pointer to a local
246 variable for synchronous
247 commands */
248 struct MCC_WRB_AMAP *wrb; /* pointer to original wrb for embedded
249 commands only */
250 struct list_head next; /* links context structs together in
251 free list */
252
253 struct be_mcc_wrb_response_copy copy; /* Optional parameters to copy
254 embedded response to user's va */
255
256#if defined(BE_DEBUG)
257 u16 subsystem, opcode; /* Track this FWCMD for debug builds. */
258 struct MCC_WRB_AMAP *ring_wrb;
259 u32 consumed_count;
260#endif
261} ;
262
263/*
264 Represents a function object for network or storage. This
265 is used to manage per-function resources like MCC CQs, etc.
266*/
267struct be_function_object {
268
269 u32 magic; /*!< magic for detecting memory corruption. */
270
271 /* PCI BAR mapped addresses */
272 u8 __iomem *csr_va; /* CSR */
273 u8 __iomem *db_va; /* Door Bell */
274 u8 __iomem *pci_va; /* PCI config space */
275 u32 emulate; /* if set, MPU is not available.
276 Emulate everything. */
277 u32 pend_queue_driving; /* if set, drive the queued WRBs
278 after releasing the WRB lock */
279
280 spinlock_t post_lock; /* lock for verifying one thread posting wrbs */
281 spinlock_t cq_lock; /* lock for verifying one thread
282 processing cq */
283 spinlock_t mcc_context_lock; /* lock for protecting mcc
284 context free list */
285 unsigned long post_irq;
286 unsigned long cq_irq;
287
288 u32 type;
289 u32 pci_function_number;
290
291 struct be_mcc_object *mcc; /* mcc rings. */
292
293 struct {
294 struct MCC_MAILBOX_AMAP *va; /* VA to the mailbox */
295 u64 pa; /* PA to the mailbox */
296 u32 length; /* byte length of mailbox */
297
298 /* One default context struct used for posting at
299 * least one MCC_WRB
300 */
301 struct be_mcc_wrb_context default_context;
302 bool default_context_allocated;
303 } mailbox;
304
305 struct {
306
307 /* Wake on lans configured. */
308 u32 wol_bitmask; /* bits 0,1,2,3 are set if
309 corresponding index is enabled */
310 } config;
311
312
313 struct BE_FIRMWARE_CONFIG fw_config;
314} ;
315
316/*
317 Represents an Event Queue
318*/
319struct be_eq_object {
320 u32 magic;
321 atomic_t ref_count;
322
323 struct be_function_object *parent_function;
324
325 struct list_head eq_list;
326 struct list_head cq_list_head;
327
328 u32 eq_id;
329 void *cb_context;
330
331} ;
332
333/*
334 Manages a completion queue
335*/
336struct be_cq_object {
337 u32 magic;
338 atomic_t ref_count;
339
340 struct be_function_object *parent_function;
341 struct be_eq_object *eq_object;
342
343 struct list_head cq_list;
344 struct list_head cqlist_for_eq;
345
346 void *va;
347 u32 num_entries;
348
349 void *cb_context;
350
351 u32 cq_id;
352
353} ;
354
355/*
356 Manages an ethernet send queue
357*/
358struct be_ethsq_object {
359 u32 magic;
360
361 struct list_head list;
362
363 struct be_function_object *parent_function;
364 struct be_cq_object *cq_object;
365 u32 bid;
366
367} ;
368
369/*
370
371 Manages an ethernet receive queue
372*/
373struct be_ethrq_object {
374 u32 magic;
375 struct list_head list;
376 struct be_function_object *parent_function;
377 u32 rid;
378 struct be_cq_object *cq_object;
379 struct be_cq_object *rss_cq_object[4];
380
381} ;
382
383/*
384 Manages an MCC
385*/
386typedef void (*mcc_async_event_callback) (void *context, u32 event_code,
387 void *event);
388struct be_mcc_object {
389 u32 magic;
390
391 struct be_function_object *parent_function;
392 struct list_head mcc_list;
393
394 struct be_cq_object *cq_object;
395
396 /* Async event callback for MCC CQ. */
397 mcc_async_event_callback async_cb;
398 void *async_context;
399
400 struct {
401 struct be_mcc_wrb_context *base;
402 u32 num;
403 struct list_head list_head;
404 } wrb_context;
405
406 struct {
407 struct ring_desc *rd;
408 struct mp_ring ring;
409 } sq;
410
411 struct {
412 struct mp_ring ring;
413 } cq;
414
415 u32 processing; /* flag indicating that one thread
416 is processing CQ */
417 u32 rearm; /* doorbell rearm setting to make
418					   sure the active processing thread
419				   rearms the CQ if any of the threads requested it. */
420
421 struct list_head backlog;
422 u32 backlog_length;
423 u32 driving_backlog;
424 u32 consumed_index;
425
426} ;
427
428
429/* Queue context header -- the required software information for
430 * queueing a WRB.
431 */
432struct be_queue_driver_context {
433 mcc_wrb_cqe_callback internal_cb; /* Function to call on
434 completion */
435 void *internal_cb_context; /* Parameter to pass
436 to completion function */
437
438 mcc_wrb_cqe_callback cb; /* Function to call on completion */
439 void *cb_context; /* Parameter to pass to completion function */
440
441 struct be_mcc_wrb_response_copy copy; /* Optional parameters to copy
442 embedded response to user's va */
443 void *optional_fwcmd_va;
444 struct list_head list;
445 u32 bytes;
446} ;
447
448/*
449 * Common MCC WRB header that all commands require.
450 */
451struct be_mcc_wrb_header {
452 u8 rsvd[offsetof(struct BE_MCC_WRB_AMAP, payload)/8];
453} ;
454
455/*
456 * All non embedded commands supported by hwlib functions only allow
457 * 1 SGE. This queue context handles them all.
458 */
459struct be_nonembedded_q_ctxt {
460 struct be_queue_driver_context context;
461 struct be_mcc_wrb_header wrb_header;
462 struct MCC_SGE_AMAP sge[1];
463} ;
464
465/*
466 * ------------------------------------------------------------------------
467 * This section contains the specific queue struct for each command.
468 * The user could always provide a be_generic_q_ctxt but this is a
469 * rather large struct. By using the specific struct, memory consumption
470 * can be reduced.
471 * ------------------------------------------------------------------------
472 */
473
474struct be_link_status_q_ctxt {
475 struct be_queue_driver_context context;
476 struct be_mcc_wrb_header wrb_header;
477 struct FWCMD_COMMON_NTWK_LINK_STATUS_QUERY fwcmd;
478} ;
479
480struct be_multicast_q_ctxt {
481 struct be_queue_driver_context context;
482 struct be_mcc_wrb_header wrb_header;
483 struct FWCMD_COMMON_NTWK_MULTICAST_SET fwcmd;
484} ;
485
486
487struct be_vlan_q_ctxt {
488 struct be_queue_driver_context context;
489 struct be_mcc_wrb_header wrb_header;
490 struct FWCMD_COMMON_NTWK_VLAN_CONFIG fwcmd;
491} ;
492
493struct be_promiscuous_q_ctxt {
494 struct be_queue_driver_context context;
495 struct be_mcc_wrb_header wrb_header;
496 struct FWCMD_ETH_PROMISCUOUS fwcmd;
497} ;
498
499struct be_force_failover_q_ctxt {
500 struct be_queue_driver_context context;
501 struct be_mcc_wrb_header wrb_header;
502 struct FWCMD_COMMON_FORCE_FAILOVER fwcmd;
503} ;
504
505
506struct be_rxf_filter_q_ctxt {
507 struct be_queue_driver_context context;
508 struct be_mcc_wrb_header wrb_header;
509 struct FWCMD_COMMON_NTWK_RX_FILTER fwcmd;
510} ;
511
512struct be_eq_modify_delay_q_ctxt {
513 struct be_queue_driver_context context;
514 struct be_mcc_wrb_header wrb_header;
515 struct FWCMD_COMMON_MODIFY_EQ_DELAY fwcmd;
516} ;
517
518/*
519 * The generic context is the largest size that would be required.
520 * It is the software context plus an entire WRB.
521 */
522struct be_generic_q_ctxt {
523 struct be_queue_driver_context context;
524 struct be_mcc_wrb_header wrb_header;
525 struct MCC_WRB_PAYLOAD_AMAP payload;
526} ;
527
528/*
529 * Types for the BE_QUEUE_CONTEXT object.
530 */
531#define BE_QUEUE_INVALID (0)
532#define BE_QUEUE_LINK_STATUS (0xA006)
533#define BE_QUEUE_ETH_STATS (0xA007)
534#define BE_QUEUE_TPM_STATS (0xA008)
535#define BE_QUEUE_TCP_STATS (0xA009)
536#define BE_QUEUE_MULTICAST (0xA00A)
537#define BE_QUEUE_VLAN (0xA00B)
538#define BE_QUEUE_RSS (0xA00C)
539#define BE_QUEUE_FORCE_FAILOVER (0xA00D)
540#define BE_QUEUE_PROMISCUOUS (0xA00E)
541#define BE_QUEUE_WAKE_ON_LAN (0xA00F)
542#define BE_QUEUE_NOP (0xA010)
543
544/* --- BE_FUNCTION_ENUM --- */
545#define BE_FUNCTION_TYPE_ISCSI (0)
546#define BE_FUNCTION_TYPE_NETWORK (1)
547#define BE_FUNCTION_TYPE_ARM (2)
548
549/* --- BE_ETH_TX_RING_TYPE_ENUM --- */
550#define BE_ETH_TX_RING_TYPE_FORWARDING (1) /* Ether ring for forwarding */
551#define BE_ETH_TX_RING_TYPE_STANDARD (2) /* Ether ring for sending */
552 /* network packets. */
553#define BE_ETH_TX_RING_TYPE_BOUND (3) /* Ethernet ring for sending */
554 /* network packets, bound */
555 /* to a physical port. */
556/*
557 * ----------------------------------------------------------------------
558 * API MACROS
559 * ----------------------------------------------------------------------
560 */
561#define BE_FWCMD_NAME(_short_name_) struct FWCMD_##_short_name_
562#define BE_OPCODE_NAME(_short_name_) OPCODE_##_short_name_
563#define BE_SUBSYSTEM_NAME(_short_name_) SUBSYSTEM_##_short_name_
564
565
566#define BE_PREPARE_EMBEDDED_FWCMD(_pfob_, _wrb_, _short_name_) \
567 ((BE_FWCMD_NAME(_short_name_) *) \
568 be_function_prepare_embedded_fwcmd(_pfob_, _wrb_, \
569 sizeof(BE_FWCMD_NAME(_short_name_)), \
570 FIELD_SIZEOF(BE_FWCMD_NAME(_short_name_), params.request), \
571 FIELD_SIZEOF(BE_FWCMD_NAME(_short_name_), params.response), \
572 BE_OPCODE_NAME(_short_name_), \
573 BE_SUBSYSTEM_NAME(_short_name_)));
574
575#define BE_PREPARE_NONEMBEDDED_FWCMD(_pfob_, _wrb_, _iva_, _ipa_, _short_name_)\
576 ((BE_FWCMD_NAME(_short_name_) *) \
577 be_function_prepare_nonembedded_fwcmd(_pfob_, _wrb_, (_iva_), (_ipa_), \
578 sizeof(BE_FWCMD_NAME(_short_name_)), \
579 FIELD_SIZEOF(BE_FWCMD_NAME(_short_name_), params.request), \
580 FIELD_SIZEOF(BE_FWCMD_NAME(_short_name_), params.response), \
581 BE_OPCODE_NAME(_short_name_), \
582 BE_SUBSYSTEM_NAME(_short_name_)));
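
The calling convention for these macros can be seen in be_mcc_ring_create() in mpu.c later in this patch; a condensed sketch of the embedded case (error handling and the pfob->post_lock locking that real callers hold are omitted):

	struct FWCMD_COMMON_MCC_CREATE *fwcmd;
	struct MCC_WRB_AMAP *wrb;
	int status;

	wrb = be_function_peek_mcc_wrb(pfob);	/* reserve the next WRB */
	if (!wrb)
		return BE_STATUS_NO_MCC_WRB;
	/* Fills in opcode, subsystem and request/response lengths. */
	fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_MCC_CREATE);
	/* ... populate fwcmd->params.request ... */
	status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL,
					  NULL, NULL, fwcmd, NULL);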
583
584int be_function_object_create(u8 __iomem *csr_va, u8 __iomem *db_va,
585 u8 __iomem *pci_va, u32 function_type, struct ring_desc *mailbox_rd,
586 struct be_function_object *pfob);
587
588int be_function_object_destroy(struct be_function_object *pfob);
589int be_function_cleanup(struct be_function_object *pfob);
590
591
592int be_function_get_fw_version(struct be_function_object *pfob,
593 struct FWCMD_COMMON_GET_FW_VERSION_RESPONSE_PAYLOAD *fw_version,
594 mcc_wrb_cqe_callback cb, void *cb_context);
595
596
597int be_eq_modify_delay(struct be_function_object *pfob,
598 u32 num_eq, struct be_eq_object **eq_array,
599 u32 *eq_delay_array, mcc_wrb_cqe_callback cb,
600 void *cb_context,
601 struct be_eq_modify_delay_q_ctxt *q_ctxt);
602
603
604
605int be_eq_create(struct be_function_object *pfob,
606 struct ring_desc *rd, u32 eqe_size, u32 num_entries,
607 u32 watermark, u32 timer_delay, struct be_eq_object *eq_object);
608
609int be_eq_destroy(struct be_eq_object *eq);
610
611int be_cq_create(struct be_function_object *pfob,
612 struct ring_desc *rd, u32 length,
613 bool solicited_eventable, bool no_delay,
614 u32 wm_thresh, struct be_eq_object *eq_object,
615 struct be_cq_object *cq_object);
616
617int be_cq_destroy(struct be_cq_object *cq);
618
619int be_mcc_ring_create(struct be_function_object *pfob,
620 struct ring_desc *rd, u32 length,
621 struct be_mcc_wrb_context *context_array,
622 u32 num_context_entries,
623 struct be_cq_object *cq, struct be_mcc_object *mcc);
624int be_mcc_ring_destroy(struct be_mcc_object *mcc_object);
625
626int be_mcc_process_cq(struct be_mcc_object *mcc_object, bool rearm);
627
628int be_mcc_add_async_event_callback(struct be_mcc_object *mcc_object,
629 mcc_async_event_callback cb, void *cb_context);
630
631int be_pci_soft_reset(struct be_function_object *pfob);
632
633
634int be_drive_POST(struct be_function_object *pfob);
635
636
637int be_eth_sq_create(struct be_function_object *pfob,
638 struct ring_desc *rd, u32 length_in_bytes,
639 u32 type, u32 ulp, struct be_cq_object *cq_object,
640 struct be_ethsq_object *eth_sq);
641
642struct be_eth_sq_parameters {
643 u32 port;
644 u32 rsvd0[2];
645} ;
646
647int be_eth_sq_create_ex(struct be_function_object *pfob,
648 struct ring_desc *rd, u32 length_in_bytes,
649 u32 type, u32 ulp, struct be_cq_object *cq_object,
650 struct be_eth_sq_parameters *ex_parameters,
651 struct be_ethsq_object *eth_sq);
652int be_eth_sq_destroy(struct be_ethsq_object *eth_sq);
653
654int be_eth_set_flow_control(struct be_function_object *pfob,
655 bool txfc_enable, bool rxfc_enable);
656
657int be_eth_get_flow_control(struct be_function_object *pfob,
658 bool *txfc_enable, bool *rxfc_enable);
659int be_eth_set_qos(struct be_function_object *pfob, u32 max_bps, u32 max_pps);
660
661int be_eth_get_qos(struct be_function_object *pfob, u32 *max_bps, u32 *max_pps);
662
663int be_eth_set_frame_size(struct be_function_object *pfob,
664 u32 *tx_frame_size, u32 *rx_frame_size);
665
666int be_eth_rq_create(struct be_function_object *pfob,
667 struct ring_desc *rd, struct be_cq_object *cq_object,
668 struct be_cq_object *bcmc_cq_object,
669 struct be_ethrq_object *eth_rq);
670
671int be_eth_rq_destroy(struct be_ethrq_object *eth_rq);
672
673int be_eth_rq_destroy_options(struct be_ethrq_object *eth_rq, bool flush,
674 mcc_wrb_cqe_callback cb, void *cb_context);
675int be_eth_rq_set_frag_size(struct be_function_object *pfob,
676 u32 new_frag_size_bytes, u32 *actual_frag_size_bytes);
677int be_eth_rq_get_frag_size(struct be_function_object *pfob,
678 u32 *frag_size_bytes);
679
680void *be_function_prepare_embedded_fwcmd(struct be_function_object *pfob,
681 struct MCC_WRB_AMAP *wrb,
682 u32 payload_length, u32 request_length,
683 u32 response_length, u32 opcode, u32 subsystem);
684void *be_function_prepare_nonembedded_fwcmd(struct be_function_object *pfob,
685 struct MCC_WRB_AMAP *wrb, void *fwcmd_header_va, u64 fwcmd_header_pa,
686 u32 payload_length, u32 request_length, u32 response_length,
687 u32 opcode, u32 subsystem);
688
689
690struct MCC_WRB_AMAP *
691be_function_peek_mcc_wrb(struct be_function_object *pfob);
692
693int be_rxf_mac_address_read_write(struct be_function_object *pfob,
694 bool port1, bool mac1, bool mgmt,
695 bool write, bool permanent, u8 *mac_address,
696 mcc_wrb_cqe_callback cb,
697 void *cb_context);
698
699int be_rxf_multicast_config(struct be_function_object *pfob,
700 bool promiscuous, u32 num, u8 *mac_table,
701 mcc_wrb_cqe_callback cb,
702 void *cb_context,
703 struct be_multicast_q_ctxt *q_ctxt);
704
705int be_rxf_vlan_config(struct be_function_object *pfob,
706 bool promiscuous, u32 num, u16 *vlan_tag_array,
707 mcc_wrb_cqe_callback cb, void *cb_context,
708 struct be_vlan_q_ctxt *q_ctxt);
709
710
711int be_rxf_link_status(struct be_function_object *pfob,
712 struct BE_LINK_STATUS *link_status,
713 mcc_wrb_cqe_callback cb,
714 void *cb_context,
715 struct be_link_status_q_ctxt *q_ctxt);
716
717
718int be_rxf_query_eth_statistics(struct be_function_object *pfob,
719 struct FWCMD_ETH_GET_STATISTICS *va_for_fwcmd,
720 u64 pa_for_fwcmd, mcc_wrb_cqe_callback cb,
721 void *cb_context,
722 struct be_nonembedded_q_ctxt *q_ctxt);
723
724int be_rxf_promiscuous(struct be_function_object *pfob,
725 bool enable_port0, bool enable_port1,
726 mcc_wrb_cqe_callback cb, void *cb_context,
727 struct be_promiscuous_q_ctxt *q_ctxt);
728
729
730int be_rxf_filter_config(struct be_function_object *pfob,
731 struct NTWK_RX_FILTER_SETTINGS *settings,
732 mcc_wrb_cqe_callback cb,
733 void *cb_context,
734 struct be_rxf_filter_q_ctxt *q_ctxt);
735
736/*
737 * ------------------------------------------------------
738 * internal functions used by hwlib
739 * ------------------------------------------------------
740 */
741
742
743int be_function_ring_destroy(struct be_function_object *pfob,
744 u32 id, u32 ring_type, mcc_wrb_cqe_callback cb,
745 void *cb_context,
746 mcc_wrb_cqe_callback internal_cb,
747 void *internal_callback_context);
748
749int be_function_post_mcc_wrb(struct be_function_object *pfob,
750 struct MCC_WRB_AMAP *wrb,
751 struct be_generic_q_ctxt *q_ctxt,
752 mcc_wrb_cqe_callback cb, void *cb_context,
753 mcc_wrb_cqe_callback internal_cb,
754 void *internal_cb_context, void *optional_fwcmd_va,
755 struct be_mcc_wrb_response_copy *response_copy);
756
757int be_function_queue_mcc_wrb(struct be_function_object *pfob,
758 struct be_generic_q_ctxt *q_ctxt);
759
760/*
761 * ------------------------------------------------------
762 * MCC QUEUE
763 * ------------------------------------------------------
764 */
765
766int be_mpu_init_mailbox(struct be_function_object *pfob, struct ring_desc *rd);
767
768
769struct MCC_WRB_AMAP *
770_be_mpu_peek_ring_wrb(struct be_mcc_object *mcc, bool driving_queue);
771
772struct be_mcc_wrb_context *
773_be_mcc_allocate_wrb_context(struct be_function_object *pfob);
774
775void _be_mcc_free_wrb_context(struct be_function_object *pfob,
776 struct be_mcc_wrb_context *context);
777
778int _be_mpu_post_wrb_mailbox(struct be_function_object *pfob,
779 struct MCC_WRB_AMAP *wrb, struct be_mcc_wrb_context *wrb_context);
780
781int _be_mpu_post_wrb_ring(struct be_mcc_object *mcc,
782 struct MCC_WRB_AMAP *wrb, struct be_mcc_wrb_context *wrb_context);
783
784void be_drive_mcc_wrb_queue(struct be_mcc_object *mcc);
785
786
787/*
788 * ------------------------------------------------------
789 * Ring Sizes
790 * ------------------------------------------------------
791 */
792static inline u32 be_ring_encoding_to_length(u32 encoding, u32 object_size)
793{
794
795 ASSERT(encoding != 1); /* 1 is rsvd */
796 ASSERT(encoding < 16);
797 ASSERT(object_size > 0);
798
799 if (encoding == 0) /* 32k deep */
800 encoding = 16;
801
802 return (1 << (encoding - 1)) * object_size;
803}
804
805static inline
806u32 be_ring_length_to_encoding(u32 length_in_bytes, u32 object_size)
807{
808
809 u32 count, encoding;
810
811 ASSERT(object_size > 0);
812 ASSERT(length_in_bytes % object_size == 0);
813
814 count = length_in_bytes / object_size;
815
816 ASSERT(count > 1);
817 ASSERT(count <= 32 * 1024);
818 ASSERT(length_in_bytes <= 8 * PAGE_SIZE); /* max ring size in UT */
819
820 encoding = __ilog2_u32(count) + 1;
821
822 if (encoding == 16)
823 encoding = 0; /* 32k deep */
824
825 return encoding;
826}
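
A worked round trip through the two encodings above, for a hypothetical ring of 32-byte objects:

	/* length_in_bytes = 2048, object_size = 32  ->  count = 64
	 *   be_ring_length_to_encoding(2048, 32) = ilog2(64) + 1 = 7
	 *   be_ring_encoding_to_length(7, 32)    = (1 << 6) * 32 = 2048
	 * Encoding 0 is special-cased to mean 32k entries; 1 is reserved.
	 */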
827
828void be_rd_to_pa_list(struct ring_desc *rd, struct PHYS_ADDR *pa_list,
829 u32 max_num);
830#endif /* __hwlib_h__ */
diff --git a/drivers/staging/benet/mpu.c b/drivers/staging/benet/mpu.c
deleted file mode 100644
index 269cc11d3055..000000000000
--- a/drivers/staging/benet/mpu.c
+++ /dev/null
@@ -1,1364 +0,0 @@
1/*
2 * Copyright (C) 2005 - 2008 ServerEngines
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@serverengines.com
12 *
13 * ServerEngines
14 * 209 N. Fair Oaks Ave
15 * Sunnyvale, CA 94085
16 */
17#include <linux/delay.h>
18#include "hwlib.h"
19#include "bestatus.h"
20
21static
22inline void mp_ring_create(struct mp_ring *ring, u32 num, u32 size, void *va)
23{
24 ASSERT(ring);
25 memset(ring, 0, sizeof(struct mp_ring));
26 ring->num = num;
27 ring->pages = DIV_ROUND_UP(num * size, PAGE_SIZE);
28 ring->itemSize = size;
29 ring->va = va;
30}
31
32/*
33 * -----------------------------------------------------------------------
34 * Interface for 2 index rings. i.e. consumer/producer rings
35 * --------------------------------------------------------------------------
36 */
37
38/* Returns the number of items pending on the ring. */
39static inline u32 mp_ring_num_pending(struct mp_ring *ring)
40{
41 ASSERT(ring);
42 if (ring->num == 0)
43 return 0;
44 return be_subc(ring->pidx, ring->cidx, ring->num);
45}
46
47/* Returns the number of items free on the ring. */
48static inline u32 mp_ring_num_empty(struct mp_ring *ring)
49{
50 ASSERT(ring);
51 return ring->num - 1 - mp_ring_num_pending(ring);
52}
53
54/* Consume 1 item */
55static inline void mp_ring_consume(struct mp_ring *ring)
56{
57 ASSERT(ring);
58 ASSERT(ring->pidx != ring->cidx);
59
60 ring->cidx = be_addc(ring->cidx, 1, ring->num);
61}
62
63/* Produce 1 item */
64static inline void mp_ring_produce(struct mp_ring *ring)
65{
66 ASSERT(ring);
67 ring->pidx = be_addc(ring->pidx, 1, ring->num);
68}
69
70/* Consume count items */
71static inline void mp_ring_consume_multiple(struct mp_ring *ring, u32 count)
72{
73 ASSERT(ring);
74 ASSERT(mp_ring_num_pending(ring) >= count);
75 ring->cidx = be_addc(ring->cidx, count, ring->num);
76}
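
For example, a snapshot of the two-index state for a ring with num = 8, pidx = 5, cidx = 2:

	/*   mp_ring_num_pending(ring) = be_subc(5, 2, 8) = 3
	 *   mp_ring_num_empty(ring)   = 8 - 1 - 3        = 4
	 * One slot is always kept unused so that pidx == cidx can only
	 * mean "empty", never "full".
	 */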
77
78static inline void *mp_ring_item(struct mp_ring *ring, u32 index)
79{
80 ASSERT(ring);
81 ASSERT(index < ring->num);
82 ASSERT(ring->itemSize > 0);
83 return (u8 *) ring->va + index * ring->itemSize;
84}
85
86/* Ptr to produce item */
87static inline void *mp_ring_producer_ptr(struct mp_ring *ring)
88{
89 ASSERT(ring);
90 return mp_ring_item(ring, ring->pidx);
91}
92
93/*
94 * Returns a pointer to the current location in the ring.
95 * This is used for rings with 1 index.
96 */
97static inline void *mp_ring_current(struct mp_ring *ring)
98{
99 ASSERT(ring);
100 ASSERT(ring->pidx == 0); /* not used */
101
102 return mp_ring_item(ring, ring->cidx);
103}
104
105/*
106 * Increment the index and return the new current location.
107 * This is used for rings with only 1 index.
108 */
109static inline void *mp_ring_next(struct mp_ring *ring)
110{
111 ASSERT(ring);
112 ASSERT(ring->num > 0);
113 ASSERT(ring->pidx == 0); /* not used */
114
115 ring->cidx = be_addc(ring->cidx, 1, ring->num);
116 return mp_ring_current(ring);
117}
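
The single-index pair above is used for completion queues; be_mcc_process_cq() below walks the MCC CQ with exactly this pattern (sketched here with the validity test elided):

	/*	cqe = mp_ring_current(cq_ring);
	 *	while (cqe is marked valid) {
	 *		... process cqe, clear its valid bit ...
	 *		cqe = mp_ring_next(cq_ring);
	 *	}
	 */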
118
119/*
120 This routine waits for a previously posted mailbox WRB to be completed.
121    Specifically, it waits for the mailbox to say that it's ready to accept
122 more data by setting the LSB of the mailbox pd register to 1.
123
124 pcontroller - The function object to post this data to
125
126 IRQL < DISPATCH_LEVEL
127*/
128static void be_mcc_mailbox_wait(struct be_function_object *pfob)
129{
130 struct MPU_MAILBOX_DB_AMAP mailbox_db;
131 u32 i = 0;
132 u32 ready;
133
134 if (pfob->emulate) {
135 /* No waiting for mailbox in emulated mode. */
136 return;
137 }
138
139 mailbox_db.dw[0] = PD_READ(pfob, mcc_bootstrap_db);
140 ready = AMAP_GET_BITS_PTR(MPU_MAILBOX_DB, ready, &mailbox_db);
141
142 while (ready == false) {
143 if ((++i & 0x3FFFF) == 0) {
144 TRACE(DL_WARN, "Waiting for mailbox ready - %dk polls",
145 i / 1000);
146 }
147 udelay(1);
148 mailbox_db.dw[0] = PD_READ(pfob, mcc_bootstrap_db);
149 ready = AMAP_GET_BITS_PTR(MPU_MAILBOX_DB, ready, &mailbox_db);
150 }
151}
152
153/*
154    This routine tells the MCC mailbox that there is data to be processed
155 in the mailbox. It does this by setting the physical address for the
156 mailbox location and clearing the LSB. This routine returns immediately
157 and does not wait for the WRB to be processed.
158
159 pcontroller - The function object to post this data to
160
161 IRQL < DISPATCH_LEVEL
162
163*/
164static void be_mcc_mailbox_notify(struct be_function_object *pfob)
165{
166 struct MPU_MAILBOX_DB_AMAP mailbox_db;
167 u32 pa;
168
169 ASSERT(pfob->mailbox.pa);
170 ASSERT(pfob->mailbox.va);
171
172 /* If emulated, do not ring the mailbox */
173 if (pfob->emulate) {
174 TRACE(DL_WARN, "MPU disabled. Skipping mailbox notify.");
175 return;
176 }
177
178 /* form the higher bits in the address */
179 mailbox_db.dw[0] = 0; /* init */
180 AMAP_SET_BITS_PTR(MPU_MAILBOX_DB, hi, &mailbox_db, 1);
181 AMAP_SET_BITS_PTR(MPU_MAILBOX_DB, ready, &mailbox_db, 0);
182
183 /* bits 34 to 63 */
184 pa = (u32) (pfob->mailbox.pa >> 34);
185 AMAP_SET_BITS_PTR(MPU_MAILBOX_DB, address, &mailbox_db, pa);
186
187 /* Wait for the MPU to be ready */
188 be_mcc_mailbox_wait(pfob);
189
190 /* Ring doorbell 1st time */
191 PD_WRITE(pfob, mcc_bootstrap_db, mailbox_db.dw[0]);
192
193 /* Wait for 1st write to be acknowledged. */
194 be_mcc_mailbox_wait(pfob);
195
196	/* lower 30 bits, starting at bit 4 (bits 4 to 33) */
197 pa = (u32) (pfob->mailbox.pa >> 4) & 0x3FFFFFFF;
198
199 AMAP_SET_BITS_PTR(MPU_MAILBOX_DB, hi, &mailbox_db, 0);
200 AMAP_SET_BITS_PTR(MPU_MAILBOX_DB, ready, &mailbox_db, 0);
201 AMAP_SET_BITS_PTR(MPU_MAILBOX_DB, address, &mailbox_db, pa);
202
203 /* Ring doorbell 2nd time */
204 PD_WRITE(pfob, mcc_bootstrap_db, mailbox_db.dw[0]);
205}
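
The two doorbell writes above deliver the 64-bit mailbox physical address in two halves; a worked example with an assumed, 16-byte-aligned pa of 0x0000001234567890:

	/* 1st write: hi = 1, address = pa >> 34               = 0x4
	 * 2nd write: hi = 0, address = (pa >> 4) & 0x3FFFFFFF = 0x23456789
	 * Bits 3:0 of pa are implicitly zero, guaranteed by the 16-byte
	 * alignment ASSERTs in be_mpu_init_mailbox().
	 */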
206
207/*
208    This routine tells the MCC mailbox that there is data to be processed
209 in the mailbox. It does this by setting the physical address for the
210 mailbox location and clearing the LSB. This routine spins until the
211 MPU writes a 1 into the LSB indicating that the data has been received
212 and is ready to be processed.
213
214 pcontroller - The function object to post this data to
215
216 IRQL < DISPATCH_LEVEL
217*/
218static void
219be_mcc_mailbox_notify_and_wait(struct be_function_object *pfob)
220{
221 /*
222 * Notify it
223 */
224 be_mcc_mailbox_notify(pfob);
225 /*
226 * Now wait for completion of WRB
227 */
228 be_mcc_mailbox_wait(pfob);
229}
230
231void
232be_mcc_process_cqe(struct be_function_object *pfob,
233 struct MCC_CQ_ENTRY_AMAP *cqe)
234{
235 struct be_mcc_wrb_context *wrb_context = NULL;
236 u32 offset, status;
237 u8 *p;
238
239 ASSERT(cqe);
240 /*
241 * A command completed. Commands complete out-of-order.
242 * Determine which command completed from the TAG.
243 */
244 offset = offsetof(struct BE_MCC_CQ_ENTRY_AMAP, mcc_tag)/8;
245 p = (u8 *) cqe + offset;
246 wrb_context = (struct be_mcc_wrb_context *)(void *)(size_t)(*(u64 *)p);
247 ASSERT(wrb_context);
248
249 /*
250 * Perform a response copy if requested.
251 * Only copy data if the FWCMD is successful.
252 */
253 status = AMAP_GET_BITS_PTR(MCC_CQ_ENTRY, completion_status, cqe);
254 if (status == MGMT_STATUS_SUCCESS && wrb_context->copy.length > 0) {
255 ASSERT(wrb_context->wrb);
256 ASSERT(wrb_context->copy.va);
257 p = (u8 *)wrb_context->wrb +
258 offsetof(struct BE_MCC_WRB_AMAP, payload)/8;
259 memcpy(wrb_context->copy.va,
260 (u8 *)p + wrb_context->copy.fwcmd_offset,
261 wrb_context->copy.length);
262 }
263
264 if (status)
265 status = BE_NOT_OK;
266 /* internal callback */
267 if (wrb_context->internal_cb) {
268 wrb_context->internal_cb(wrb_context->internal_cb_context,
269 status, wrb_context->wrb);
270 }
271
272 /* callback */
273 if (wrb_context->cb) {
274 wrb_context->cb(wrb_context->cb_context,
275 status, wrb_context->wrb);
276 }
277 /* Free the context structure */
278 _be_mcc_free_wrb_context(pfob, wrb_context);
279}
280
281void be_drive_mcc_wrb_queue(struct be_mcc_object *mcc)
282{
283 struct be_function_object *pfob = NULL;
284 int status = BE_PENDING;
285 struct be_generic_q_ctxt *q_ctxt;
286 struct MCC_WRB_AMAP *wrb;
287 struct MCC_WRB_AMAP *queue_wrb;
288 u32 length, payload_length, sge_count, embedded;
289 unsigned long irql;
290
291 BUILD_BUG_ON((sizeof(struct be_generic_q_ctxt) <
292 sizeof(struct be_queue_driver_context) +
293 sizeof(struct MCC_WRB_AMAP)));
294 pfob = mcc->parent_function;
295
296 spin_lock_irqsave(&pfob->post_lock, irql);
297
298 if (mcc->driving_backlog) {
299 spin_unlock_irqrestore(&pfob->post_lock, irql);
300 if (pfob->pend_queue_driving && pfob->mcc) {
301 pfob->pend_queue_driving = 0;
302 be_drive_mcc_wrb_queue(pfob->mcc);
303 }
304 return;
305 }
306 /* Acquire the flag to limit 1 thread to redrive posts. */
307 mcc->driving_backlog = 1;
308
309 while (!list_empty(&mcc->backlog)) {
310 wrb = _be_mpu_peek_ring_wrb(mcc, true); /* Driving the queue */
311 if (!wrb)
312 break; /* No space in the ring yet. */
313 /* Get the next queued entry to process. */
314 q_ctxt = list_first_entry(&mcc->backlog,
315 struct be_generic_q_ctxt, context.list);
316 list_del(&q_ctxt->context.list);
317 pfob->mcc->backlog_length--;
318 /*
319 * Compute the required length of the WRB.
320 * Since the queue element may be smaller than
321 * the complete WRB, copy only the required number of bytes.
322 */
323 queue_wrb = (struct MCC_WRB_AMAP *) &q_ctxt->wrb_header;
324 embedded = AMAP_GET_BITS_PTR(MCC_WRB, embedded, queue_wrb);
325 if (embedded) {
326 payload_length = AMAP_GET_BITS_PTR(MCC_WRB,
327 payload_length, queue_wrb);
328 length = sizeof(struct be_mcc_wrb_header) +
329 payload_length;
330 } else {
331 sge_count = AMAP_GET_BITS_PTR(MCC_WRB, sge_count,
332 queue_wrb);
333 ASSERT(sge_count == 1); /* only 1 frag. */
334 length = sizeof(struct be_mcc_wrb_header) +
335 sge_count * sizeof(struct MCC_SGE_AMAP);
336 }
337
338 /*
339 * Truncate the length based on the size of the
340 * queue element. Some elements that have output parameters
341 * can be smaller than the payload_length field would
342 * indicate. We really only need to copy the request
343 * parameters, not the response.
344 */
345 length = min(length, (u32) (q_ctxt->context.bytes -
346 offsetof(struct be_generic_q_ctxt, wrb_header)));
347
348 /* Copy the queue element WRB into the ring. */
349 memcpy(wrb, &q_ctxt->wrb_header, length);
350
351 /* Post the wrb. This should not fail assuming we have
352 * enough context structs. */
353 status = be_function_post_mcc_wrb(pfob, wrb, NULL,
354 q_ctxt->context.cb, q_ctxt->context.cb_context,
355 q_ctxt->context.internal_cb,
356 q_ctxt->context.internal_cb_context,
357 q_ctxt->context.optional_fwcmd_va,
358 &q_ctxt->context.copy);
359
360 if (status == BE_SUCCESS) {
361 /*
362 * Synchronous completion. Since it was queued,
363 * we will invoke the callback.
364 * To the user, this is an asynchronous request.
365 */
366 spin_unlock_irqrestore(&pfob->post_lock, irql);
367 if (pfob->pend_queue_driving && pfob->mcc) {
368 pfob->pend_queue_driving = 0;
369 be_drive_mcc_wrb_queue(pfob->mcc);
370 }
371
372 ASSERT(q_ctxt->context.cb);
373
374 q_ctxt->context.cb(
375 q_ctxt->context.cb_context,
376 BE_SUCCESS, NULL);
377
378 spin_lock_irqsave(&pfob->post_lock, irql);
379
380 } else if (status != BE_PENDING) {
381 /*
382 * Another resource failed. Should never happen
383 * if we have sufficient MCC_WRB_CONTEXT structs.
384 * Return to head of the queue.
385 */
386 TRACE(DL_WARN, "Failed to post a queued WRB. 0x%x",
387 status);
388 list_add(&q_ctxt->context.list, &mcc->backlog);
389 pfob->mcc->backlog_length++;
390 break;
391 }
392 }
393
394 /* Free the flag to limit 1 thread to redrive posts. */
395 mcc->driving_backlog = 0;
396 spin_unlock_irqrestore(&pfob->post_lock, irql);
397}
398
399/* This function asserts that the WRB was consumed in order. */
400#ifdef BE_DEBUG
401u32 be_mcc_wrb_consumed_in_order(struct be_mcc_object *mcc,
402 struct MCC_CQ_ENTRY_AMAP *cqe)
403{
404 struct be_mcc_wrb_context *wrb_context = NULL;
405 u32 wrb_index;
406 u32 wrb_consumed_in_order;
407 u32 offset;
408 u8 *p;
409
410 ASSERT(cqe);
411 /*
412 * A command completed. Commands complete out-of-order.
413 * Determine which command completed from the TAG.
414 */
415 offset = offsetof(struct BE_MCC_CQ_ENTRY_AMAP, mcc_tag)/8;
416 p = (u8 *) cqe + offset;
417 wrb_context = (struct be_mcc_wrb_context *)(void *)(size_t)(*(u64 *)p);
418
419 ASSERT(wrb_context);
420
421 wrb_index = (u32) (((u64)(size_t)wrb_context->ring_wrb -
422 (u64)(size_t)mcc->sq.ring.va) / sizeof(struct MCC_WRB_AMAP));
423
424 ASSERT(wrb_index < mcc->sq.ring.num);
425
426 wrb_consumed_in_order = (u32) (wrb_index == mcc->consumed_index);
427 mcc->consumed_index = be_addc(mcc->consumed_index, 1, mcc->sq.ring.num);
428 return wrb_consumed_in_order;
429}
430#endif
431
432int be_mcc_process_cq(struct be_mcc_object *mcc, bool rearm)
433{
434 struct be_function_object *pfob = NULL;
435 struct MCC_CQ_ENTRY_AMAP *cqe;
436 struct CQ_DB_AMAP db;
437 struct mp_ring *cq_ring = &mcc->cq.ring;
438 struct mp_ring *mp_ring = &mcc->sq.ring;
439 u32 num_processed = 0;
440 u32 consumed = 0, valid, completed, cqe_consumed, async_event;
441
442 pfob = mcc->parent_function;
443
444 spin_lock_irqsave(&pfob->cq_lock, pfob->cq_irq);
445
446 /*
447 * Verify that only one thread is processing the CQ at once.
448 * We cannot hold the lock while processing the CQ due to
449 * the callbacks into the OS. Therefore, this flag is used
450 * to control it. If any of the threads want to
451 * rearm the CQ, we need to honor that.
452 */
453 if (mcc->processing != 0) {
454 mcc->rearm = mcc->rearm || rearm;
455 goto Error;
456 } else {
457 mcc->processing = 1; /* lock processing for this thread. */
458 mcc->rearm = rearm; /* set our rearm setting */
459 }
460
461 spin_unlock_irqrestore(&pfob->cq_lock, pfob->cq_irq);
462
463 cqe = mp_ring_current(cq_ring);
464 valid = AMAP_GET_BITS_PTR(MCC_CQ_ENTRY, valid, cqe);
465 while (valid) {
466
467 if (num_processed >= 8) {
468 /* coalesce doorbells, but free space in cq
469 * ring while processing. */
470 db.dw[0] = 0; /* clear */
471 AMAP_SET_BITS_PTR(CQ_DB, qid, &db, cq_ring->id);
472 AMAP_SET_BITS_PTR(CQ_DB, rearm, &db, false);
473 AMAP_SET_BITS_PTR(CQ_DB, event, &db, false);
474 AMAP_SET_BITS_PTR(CQ_DB, num_popped, &db,
475 num_processed);
476 num_processed = 0;
477
478 PD_WRITE(pfob, cq_db, db.dw[0]);
479 }
480
481 async_event = AMAP_GET_BITS_PTR(MCC_CQ_ENTRY, async_event, cqe);
482 if (async_event) {
483 /* This is an asynchronous event. */
484 struct ASYNC_EVENT_TRAILER_AMAP *async_trailer =
485 (struct ASYNC_EVENT_TRAILER_AMAP *)
486 ((u8 *) cqe + sizeof(struct MCC_CQ_ENTRY_AMAP) -
487 sizeof(struct ASYNC_EVENT_TRAILER_AMAP));
488 u32 event_code;
489 async_event = AMAP_GET_BITS_PTR(ASYNC_EVENT_TRAILER,
490 async_event, async_trailer);
491 ASSERT(async_event == 1);
492
493
494 valid = AMAP_GET_BITS_PTR(ASYNC_EVENT_TRAILER,
495 valid, async_trailer);
496 ASSERT(valid == 1);
497
498 /* Call the async event handler if it is installed. */
499 if (mcc->async_cb) {
500 event_code =
501 AMAP_GET_BITS_PTR(ASYNC_EVENT_TRAILER,
502 event_code, async_trailer);
503 mcc->async_cb(mcc->async_context,
504 (u32) event_code, (void *) cqe);
505 }
506
507 } else {
508 /* This is a completion entry. */
509
510 /* No vm forwarding in this driver. */
511
512 cqe_consumed = AMAP_GET_BITS_PTR(MCC_CQ_ENTRY,
513 consumed, cqe);
514 if (cqe_consumed) {
515 /*
516 * A command on the MCC ring was consumed.
517 * Update the consumer index.
518 * These occur in order.
519 */
520 ASSERT(be_mcc_wrb_consumed_in_order(mcc, cqe));
521 consumed++;
522 }
523
524 completed = AMAP_GET_BITS_PTR(MCC_CQ_ENTRY,
525 completed, cqe);
526 if (completed) {
527 /* A command completed. Use tag to
528 * determine which command. */
529 be_mcc_process_cqe(pfob, cqe);
530 }
531 }
532
533 /* Reset the CQE */
534 AMAP_SET_BITS_PTR(MCC_CQ_ENTRY, valid, cqe, false);
535 num_processed++;
536
537 /* Update our tracking for the CQ ring. */
538 cqe = mp_ring_next(cq_ring);
539 valid = AMAP_GET_BITS_PTR(MCC_CQ_ENTRY, valid, cqe);
540 }
541
542 TRACE(DL_INFO, "num_processed:0x%x, and consumed:0x%x",
543 num_processed, consumed);
544 /*
545 * Grab the CQ lock to synchronize the "rearm" setting for
546 * the doorbell, and for clearing the "processing" flag.
547 */
548 spin_lock_irqsave(&pfob->cq_lock, pfob->cq_irq);
549
550 /*
551 * Rearm the cq. This is done based on the global mcc->rearm
552 * flag which combines the rearm parameter from the current
553 * call to process_cq and any other threads
554 * that tried to process the CQ while this one was active.
555 * This handles the situation where a sync. fwcmd was processing
556 * the CQ while the interrupt/dpc tries to process it.
557 * The sync process gets to continue -- but it is now
558 * responsible for the rearming.
559 */
560 if (num_processed > 0 || mcc->rearm == true) {
561 db.dw[0] = 0; /* clear */
562 AMAP_SET_BITS_PTR(CQ_DB, qid, &db, cq_ring->id);
563 AMAP_SET_BITS_PTR(CQ_DB, rearm, &db, mcc->rearm);
564 AMAP_SET_BITS_PTR(CQ_DB, event, &db, false);
565 AMAP_SET_BITS_PTR(CQ_DB, num_popped, &db, num_processed);
566
567 PD_WRITE(pfob, cq_db, db.dw[0]);
568 }
569 /*
570 * Update the consumer index after ringing the CQ doorbell.
571 * We don't want another thread to post more WRBs before we
572 * have CQ space available.
573 */
574 mp_ring_consume_multiple(mp_ring, consumed);
575
576 /* Clear the processing flag. */
577 mcc->processing = 0;
578
579Error:
580 spin_unlock_irqrestore(&pfob->cq_lock, pfob->cq_irq);
581 /*
582 * Use the local variable to detect if the current thread
583 * holds the WRB post lock. If rearm is false, this is
584 * either a synchronous command, or the upper layer driver is polling
585 * from a thread. We do not drive the queue from that
586 * context since the driver may hold the
587 * wrb post lock already.
588 */
589 if (rearm)
590 be_drive_mcc_wrb_queue(mcc);
591 else
592 pfob->pend_queue_driving = 1;
593
594 return BE_SUCCESS;
595}
596
597/*
598 *============================================================================
599 * P U B L I C R O U T I N E S
600 *============================================================================
601 */
602
603/*
604 This routine creates an MCC object. This object contains an MCC send queue
605 and a CQ private to the MCC.
606
607 pcontroller - Handle to a function object
608
609 EqObject - EQ object that will be used to dispatch this MCC
610
611 ppMccObject - Pointer to an internal Mcc Object returned.
612
613    Returns BE_SUCCESS if successful, otherwise a useful error code
614 is returned.
615
616 IRQL < DISPATCH_LEVEL
617
618*/
619int
620be_mcc_ring_create(struct be_function_object *pfob,
621 struct ring_desc *rd, u32 length,
622 struct be_mcc_wrb_context *context_array,
623 u32 num_context_entries,
624 struct be_cq_object *cq, struct be_mcc_object *mcc)
625{
626 int status = 0;
627
628 struct FWCMD_COMMON_MCC_CREATE *fwcmd = NULL;
629 struct MCC_WRB_AMAP *wrb = NULL;
630 u32 num_entries_encoded, n, i;
631 void *va = NULL;
632 unsigned long irql;
633
634 if (length < sizeof(struct MCC_WRB_AMAP) * 2) {
635 TRACE(DL_ERR, "Invalid MCC ring length:%d", length);
636 return BE_NOT_OK;
637 }
638 /*
639 * Reduce the actual ring size to be less than the number
640 * of context entries. This ensures that we run out of
641 * ring WRBs first so the queuing works correctly. We never
642 * queue based on context structs.
643 */
644 if (num_context_entries + 1 <
645 length / sizeof(struct MCC_WRB_AMAP) - 1) {
646
647 u32 max_length =
648 (num_context_entries + 2) * sizeof(struct MCC_WRB_AMAP);
649
650 if (is_power_of_2(max_length))
651 length = __roundup_pow_of_two(max_length+1) / 2;
652 else
653 length = __roundup_pow_of_two(max_length) / 2;
654
655 ASSERT(length <= max_length);
656
657 TRACE(DL_WARN,
658 "MCC ring length reduced based on context entries."
659 " length:%d wrbs:%d context_entries:%d", length,
660 (int) (length / sizeof(struct MCC_WRB_AMAP)),
661 num_context_entries);
662 }
663
664 spin_lock_irqsave(&pfob->post_lock, irql);
665
666 num_entries_encoded =
667 be_ring_length_to_encoding(length, sizeof(struct MCC_WRB_AMAP));
668
669 /* Init MCC object. */
670 memset(mcc, 0, sizeof(*mcc));
671 mcc->parent_function = pfob;
672 mcc->cq_object = cq;
673
674 INIT_LIST_HEAD(&mcc->backlog);
675
676 wrb = be_function_peek_mcc_wrb(pfob);
677 if (!wrb) {
678 ASSERT(wrb);
679		TRACE(DL_ERR, "No free MCC WRBs in MCC ring create.");
680 status = BE_STATUS_NO_MCC_WRB;
681 goto error;
682 }
683 /* Prepares an embedded fwcmd, including request/response sizes. */
684 fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_MCC_CREATE);
685
686 fwcmd->params.request.num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
687 /*
688 * Program MCC ring context
689 */
690 AMAP_SET_BITS_PTR(MCC_RING_CONTEXT, pdid,
691 &fwcmd->params.request.context, 0);
692 AMAP_SET_BITS_PTR(MCC_RING_CONTEXT, invalid,
693 &fwcmd->params.request.context, false);
694 AMAP_SET_BITS_PTR(MCC_RING_CONTEXT, ring_size,
695 &fwcmd->params.request.context, num_entries_encoded);
696
697 n = cq->cq_id;
698 AMAP_SET_BITS_PTR(MCC_RING_CONTEXT,
699 cq_id, &fwcmd->params.request.context, n);
700 be_rd_to_pa_list(rd, fwcmd->params.request.pages,
701 ARRAY_SIZE(fwcmd->params.request.pages));
702 /* Post the f/w command */
703 status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL,
704 NULL, NULL, fwcmd, NULL);
705 if (status != BE_SUCCESS) {
706		TRACE(DL_ERR, "FWCMD to create MCC ring failed.");
707 goto error;
708 }
709 /*
710 * Create a linked list of context structures
711 */
712 mcc->wrb_context.base = context_array;
713 mcc->wrb_context.num = num_context_entries;
714 INIT_LIST_HEAD(&mcc->wrb_context.list_head);
715 memset(context_array, 0,
716 sizeof(struct be_mcc_wrb_context) * num_context_entries);
717 for (i = 0; i < mcc->wrb_context.num; i++) {
718 list_add_tail(&context_array[i].next,
719 &mcc->wrb_context.list_head);
720 }
721
722 /*
723 *
724	 * Create an mp_ring for tracking the WRB hw ring.
725 */
726 va = rd->va;
727 ASSERT(va);
728 mp_ring_create(&mcc->sq.ring, length / sizeof(struct MCC_WRB_AMAP),
729 sizeof(struct MCC_WRB_AMAP), va);
730 mcc->sq.ring.id = fwcmd->params.response.id;
731 /*
732	 * Init an mp_ring for tracking the MCC CQ.
733 */
734 ASSERT(cq->va);
735 mp_ring_create(&mcc->cq.ring, cq->num_entries,
736 sizeof(struct MCC_CQ_ENTRY_AMAP), cq->va);
737 mcc->cq.ring.id = cq->cq_id;
738
739 /* Force zeroing of CQ. */
740 memset(cq->va, 0, cq->num_entries * sizeof(struct MCC_CQ_ENTRY_AMAP));
741
742 /* Initialize debug index. */
743 mcc->consumed_index = 0;
744
745 atomic_inc(&cq->ref_count);
746 pfob->mcc = mcc;
747
748 TRACE(DL_INFO, "MCC ring created. id:%d bytes:%d cq_id:%d cq_entries:%d"
749 " num_context:%d", mcc->sq.ring.id, length,
750 cq->cq_id, cq->num_entries, num_context_entries);
751
752error:
753 spin_unlock_irqrestore(&pfob->post_lock, irql);
754 if (pfob->pend_queue_driving && pfob->mcc) {
755 pfob->pend_queue_driving = 0;
756 be_drive_mcc_wrb_queue(pfob->mcc);
757 }
758 return status;
759}
760
761/*
762 This routine destroys an MCC send queue
763
764 MccObject - Internal Mcc Object to be destroyed.
765
766    Returns BE_SUCCESS if successful, otherwise an error code is returned.
767
768 IRQL < DISPATCH_LEVEL
769
770 The caller of this routine must ensure that no other WRB may be posted
771 until this routine returns.
772
773*/
774int be_mcc_ring_destroy(struct be_mcc_object *mcc)
775{
776 int status = 0;
777 struct be_function_object *pfob = mcc->parent_function;
778
779
780 ASSERT(mcc->processing == 0);
781
782 /*
783 * Remove the ring from the function object.
784 * This transitions back to mailbox mode.
785 */
786 pfob->mcc = NULL;
787
788 /* Send fwcmd to destroy the queue. (Using the mailbox.) */
789 status = be_function_ring_destroy(mcc->parent_function, mcc->sq.ring.id,
790 FWCMD_RING_TYPE_MCC, NULL, NULL, NULL, NULL);
791 ASSERT(status == 0);
792
793 /* Release the SQ reference to the CQ */
794 atomic_dec(&mcc->cq_object->ref_count);
795
796 return status;
797}
798
799static void
800mcc_wrb_sync_cb(void *context, int status, struct MCC_WRB_AMAP *wrb)
801{
802 struct be_mcc_wrb_context *wrb_context =
803 (struct be_mcc_wrb_context *) context;
804 ASSERT(wrb_context);
805	*wrb_context->users_final_status = status;
806}
807
808/*
809 This routine posts a command to the MCC send queue
810
811    mcc - Internal Mcc Object to post the WRB to.
812
813 wrb - wrb to post.
814
815    Returns BE_SUCCESS if successful, otherwise an error code is returned.
816
817 IRQL < DISPATCH_LEVEL if CompletionCallback is not NULL
818 IRQL <=DISPATCH_LEVEL if CompletionCallback is NULL
819
820 If this routine is called with CompletionCallback != NULL the
821 call is considered to be asynchronous and will return as soon
822 as the WRB is posted to the MCC with BE_PENDING.
823
824 If CompletionCallback is NULL, then this routine will not return until
825 a completion for this MCC command has been processed.
826 If called at DISPATCH_LEVEL the CompletionCallback must be NULL.
827
828    This routine should only be called if the MPU has been bootstrapped past
829 mailbox mode.
830
831
832*/
833int
834_be_mpu_post_wrb_ring(struct be_mcc_object *mcc, struct MCC_WRB_AMAP *wrb,
835 struct be_mcc_wrb_context *wrb_context)
836{
837
838 struct MCC_WRB_AMAP *ring_wrb = NULL;
839 int status = BE_PENDING;
840 int final_status = BE_PENDING;
841 mcc_wrb_cqe_callback cb = NULL;
842 struct MCC_DB_AMAP mcc_db;
843 u32 embedded;
844
845 ASSERT(mp_ring_num_empty(&mcc->sq.ring) > 0);
846 /*
847 * Input wrb is most likely the next wrb in the ring, since the client
848 * can peek at the address.
849 */
850 ring_wrb = mp_ring_producer_ptr(&mcc->sq.ring);
851 if (wrb != ring_wrb) {
852 /* If not equal, copy it into the ring. */
853 memcpy(ring_wrb, wrb, sizeof(struct MCC_WRB_AMAP));
854 }
855#ifdef BE_DEBUG
856 wrb_context->ring_wrb = ring_wrb;
857#endif
858 embedded = AMAP_GET_BITS_PTR(MCC_WRB, embedded, ring_wrb);
859 if (embedded) {
860 /* embedded commands will have the response within the WRB. */
861 wrb_context->wrb = ring_wrb;
862 } else {
863 /*
864 * non-embedded commands will not have the response
865 * within the WRB, and they may complete out-of-order.
866 * The WRB will not be valid to inspect
867 * during the completion.
868 */
869 wrb_context->wrb = NULL;
870 }
871 cb = wrb_context->cb;
872
873 if (cb == NULL) {
874 /* Assign our internal callback if this is a
875 * synchronous call. */
876 wrb_context->cb = mcc_wrb_sync_cb;
877 wrb_context->cb_context = wrb_context;
878 wrb_context->users_final_status = &final_status;
879 }
880 /* Increment producer index */
881
882 mcc_db.dw[0] = 0; /* initialize */
883 AMAP_SET_BITS_PTR(MCC_DB, rid, &mcc_db, mcc->sq.ring.id);
884 AMAP_SET_BITS_PTR(MCC_DB, numPosted, &mcc_db, 1);
885
886 mp_ring_produce(&mcc->sq.ring);
887 PD_WRITE(mcc->parent_function, mpu_mcc_db, mcc_db.dw[0]);
888 TRACE(DL_INFO, "pidx: %x and cidx: %x.", mcc->sq.ring.pidx,
889 mcc->sq.ring.cidx);
890
891 if (cb == NULL) {
892 int polls = 0; /* At >= 1 us per poll */
893 /* Wait until this command completes, polling the CQ. */
894 do {
895 TRACE(DL_INFO, "FWCMD submitted in the poll mode.");
896 /* Do not rearm CQ in this context. */
897 be_mcc_process_cq(mcc, false);
898
899 if (final_status == BE_PENDING) {
900 if ((++polls & 0x7FFFF) == 0) {
901 TRACE(DL_WARN,
902					       "Warning : polling MCC CQ for %d ms.",
903					       polls / 1000);
904 }
905
906 udelay(1);
907 }
908
909 /* final_status changed when the command completes */
910 } while (final_status == BE_PENDING);
911
912 status = final_status;
913 }
914
915 return status;
916}
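/*
 * Sketch (illustration only): an asynchronous post. my_fwcmd_done() is
 * a hypothetical completion handler; its signature mirrors
 * mcc_wrb_sync_cb() above. With a non-NULL cb the post returns
 * BE_PENDING and the handler later runs from be_mcc_process_cq().
 */
static void my_fwcmd_done(void *context, int status,
			  struct MCC_WRB_AMAP *wrb)
{
	TRACE(DL_INFO, "FWCMD completed with status %d.", status);
}

static int example_post_async(struct be_mcc_object *mcc,
		struct MCC_WRB_AMAP *wrb, struct be_mcc_wrb_context *ctx)
{
	ctx->cb = my_fwcmd_done;
	ctx->cb_context = NULL;
	return _be_mpu_post_wrb_ring(mcc, wrb, ctx);	/* BE_PENDING */
}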
917
918struct MCC_WRB_AMAP *
919_be_mpu_peek_ring_wrb(struct be_mcc_object *mcc, bool driving_queue)
920{
921 /* If we have queued items, do not allow a post to bypass the queue. */
922 if (!driving_queue && !list_empty(&mcc->backlog))
923 return NULL;
924
925 if (mp_ring_num_empty(&mcc->sq.ring) <= 0)
926 return NULL;
927 return (struct MCC_WRB_AMAP *) mp_ring_producer_ptr(&mcc->sq.ring);
928}
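/*
 * Sketch (illustration only): the peek/fill/post pattern this pair of
 * routines supports. Building the command in place at the producer
 * slot lets _be_mpu_post_wrb_ring() skip the memcpy().
 */
static int example_post_in_place(struct be_mcc_object *mcc,
		struct be_mcc_wrb_context *ctx)
{
	struct MCC_WRB_AMAP *wrb = _be_mpu_peek_ring_wrb(mcc, false);

	if (!wrb)
		return BE_NOT_OK;	/* ring full or backlog pending */
	/* ... build the FWCMD directly in *wrb here ... */
	return _be_mpu_post_wrb_ring(mcc, wrb, ctx);
}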
929
930int
931be_mpu_init_mailbox(struct be_function_object *pfob, struct ring_desc *mailbox)
932{
933 ASSERT(mailbox);
934 pfob->mailbox.va = mailbox->va;
935 pfob->mailbox.pa = cpu_to_le64(mailbox->pa);
936 pfob->mailbox.length = mailbox->length;
937
938 ASSERT(((u32)(size_t)pfob->mailbox.va & 0xf) == 0);
939 ASSERT(((u32)(size_t)pfob->mailbox.pa & 0xf) == 0);
940 /*
941 * Issue the WRB to set MPU endianness
942 */
943 {
944 u64 *endian_check = (u64 *) (pfob->mailbox.va +
945 offsetof(struct BE_MCC_MAILBOX_AMAP, wrb)/8);
946 *endian_check = 0xFF1234FFFF5678FFULL;
947 }
948
949 be_mcc_mailbox_notify_and_wait(pfob);
950
951 return BE_SUCCESS;
952}
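/*
 * Sketch (illustration only): how the signature written above
 * decomposes into the two dwords the MPU inspects to learn the host's
 * byte order before parsing any real WRB.
 */
static void example_endian_signature(void)
{
	u64 sig = 0xFF1234FFFF5678FFULL;
	u32 hi = (u32)(sig >> 32);	/* 0xFF1234FF */
	u32 lo = (u32)sig;		/* 0xFF5678FF */

	TRACE(DL_INFO, "endian signature dwords: %08x %08x", hi, lo);
}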
953
954
955/*
956 This routine posts a command to the MCC mailbox.
957
958 FuncObj - Function Object to post the WRB on behalf of.
959 wrb - wrb to post.
960 CompletionCallback - Address of a callback routine to invoke once the WRB
961 is completed.
962 CompletionCallbackContext - Opaque context to be passed during the call to
963 the CompletionCallback.
964 Returns BE_SUCCESS if successful, otherwise an error code is returned.
965
966 IRQL <=DISPATCH_LEVEL if CompletionCallback is NULL
967
968 This routine will block until a completion for this MCC command has been
969 processed. If called at DISPATCH_LEVEL the CompletionCallback must be NULL.
970
971 This routine should only be called if the MPU has not been bootstrapped past
972 mailbox mode.
973*/
974int
975_be_mpu_post_wrb_mailbox(struct be_function_object *pfob,
976 struct MCC_WRB_AMAP *wrb, struct be_mcc_wrb_context *wrb_context)
977{
978 struct MCC_MAILBOX_AMAP *mailbox = NULL;
979 struct MCC_WRB_AMAP *mb_wrb;
980 struct MCC_CQ_ENTRY_AMAP *mb_cq;
981 u32 offset, status;
982
983 ASSERT(pfob->mcc == NULL);
984 mailbox = pfob->mailbox.va;
985 ASSERT(mailbox);
986
987 offset = offsetof(struct BE_MCC_MAILBOX_AMAP, wrb)/8;
988	mb_wrb = (struct MCC_WRB_AMAP *) ((u8 *)mailbox + offset);
989 if (mb_wrb != wrb) {
990 memset(mailbox, 0, sizeof(*mailbox));
991 memcpy(mb_wrb, wrb, sizeof(struct MCC_WRB_AMAP));
992 }
993 /* The callback can inspect the final WRB to get output parameters. */
994 wrb_context->wrb = mb_wrb;
995
996 be_mcc_mailbox_notify_and_wait(pfob);
997
998 /* A command completed. Use tag to determine which command. */
999 offset = offsetof(struct BE_MCC_MAILBOX_AMAP, cq)/8;
1000 mb_cq = (struct MCC_CQ_ENTRY_AMAP *) ((u8 *)mailbox + offset);
1001 be_mcc_process_cqe(pfob, mb_cq);
1002
1003 status = AMAP_GET_BITS_PTR(MCC_CQ_ENTRY, completion_status, mb_cq);
1004 if (status)
1005 status = BE_NOT_OK;
1006 return status;
1007}
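/*
 * Sketch (illustration only): a mailbox-mode post while pfob->mcc is
 * still NULL. The call blocks until the mailbox CQ entry is processed,
 * so no completion callback is involved; releasing the context on the
 * completion path is assumed here.
 */
static int example_mailbox_post(struct be_function_object *pfob,
		struct MCC_WRB_AMAP *wrb)
{
	struct be_mcc_wrb_context *ctx = _be_mcc_allocate_wrb_context(pfob);

	if (!ctx)
		return BE_NOT_OK;
	/* Context release is handled on the completion path (assumed). */
	return _be_mpu_post_wrb_mailbox(pfob, wrb, ctx);
}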
1008
1009struct be_mcc_wrb_context *
1010_be_mcc_allocate_wrb_context(struct be_function_object *pfob)
1011{
1012 struct be_mcc_wrb_context *context = NULL;
1013 unsigned long irq;
1014
1015 spin_lock_irqsave(&pfob->mcc_context_lock, irq);
1016
1017 if (!pfob->mailbox.default_context_allocated) {
1018 /* Use the single default context that we
1019 * always have allocated. */
1020 pfob->mailbox.default_context_allocated = true;
1021 context = &pfob->mailbox.default_context;
1022 } else if (pfob->mcc) {
1023 /* Get a context from the free list. If any are available. */
1024 if (!list_empty(&pfob->mcc->wrb_context.list_head)) {
1025 context = list_first_entry(
1026 &pfob->mcc->wrb_context.list_head,
1027 struct be_mcc_wrb_context, next);
1028 }
1029 }
1030
1031 spin_unlock_irqrestore(&pfob->mcc_context_lock, irq);
1032
1033 return context;
1034}
1035
1036void
1037_be_mcc_free_wrb_context(struct be_function_object *pfob,
1038 struct be_mcc_wrb_context *context)
1039{
1040 unsigned long irq;
1041
1042 ASSERT(context);
1043 /*
1044 * Zero during free to try and catch any bugs where the context
1045 * is accessed after a free.
1046 */
1047	memset(context, 0, sizeof(*context));
1048
1049 spin_lock_irqsave(&pfob->mcc_context_lock, irq);
1050
1051 if (context == &pfob->mailbox.default_context) {
1052 /* Free the default context. */
1053 ASSERT(pfob->mailbox.default_context_allocated);
1054 pfob->mailbox.default_context_allocated = false;
1055 } else {
1056 /* Add to free list. */
1057 ASSERT(pfob->mcc);
1058 list_add_tail(&context->next,
1059 &pfob->mcc->wrb_context.list_head);
1060 }
1061
1062 spin_unlock_irqrestore(&pfob->mcc_context_lock, irq);
1063}
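/*
 * Sketch (illustration only): in mailbox mode only the single default
 * context exists, so a second allocation fails until the first is
 * freed; callers must always check for NULL.
 */
static void example_context_pairing(struct be_function_object *pfob)
{
	struct be_mcc_wrb_context *a = _be_mcc_allocate_wrb_context(pfob);
	struct be_mcc_wrb_context *b = _be_mcc_allocate_wrb_context(pfob);

	/* With no MCC ring attached (pfob->mcc == NULL), b is NULL. */
	if (b)
		_be_mcc_free_wrb_context(pfob, b);
	if (a)
		_be_mcc_free_wrb_context(pfob, a);
}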
1064
1065int
1066be_mcc_add_async_event_callback(struct be_mcc_object *mcc_object,
1067 mcc_async_event_callback cb, void *cb_context)
1068{
1069 /* Lock against anyone trying to change the callback/context pointers
1070 * while being used. */
1071 spin_lock_irqsave(&mcc_object->parent_function->cq_lock,
1072 mcc_object->parent_function->cq_irq);
1073
1074 /* Assign the async callback. */
1075 mcc_object->async_context = cb_context;
1076 mcc_object->async_cb = cb;
1077
1078 spin_unlock_irqrestore(&mcc_object->parent_function->cq_lock,
1079 mcc_object->parent_function->cq_irq);
1080
1081 return BE_SUCCESS;
1082}
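/*
 * Sketch (illustration only): hooking and later clearing the async
 * event callback. Clearing by passing NULLs is an assumption based on
 * the plain assignments above.
 */
static void example_async_hook(struct be_mcc_object *mcc,
		mcc_async_event_callback handler, void *ctx)
{
	be_mcc_add_async_event_callback(mcc, handler, ctx);
	/* ... later, to unhook: */
	be_mcc_add_async_event_callback(mcc, NULL, NULL);
}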
1083
1084#define MPU_EP_CONTROL 0
1085#define MPU_EP_SEMAPHORE 0xac
1086
1087/*
1088 *-------------------------------------------------------------------
1089 * Function: be_wait_for_POST_complete
1090 * Waits until the BladeEngine POST completes (either in error or success).
1091 * pfob -
1092 * return status - BE_SUCCESS (0) on success. Negative error code on failure.
1093 *-------------------------------------------------------------------
1094 */
1095static int be_wait_for_POST_complete(struct be_function_object *pfob)
1096{
1097 struct MGMT_HBA_POST_STATUS_STRUCT_AMAP status;
1098 int s;
1099 u32 post_error, post_stage;
1100
1101 const u32 us_per_loop = 1000; /* 1000us */
1102 const u32 print_frequency_loops = 1000000 / us_per_loop;
1103 const u32 max_loops = 60 * print_frequency_loops;
1104 u32 loops = 0;
1105
1106 /*
1107	 * Wait for the ARM fw to indicate it is done, or a fatal error.
1108 * Note: POST can take some time to complete depending on configuration
1109 * settings (consider ARM attempts to acquire an IP address
1110 * over DHCP!!!).
1111 *
1112 */
1113 do {
1114 status.dw[0] = ioread32(pfob->csr_va + MPU_EP_SEMAPHORE);
1115 post_error = AMAP_GET_BITS_PTR(MGMT_HBA_POST_STATUS_STRUCT,
1116 error, &status);
1117 post_stage = AMAP_GET_BITS_PTR(MGMT_HBA_POST_STATUS_STRUCT,
1118 stage, &status);
1119 if (0 == (loops % print_frequency_loops)) {
1120 /* Print current status */
1121 TRACE(DL_INFO, "POST status = 0x%x (stage = 0x%x)",
1122 status.dw[0], post_stage);
1123 }
1124 udelay(us_per_loop);
1125 } while ((post_error != 1) &&
1126 (post_stage != POST_STAGE_ARMFW_READY) &&
1127 (++loops < max_loops));
1128
1129 if (post_error == 1) {
1130 TRACE(DL_ERR, "POST error! Status = 0x%x (stage = 0x%x)",
1131 status.dw[0], post_stage);
1132 s = BE_NOT_OK;
1133 } else if (post_stage != POST_STAGE_ARMFW_READY) {
1134 TRACE(DL_ERR, "POST time-out! Status = 0x%x (stage = 0x%x)",
1135 status.dw[0], post_stage);
1136 s = BE_NOT_OK;
1137 } else {
1138 s = BE_SUCCESS;
1139 }
1140 return s;
1141}
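/*
 * Worked example (illustration only): the constants above budget 60
 * seconds of polling before the time-out path is taken.
 */
static u32 example_post_timeout_ms(void)
{
	const u32 us_per_loop = 1000;
	const u32 print_frequency_loops = 1000000 / us_per_loop; /* 1000 */
	const u32 max_loops = 60 * print_frequency_loops;	 /* 60000 */

	return max_loops * us_per_loop / 1000;	/* 60000 ms, i.e. 60 s */
}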
1142
1143/*
1144 *-------------------------------------------------------------------
1145 * Function: be_kickoff_and_wait_for_POST
1146 * Interacts with the BladeEngine management processor to initiate POST, and
1147 * subsequently waits until POST completes (either in error or success).
1148 * The caller must acquire the reset semaphore before initiating POST
1149 * to prevent multiple drivers interacting with the management processor.
1150 * Once POST is complete the caller must release the reset semaphore.
1151 * Callers who only want to wait for POST complete may call
1152 * be_wait_for_POST_complete.
1153 * pfob -
1154 * return status - BE_SUCCESS (0) on success. Negative error code on failure.
1155 *-------------------------------------------------------------------
1156 */
1157static int
1158be_kickoff_and_wait_for_POST(struct be_function_object *pfob)
1159{
1160 struct MGMT_HBA_POST_STATUS_STRUCT_AMAP status;
1161 int s;
1162
1163 const u32 us_per_loop = 1000; /* 1000us */
1164 const u32 print_frequency_loops = 1000000 / us_per_loop;
1165 const u32 max_loops = 5 * print_frequency_loops;
1166 u32 loops = 0;
1167 u32 post_error, post_stage;
1168
1169	/* Wait until the ARM fw awaits host-ready, or a fatal error occurs. */
1170 TRACE(DL_INFO, "Wait for BladeEngine ready to POST");
1171 do {
1172 status.dw[0] = ioread32(pfob->csr_va + MPU_EP_SEMAPHORE);
1173 post_error = AMAP_GET_BITS_PTR(MGMT_HBA_POST_STATUS_STRUCT,
1174 error, &status);
1175 post_stage = AMAP_GET_BITS_PTR(MGMT_HBA_POST_STATUS_STRUCT,
1176 stage, &status);
1177 if (0 == (loops % print_frequency_loops)) {
1178 /* Print current status */
1179 TRACE(DL_INFO, "POST status = 0x%x (stage = 0x%x)",
1180 status.dw[0], post_stage);
1181 }
1182 udelay(us_per_loop);
1183 } while ((post_error != 1) &&
1184 (post_stage < POST_STAGE_AWAITING_HOST_RDY) &&
1185 (++loops < max_loops));
1186
1187 if (post_error == 1) {
1188 TRACE(DL_ERR, "Pre-POST error! Status = 0x%x (stage = 0x%x)",
1189 status.dw[0], post_stage);
1190 s = BE_NOT_OK;
1191 } else if (post_stage == POST_STAGE_AWAITING_HOST_RDY) {
1192 iowrite32(POST_STAGE_HOST_RDY, pfob->csr_va + MPU_EP_SEMAPHORE);
1193
1194 /* Wait for POST to complete */
1195 s = be_wait_for_POST_complete(pfob);
1196 } else {
1197 /*
1198 * Either a timeout waiting for host ready signal or POST has
1199 * moved ahead without requiring a host ready signal.
1200 * Might as well give POST a chance to complete
1201 * (or timeout again).
1202 */
1203 s = be_wait_for_POST_complete(pfob);
1204 }
1205 return s;
1206}
1207
1208/*
1209 *-------------------------------------------------------------------
1210 * Function: be_pci_soft_reset
1211 * This function is called to issue a BladeEngine soft reset.
1212 * Callers should acquire the soft reset semaphore before calling this
1213 * function. Additionally, callers should ensure they cannot be pre-empted
1214 * while the routine executes. Upon completion of this routine, callers
1215 * should release the reset semaphore. This routine implicitly waits
1216 * for BladeEngine POST to complete.
1217 * pfob -
1218 * return status - BE_SUCCESS (0) on success. Negative error code on failure.
1219 *-------------------------------------------------------------------
1220 */
1221int be_pci_soft_reset(struct be_function_object *pfob)
1222{
1223 struct PCICFG_SOFT_RESET_CSR_AMAP soft_reset;
1224 struct PCICFG_ONLINE0_CSR_AMAP pciOnline0;
1225 struct PCICFG_ONLINE1_CSR_AMAP pciOnline1;
1226 struct EP_CONTROL_CSR_AMAP epControlCsr;
1227 int status = BE_SUCCESS;
1228 u32 i, soft_reset_bit;
1229
1230 TRACE(DL_NOTE, "PCI reset...");
1231
1232 /* Issue soft reset #1 to get BladeEngine into a known state. */
1233 soft_reset.dw[0] = PCICFG0_READ(pfob, soft_reset);
1234 AMAP_SET_BITS_PTR(PCICFG_SOFT_RESET_CSR, softreset, soft_reset.dw, 1);
1235	PCICFG0_WRITE(pfob, soft_reset, soft_reset.dw[0]);
1236 /*
1237 * wait til soft reset is deasserted - hardware
1238 * deasserts after some time.
1239 */
1240 i = 0;
1241 do {
1242 udelay(50);
1243 soft_reset.dw[0] = PCICFG0_READ(pfob, soft_reset);
1244 soft_reset_bit = AMAP_GET_BITS_PTR(PCICFG_SOFT_RESET_CSR,
1245 softreset, soft_reset.dw);
1246 } while (soft_reset_bit && (i++ < 1024));
1247 if (soft_reset_bit != 0) {
1248 TRACE(DL_ERR, "Soft-reset #1 did not deassert as expected.");
1249 status = BE_NOT_OK;
1250 goto Error_label;
1251 }
1252 /* Mask everything */
1253 PCICFG0_WRITE(pfob, ue_status_low_mask, 0xFFFFFFFF);
1254 PCICFG0_WRITE(pfob, ue_status_hi_mask, 0xFFFFFFFF);
1255 /*
1256 * Set everything offline except MPU IRAM (it is offline with
1257 * the soft-reset, but soft-reset does not reset the PCICFG registers!)
1258 */
1259 pciOnline0.dw[0] = 0;
1260 pciOnline1.dw[0] = 0;
1261 AMAP_SET_BITS_PTR(PCICFG_ONLINE1_CSR, mpu_iram_online,
1262 pciOnline1.dw, 1);
1263 PCICFG0_WRITE(pfob, online0, pciOnline0.dw[0]);
1264 PCICFG0_WRITE(pfob, online1, pciOnline1.dw[0]);
1265
1266	mdelay(20);
1267
1268 /* Issue soft reset #2. */
1269 AMAP_SET_BITS_PTR(PCICFG_SOFT_RESET_CSR, softreset, soft_reset.dw, 1);
1270	PCICFG0_WRITE(pfob, soft_reset, soft_reset.dw[0]);
1271 /*
1272 * wait til soft reset is deasserted - hardware
1273 * deasserts after some time.
1274 */
1275 i = 0;
1276 do {
1277 udelay(50);
1278 soft_reset.dw[0] = PCICFG0_READ(pfob, soft_reset);
1279 soft_reset_bit = AMAP_GET_BITS_PTR(PCICFG_SOFT_RESET_CSR,
1280 softreset, soft_reset.dw);
1281 } while (soft_reset_bit && (i++ < 1024));
1282 if (soft_reset_bit != 0) {
1283		TRACE(DL_ERR, "Soft-reset #2 did not deassert as expected.");
1284 status = BE_NOT_OK;
1285 goto Error_label;
1286 }
1287
1288
1289	mdelay(20);
1290
1291 /* Take MPU out of reset. */
1292
1293 epControlCsr.dw[0] = ioread32(pfob->csr_va + MPU_EP_CONTROL);
1294 AMAP_SET_BITS_PTR(EP_CONTROL_CSR, CPU_reset, &epControlCsr, 0);
1295 iowrite32((u32)epControlCsr.dw[0], pfob->csr_va + MPU_EP_CONTROL);
1296
1297 /* Kickoff BE POST and wait for completion */
1298 status = be_kickoff_and_wait_for_POST(pfob);
1299
1300Error_label:
1301 return status;
1302}
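/*
 * Sketch (illustration only): the semaphore discipline described in
 * the comment above. acquire_reset_semaphore() and
 * release_reset_semaphore() are hypothetical helpers; only
 * be_pci_soft_reset() is real here.
 */
static int example_guarded_reset(struct be_function_object *pfob)
{
	int status;

	acquire_reset_semaphore(pfob);		/* hypothetical */
	status = be_pci_soft_reset(pfob);	/* waits for POST too */
	release_reset_semaphore(pfob);		/* hypothetical */
	return status;
}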
1303
1304
1305/*
1306 *-------------------------------------------------------------------
1307 * Function: be_pci_reset_required
1308 * This private function is called to detect if a host entity is
1309 * required to issue a PCI soft reset and subsequently drive
1310 * BladeEngine POST. Scenarios where this is required:
1311 * 1) BIOS-less configuration
1312 * 2) Hot-swap/plug/power-on
1313 * pfob -
1314 * return true if a reset is required, false otherwise
1315 *-------------------------------------------------------------------
1316 */
1317static bool be_pci_reset_required(struct be_function_object *pfob)
1318{
1319 struct MGMT_HBA_POST_STATUS_STRUCT_AMAP status;
1320 bool do_reset = false;
1321 u32 post_error, post_stage;
1322
1323 /*
1324 * Read the POST status register
1325 */
1326 status.dw[0] = ioread32(pfob->csr_va + MPU_EP_SEMAPHORE);
1327 post_error = AMAP_GET_BITS_PTR(MGMT_HBA_POST_STATUS_STRUCT, error,
1328 &status);
1329 post_stage = AMAP_GET_BITS_PTR(MGMT_HBA_POST_STATUS_STRUCT, stage,
1330 &status);
1331 if (post_stage <= POST_STAGE_AWAITING_HOST_RDY) {
1332 /*
1333 * If BladeEngine is waiting for host ready indication,
1334 * we want to do a PCI reset.
1335 */
1336 do_reset = true;
1337 }
1338
1339 return do_reset;
1340}
1341
1342/*
1343 *-------------------------------------------------------------------
1344 * Function: be_drive_POST
1345 * This function is called to drive BladeEngine POST. The
1346 * caller should ensure they cannot be pre-empted while this routine executes.
1347 * pfob -
1348 * return status - BE_SUCCESS (0) on success. Negative error code on failure.
1349 *-------------------------------------------------------------------
1350 */
1351int be_drive_POST(struct be_function_object *pfob)
1352{
1353 int status;
1354
1355	if (be_pci_reset_required(pfob)) {
1356 /* PCI reset is needed (implicitly starts and waits for POST) */
1357 status = be_pci_soft_reset(pfob);
1358 } else {
1359 /* No PCI reset is needed, start POST */
1360 status = be_kickoff_and_wait_for_POST(pfob);
1361 }
1362
1363 return status;
1364}
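/*
 * Sketch (illustration only, ordering assumed): POST must complete
 * before the mailbox endian handshake can reach firmware, so a
 * bring-up path would drive POST first and then set up the mailbox.
 */
static int example_bringup(struct be_function_object *pfob,
		struct ring_desc *mailbox)
{
	int status = be_drive_POST(pfob);

	if (status != BE_SUCCESS)
		return status;
	return be_mpu_init_mailbox(pfob, mailbox);
}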
diff --git a/drivers/staging/benet/mpu.h b/drivers/staging/benet/mpu.h
deleted file mode 100644
index 41f3f87516e5..000000000000
--- a/drivers/staging/benet/mpu.h
+++ /dev/null
@@ -1,74 +0,0 @@
1/*
2 * Copyright (C) 2005 - 2008 ServerEngines
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@serverengines.com
12 *
13 * ServerEngines
14 * 209 N. Fair Oaks Ave
15 * Sunnyvale, CA 94085
16 */
17/*
18 * Autogenerated by srcgen version: 0127
19 */
20#ifndef __mpu_amap_h__
21#define __mpu_amap_h__
22#include "ep.h"
23
24/* Provide control parameters for the Management Processor Unit. */
25struct BE_MPU_CSRMAP_AMAP {
26 struct BE_EP_CSRMAP_AMAP ep;
27 u8 rsvd0[128]; /* DWORD 64 */
28 u8 rsvd1[32]; /* DWORD 68 */
29 u8 rsvd2[192]; /* DWORD 69 */
30 u8 rsvd3[192]; /* DWORD 75 */
31 u8 rsvd4[32]; /* DWORD 81 */
32 u8 rsvd5[32]; /* DWORD 82 */
33 u8 rsvd6[32]; /* DWORD 83 */
34 u8 rsvd7[32]; /* DWORD 84 */
35 u8 rsvd8[32]; /* DWORD 85 */
36 u8 rsvd9[32]; /* DWORD 86 */
37 u8 rsvd10[32]; /* DWORD 87 */
38 u8 rsvd11[32]; /* DWORD 88 */
39 u8 rsvd12[32]; /* DWORD 89 */
40 u8 rsvd13[32]; /* DWORD 90 */
41 u8 rsvd14[32]; /* DWORD 91 */
42 u8 rsvd15[32]; /* DWORD 92 */
43 u8 rsvd16[32]; /* DWORD 93 */
44 u8 rsvd17[32]; /* DWORD 94 */
45 u8 rsvd18[32]; /* DWORD 95 */
46 u8 rsvd19[32]; /* DWORD 96 */
47 u8 rsvd20[32]; /* DWORD 97 */
48 u8 rsvd21[32]; /* DWORD 98 */
49 u8 rsvd22[32]; /* DWORD 99 */
50 u8 rsvd23[32]; /* DWORD 100 */
51 u8 rsvd24[32]; /* DWORD 101 */
52 u8 rsvd25[32]; /* DWORD 102 */
53 u8 rsvd26[32]; /* DWORD 103 */
54 u8 rsvd27[32]; /* DWORD 104 */
55 u8 rsvd28[96]; /* DWORD 105 */
56 u8 rsvd29[32]; /* DWORD 108 */
57 u8 rsvd30[32]; /* DWORD 109 */
58 u8 rsvd31[32]; /* DWORD 110 */
59 u8 rsvd32[32]; /* DWORD 111 */
60 u8 rsvd33[32]; /* DWORD 112 */
61 u8 rsvd34[96]; /* DWORD 113 */
62 u8 rsvd35[32]; /* DWORD 116 */
63 u8 rsvd36[32]; /* DWORD 117 */
64 u8 rsvd37[32]; /* DWORD 118 */
65 u8 rsvd38[32]; /* DWORD 119 */
66 u8 rsvd39[32]; /* DWORD 120 */
67 u8 rsvd40[32]; /* DWORD 121 */
68 u8 rsvd41[134][32]; /* DWORD 122 */
69} __packed;
70struct MPU_CSRMAP_AMAP {
71 u32 dw[256];
72};
73
74#endif /* __mpu_amap_h__ */
diff --git a/drivers/staging/benet/mpu_context.h b/drivers/staging/benet/mpu_context.h
deleted file mode 100644
index 8ce90f9c46c2..000000000000
--- a/drivers/staging/benet/mpu_context.h
+++ /dev/null
@@ -1,46 +0,0 @@
1/*
2 * Copyright (C) 2005 - 2008 ServerEngines
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@serverengines.com
12 *
13 * ServerEngines
14 * 209 N. Fair Oaks Ave
15 * Sunnyvale, CA 94085
16 */
17/*
18 * Autogenerated by srcgen version: 0127
19 */
20#ifndef __mpu_context_amap_h__
21#define __mpu_context_amap_h__
22
23/*
24 * Management command and control ring context. The MPU's BTLR_CTRL1 CSR
25 * controls the writeback behavior of the producer and consumer index values.
26 */
27struct BE_MCC_RING_CONTEXT_AMAP {
28 u8 con_index[16]; /* DWORD 0 */
29 u8 ring_size[4]; /* DWORD 0 */
30 u8 cq_id[11]; /* DWORD 0 */
31 u8 rsvd0; /* DWORD 0 */
32 u8 prod_index[16]; /* DWORD 1 */
33 u8 pdid[15]; /* DWORD 1 */
34 u8 invalid; /* DWORD 1 */
35 u8 cmd_pending_current[7]; /* DWORD 2 */
36 u8 rsvd1[25]; /* DWORD 2 */
37 u8 hpi_port_cq_id[11]; /* DWORD 3 */
38 u8 rsvd2[5]; /* DWORD 3 */
39 u8 cmd_pending_max[7]; /* DWORD 3 */
40 u8 rsvd3[9]; /* DWORD 3 */
41} __packed;
42struct MCC_RING_CONTEXT_AMAP {
43 u32 dw[4];
44};
45
46#endif /* __mpu_context_amap_h__ */
diff --git a/drivers/staging/benet/pcicfg.h b/drivers/staging/benet/pcicfg.h
deleted file mode 100644
index 7c15684adf4a..000000000000
--- a/drivers/staging/benet/pcicfg.h
+++ /dev/null
@@ -1,825 +0,0 @@
1/*
2 * Copyright (C) 2005 - 2008 ServerEngines
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@serverengines.com
12 *
13 * ServerEngines
14 * 209 N. Fair Oaks Ave
15 * Sunnyvale, CA 94085
16 */
17/*
18 * Autogenerated by srcgen version: 0127
19 */
20#ifndef __pcicfg_amap_h__
21#define __pcicfg_amap_h__
22
23/* Vendor and Device ID Register. */
24struct BE_PCICFG_ID_CSR_AMAP {
25 u8 vendorid[16]; /* DWORD 0 */
26 u8 deviceid[16]; /* DWORD 0 */
27} __packed;
28struct PCICFG_ID_CSR_AMAP {
29 u32 dw[1];
30};
31
32/* IO Bar Register. */
33struct BE_PCICFG_IOBAR_CSR_AMAP {
34 u8 iospace; /* DWORD 0 */
35 u8 rsvd0[7]; /* DWORD 0 */
36 u8 iobar[24]; /* DWORD 0 */
37} __packed;
38struct PCICFG_IOBAR_CSR_AMAP {
39 u32 dw[1];
40};
41
42/* Memory BAR 0 Register. */
43struct BE_PCICFG_MEMBAR0_CSR_AMAP {
44 u8 memspace; /* DWORD 0 */
45 u8 type[2]; /* DWORD 0 */
46 u8 pf; /* DWORD 0 */
47 u8 rsvd0[10]; /* DWORD 0 */
48 u8 membar0[18]; /* DWORD 0 */
49} __packed;
50struct PCICFG_MEMBAR0_CSR_AMAP {
51 u32 dw[1];
52};
53
54/* Memory BAR 1 - Low Address Register. */
55struct BE_PCICFG_MEMBAR1_LO_CSR_AMAP {
56 u8 memspace; /* DWORD 0 */
57 u8 type[2]; /* DWORD 0 */
58 u8 pf; /* DWORD 0 */
59 u8 rsvd0[13]; /* DWORD 0 */
60 u8 membar1lo[15]; /* DWORD 0 */
61} __packed;
62struct PCICFG_MEMBAR1_LO_CSR_AMAP {
63 u32 dw[1];
64};
65
66/* Memory BAR 1 - High Address Register. */
67struct BE_PCICFG_MEMBAR1_HI_CSR_AMAP {
68 u8 membar1hi[32]; /* DWORD 0 */
69} __packed;
70struct PCICFG_MEMBAR1_HI_CSR_AMAP {
71 u32 dw[1];
72};
73
74/* Memory BAR 2 - Low Address Register. */
75struct BE_PCICFG_MEMBAR2_LO_CSR_AMAP {
76 u8 memspace; /* DWORD 0 */
77 u8 type[2]; /* DWORD 0 */
78 u8 pf; /* DWORD 0 */
79 u8 rsvd0[17]; /* DWORD 0 */
80 u8 membar2lo[11]; /* DWORD 0 */
81} __packed;
82struct PCICFG_MEMBAR2_LO_CSR_AMAP {
83 u32 dw[1];
84};
85
86/* Memory BAR 2 - High Address Register. */
87struct BE_PCICFG_MEMBAR2_HI_CSR_AMAP {
88 u8 membar2hi[32]; /* DWORD 0 */
89} __packed;
90struct PCICFG_MEMBAR2_HI_CSR_AMAP {
91 u32 dw[1];
92};
93
94/* Subsystem Vendor and ID (Function 0) Register. */
95struct BE_PCICFG_SUBSYSTEM_ID_F0_CSR_AMAP {
96 u8 subsys_vendor_id[16]; /* DWORD 0 */
97 u8 subsys_id[16]; /* DWORD 0 */
98} __packed;
99struct PCICFG_SUBSYSTEM_ID_F0_CSR_AMAP {
100 u32 dw[1];
101};
102
103/* Subsystem Vendor and ID (Function 1) Register. */
104struct BE_PCICFG_SUBSYSTEM_ID_F1_CSR_AMAP {
105 u8 subsys_vendor_id[16]; /* DWORD 0 */
106 u8 subsys_id[16]; /* DWORD 0 */
107} __packed;
108struct PCICFG_SUBSYSTEM_ID_F1_CSR_AMAP {
109 u32 dw[1];
110};
111
112/* Semaphore Register. */
113struct BE_PCICFG_SEMAPHORE_CSR_AMAP {
114 u8 locked; /* DWORD 0 */
115 u8 rsvd0[31]; /* DWORD 0 */
116} __packed;
117struct PCICFG_SEMAPHORE_CSR_AMAP {
118 u32 dw[1];
119};
120
121/* Soft Reset Register. */
122struct BE_PCICFG_SOFT_RESET_CSR_AMAP {
123 u8 rsvd0[7]; /* DWORD 0 */
124 u8 softreset; /* DWORD 0 */
125 u8 rsvd1[16]; /* DWORD 0 */
126 u8 nec_ll_rcvdetect_i[8]; /* DWORD 0 */
127} __packed;
128struct PCICFG_SOFT_RESET_CSR_AMAP {
129 u32 dw[1];
130};
131
132/* Unrecoverable Error Status (Low) Register. Each bit corresponds to
133 * an internal Unrecoverable Error. These are set by hardware and may be
134 * cleared by writing a one to the respective bit(s) to be cleared. Any
135 * bit being set that is also unmasked will result in Unrecoverable Error
136 * interrupt notification to the host CPU and/or Server Management chip
137 * and the transitioning of BladeEngine to an Offline state.
138 */
139struct BE_PCICFG_UE_STATUS_LOW_CSR_AMAP {
140 u8 cev_ue_status; /* DWORD 0 */
141 u8 ctx_ue_status; /* DWORD 0 */
142 u8 dbuf_ue_status; /* DWORD 0 */
143 u8 erx_ue_status; /* DWORD 0 */
144 u8 host_ue_status; /* DWORD 0 */
145 u8 mpu_ue_status; /* DWORD 0 */
146 u8 ndma_ue_status; /* DWORD 0 */
147 u8 ptc_ue_status; /* DWORD 0 */
148 u8 rdma_ue_status; /* DWORD 0 */
149 u8 rxf_ue_status; /* DWORD 0 */
150 u8 rxips_ue_status; /* DWORD 0 */
151 u8 rxulp0_ue_status; /* DWORD 0 */
152 u8 rxulp1_ue_status; /* DWORD 0 */
153 u8 rxulp2_ue_status; /* DWORD 0 */
154 u8 tim_ue_status; /* DWORD 0 */
155 u8 tpost_ue_status; /* DWORD 0 */
156 u8 tpre_ue_status; /* DWORD 0 */
157 u8 txips_ue_status; /* DWORD 0 */
158 u8 txulp0_ue_status; /* DWORD 0 */
159 u8 txulp1_ue_status; /* DWORD 0 */
160 u8 uc_ue_status; /* DWORD 0 */
161 u8 wdma_ue_status; /* DWORD 0 */
162 u8 txulp2_ue_status; /* DWORD 0 */
163 u8 host1_ue_status; /* DWORD 0 */
164 u8 p0_ob_link_ue_status; /* DWORD 0 */
165 u8 p1_ob_link_ue_status; /* DWORD 0 */
166 u8 host_gpio_ue_status; /* DWORD 0 */
167 u8 mbox_netw_ue_status; /* DWORD 0 */
168 u8 mbox_stor_ue_status; /* DWORD 0 */
169 u8 axgmac0_ue_status; /* DWORD 0 */
170 u8 axgmac1_ue_status; /* DWORD 0 */
171 u8 mpu_intpend_ue_status; /* DWORD 0 */
172} __packed;
173struct PCICFG_UE_STATUS_LOW_CSR_AMAP {
174 u32 dw[1];
175};
176
177/* Unrecoverable Error Status (High) Register. Each bit corresponds to
178 * an internal Unrecoverable Error. These are set by hardware and may be
179 * cleared by writing a one to the respective bit(s) to be cleared. Any
180 * bit being set that is also unmasked will result in Unrecoverable Error
181 * interrupt notification to the host CPU and/or Server Management chip;
182 * and the transitioning of BladeEngine to an Offline state.
183 */
184struct BE_PCICFG_UE_STATUS_HI_CSR_AMAP {
185 u8 jtag_ue_status; /* DWORD 0 */
186 u8 lpcmemhost_ue_status; /* DWORD 0 */
187 u8 mgmt_mac_ue_status; /* DWORD 0 */
188 u8 mpu_iram_ue_status; /* DWORD 0 */
189 u8 pcs0online_ue_status; /* DWORD 0 */
190 u8 pcs1online_ue_status; /* DWORD 0 */
191 u8 pctl0_ue_status; /* DWORD 0 */
192 u8 pctl1_ue_status; /* DWORD 0 */
193 u8 pmem_ue_status; /* DWORD 0 */
194 u8 rr_ue_status; /* DWORD 0 */
195 u8 rxpp_ue_status; /* DWORD 0 */
196 u8 txpb_ue_status; /* DWORD 0 */
197 u8 txp_ue_status; /* DWORD 0 */
198 u8 xaui_ue_status; /* DWORD 0 */
199 u8 arm_ue_status; /* DWORD 0 */
200 u8 ipc_ue_status; /* DWORD 0 */
201 u8 rsvd0[16]; /* DWORD 0 */
202} __packed;
203struct PCICFG_UE_STATUS_HI_CSR_AMAP {
204 u32 dw[1];
205};
206
207/* Unrecoverable Error Mask (Low) Register. Each bit, when set to one,
208 * will mask the associated Unrecoverable Error status bit from notification
209 * of Unrecoverable Error to the host CPU and/or Server Management chip and the
210 * transitioning of all BladeEngine units to an Offline state.
211 */
212struct BE_PCICFG_UE_STATUS_LOW_MASK_CSR_AMAP {
213 u8 cev_ue_mask; /* DWORD 0 */
214 u8 ctx_ue_mask; /* DWORD 0 */
215 u8 dbuf_ue_mask; /* DWORD 0 */
216 u8 erx_ue_mask; /* DWORD 0 */
217 u8 host_ue_mask; /* DWORD 0 */
218 u8 mpu_ue_mask; /* DWORD 0 */
219 u8 ndma_ue_mask; /* DWORD 0 */
220 u8 ptc_ue_mask; /* DWORD 0 */
221 u8 rdma_ue_mask; /* DWORD 0 */
222 u8 rxf_ue_mask; /* DWORD 0 */
223 u8 rxips_ue_mask; /* DWORD 0 */
224 u8 rxulp0_ue_mask; /* DWORD 0 */
225 u8 rxulp1_ue_mask; /* DWORD 0 */
226 u8 rxulp2_ue_mask; /* DWORD 0 */
227 u8 tim_ue_mask; /* DWORD 0 */
228 u8 tpost_ue_mask; /* DWORD 0 */
229 u8 tpre_ue_mask; /* DWORD 0 */
230 u8 txips_ue_mask; /* DWORD 0 */
231 u8 txulp0_ue_mask; /* DWORD 0 */
232 u8 txulp1_ue_mask; /* DWORD 0 */
233 u8 uc_ue_mask; /* DWORD 0 */
234 u8 wdma_ue_mask; /* DWORD 0 */
235 u8 txulp2_ue_mask; /* DWORD 0 */
236 u8 host1_ue_mask; /* DWORD 0 */
237 u8 p0_ob_link_ue_mask; /* DWORD 0 */
238 u8 p1_ob_link_ue_mask; /* DWORD 0 */
239 u8 host_gpio_ue_mask; /* DWORD 0 */
240 u8 mbox_netw_ue_mask; /* DWORD 0 */
241 u8 mbox_stor_ue_mask; /* DWORD 0 */
242 u8 axgmac0_ue_mask; /* DWORD 0 */
243 u8 axgmac1_ue_mask; /* DWORD 0 */
244 u8 mpu_intpend_ue_mask; /* DWORD 0 */
245} __packed;
246struct PCICFG_UE_STATUS_LOW_MASK_CSR_AMAP {
247 u32 dw[1];
248};
249
250/* Unrecoverable Error Mask (High) Register. Each bit, when set to one,
251 * will mask the associated Unrecoverable Error status bit from notification
252 * of Unrecoverable Error to the host CPU and/or Server Management chip and the
253 * transitioning of all BladeEngine units to an Offline state.
254 */
255struct BE_PCICFG_UE_STATUS_HI_MASK_CSR_AMAP {
256 u8 jtag_ue_mask; /* DWORD 0 */
257 u8 lpcmemhost_ue_mask; /* DWORD 0 */
258 u8 mgmt_mac_ue_mask; /* DWORD 0 */
259 u8 mpu_iram_ue_mask; /* DWORD 0 */
260 u8 pcs0online_ue_mask; /* DWORD 0 */
261 u8 pcs1online_ue_mask; /* DWORD 0 */
262 u8 pctl0_ue_mask; /* DWORD 0 */
263 u8 pctl1_ue_mask; /* DWORD 0 */
264 u8 pmem_ue_mask; /* DWORD 0 */
265 u8 rr_ue_mask; /* DWORD 0 */
266 u8 rxpp_ue_mask; /* DWORD 0 */
267 u8 txpb_ue_mask; /* DWORD 0 */
268 u8 txp_ue_mask; /* DWORD 0 */
269 u8 xaui_ue_mask; /* DWORD 0 */
270 u8 arm_ue_mask; /* DWORD 0 */
271 u8 ipc_ue_mask; /* DWORD 0 */
272 u8 rsvd0[16]; /* DWORD 0 */
273} __packed;
274struct PCICFG_UE_STATUS_HI_MASK_CSR_AMAP {
275 u32 dw[1];
276};
277
278/* Online Control Register 0. This register controls various units within
279 * BladeEngine being in an Online or Offline state.
280 */
281struct BE_PCICFG_ONLINE0_CSR_AMAP {
282 u8 cev_online; /* DWORD 0 */
283 u8 ctx_online; /* DWORD 0 */
284 u8 dbuf_online; /* DWORD 0 */
285 u8 erx_online; /* DWORD 0 */
286 u8 host_online; /* DWORD 0 */
287 u8 mpu_online; /* DWORD 0 */
288 u8 ndma_online; /* DWORD 0 */
289 u8 ptc_online; /* DWORD 0 */
290 u8 rdma_online; /* DWORD 0 */
291 u8 rxf_online; /* DWORD 0 */
292 u8 rxips_online; /* DWORD 0 */
293 u8 rxulp0_online; /* DWORD 0 */
294 u8 rxulp1_online; /* DWORD 0 */
295 u8 rxulp2_online; /* DWORD 0 */
296 u8 tim_online; /* DWORD 0 */
297 u8 tpost_online; /* DWORD 0 */
298 u8 tpre_online; /* DWORD 0 */
299 u8 txips_online; /* DWORD 0 */
300 u8 txulp0_online; /* DWORD 0 */
301 u8 txulp1_online; /* DWORD 0 */
302 u8 uc_online; /* DWORD 0 */
303 u8 wdma_online; /* DWORD 0 */
304 u8 txulp2_online; /* DWORD 0 */
305 u8 host1_online; /* DWORD 0 */
306 u8 p0_ob_link_online; /* DWORD 0 */
307 u8 p1_ob_link_online; /* DWORD 0 */
308 u8 host_gpio_online; /* DWORD 0 */
309 u8 mbox_netw_online; /* DWORD 0 */
310 u8 mbox_stor_online; /* DWORD 0 */
311 u8 axgmac0_online; /* DWORD 0 */
312 u8 axgmac1_online; /* DWORD 0 */
313 u8 mpu_intpend_online; /* DWORD 0 */
314} __packed;
315struct PCICFG_ONLINE0_CSR_AMAP {
316 u32 dw[1];
317};
318
319/* Online Control Register 1. This register controls various units within
320 * BladeEngine being in an Online or Offline state.
321 */
322struct BE_PCICFG_ONLINE1_CSR_AMAP {
323 u8 jtag_online; /* DWORD 0 */
324 u8 lpcmemhost_online; /* DWORD 0 */
325 u8 mgmt_mac_online; /* DWORD 0 */
326 u8 mpu_iram_online; /* DWORD 0 */
327 u8 pcs0online_online; /* DWORD 0 */
328 u8 pcs1online_online; /* DWORD 0 */
329 u8 pctl0_online; /* DWORD 0 */
330 u8 pctl1_online; /* DWORD 0 */
331 u8 pmem_online; /* DWORD 0 */
332 u8 rr_online; /* DWORD 0 */
333 u8 rxpp_online; /* DWORD 0 */
334 u8 txpb_online; /* DWORD 0 */
335 u8 txp_online; /* DWORD 0 */
336 u8 xaui_online; /* DWORD 0 */
337 u8 arm_online; /* DWORD 0 */
338 u8 ipc_online; /* DWORD 0 */
339 u8 rsvd0[16]; /* DWORD 0 */
340} __packed;
341struct PCICFG_ONLINE1_CSR_AMAP {
342 u32 dw[1];
343};
344
345/* Host Timer Register. */
346struct BE_PCICFG_HOST_TIMER_INT_CTRL_CSR_AMAP {
347 u8 hosttimer[24]; /* DWORD 0 */
348 u8 hostintr; /* DWORD 0 */
349 u8 rsvd0[7]; /* DWORD 0 */
350} __packed;
351struct PCICFG_HOST_TIMER_INT_CTRL_CSR_AMAP {
352 u32 dw[1];
353};
354
355/* Scratchpad Register (for software use). */
356struct BE_PCICFG_SCRATCHPAD_CSR_AMAP {
357 u8 scratchpad[32]; /* DWORD 0 */
358} __packed;
359struct PCICFG_SCRATCHPAD_CSR_AMAP {
360 u32 dw[1];
361};
362
363/* PCI Express Capabilities Register. */
364struct BE_PCICFG_PCIE_CAP_CSR_AMAP {
365 u8 capid[8]; /* DWORD 0 */
366 u8 nextcap[8]; /* DWORD 0 */
367 u8 capver[4]; /* DWORD 0 */
368 u8 devport[4]; /* DWORD 0 */
369 u8 rsvd0[6]; /* DWORD 0 */
370 u8 rsvd1[2]; /* DWORD 0 */
371} __packed;
372struct PCICFG_PCIE_CAP_CSR_AMAP {
373 u32 dw[1];
374};
375
376/* PCI Express Device Capabilities Register. */
377struct BE_PCICFG_PCIE_DEVCAP_CSR_AMAP {
378 u8 payload[3]; /* DWORD 0 */
379 u8 rsvd0[3]; /* DWORD 0 */
380 u8 lo_lat[3]; /* DWORD 0 */
381 u8 l1_lat[3]; /* DWORD 0 */
382 u8 rsvd1[3]; /* DWORD 0 */
383 u8 rsvd2[3]; /* DWORD 0 */
384 u8 pwr_value[8]; /* DWORD 0 */
385 u8 pwr_scale[2]; /* DWORD 0 */
386 u8 rsvd3[4]; /* DWORD 0 */
387} __packed;
388struct PCICFG_PCIE_DEVCAP_CSR_AMAP {
389 u32 dw[1];
390};
391
392/* PCI Express Device Control/Status Registers. */
393struct BE_PCICFG_PCIE_CONTROL_STATUS_CSR_AMAP {
394 u8 CorrErrReportEn; /* DWORD 0 */
395 u8 NonFatalErrReportEn; /* DWORD 0 */
396 u8 FatalErrReportEn; /* DWORD 0 */
397 u8 UnsuppReqReportEn; /* DWORD 0 */
398 u8 EnableRelaxOrder; /* DWORD 0 */
399 u8 Max_Payload_Size[3]; /* DWORD 0 */
400 u8 ExtendTagFieldEnable; /* DWORD 0 */
401 u8 PhantomFnEnable; /* DWORD 0 */
402 u8 AuxPwrPMEnable; /* DWORD 0 */
403 u8 EnableNoSnoop; /* DWORD 0 */
404 u8 Max_Read_Req_Size[3]; /* DWORD 0 */
405 u8 rsvd0; /* DWORD 0 */
406 u8 CorrErrDetect; /* DWORD 0 */
407 u8 NonFatalErrDetect; /* DWORD 0 */
408 u8 FatalErrDetect; /* DWORD 0 */
409 u8 UnsuppReqDetect; /* DWORD 0 */
410 u8 AuxPwrDetect; /* DWORD 0 */
411 u8 TransPending; /* DWORD 0 */
412 u8 rsvd1[10]; /* DWORD 0 */
413} __packed;
414struct PCICFG_PCIE_CONTROL_STATUS_CSR_AMAP {
415 u32 dw[1];
416};
417
418/* PCI Express Link Capabilities Register. */
419struct BE_PCICFG_PCIE_LINK_CAP_CSR_AMAP {
420 u8 MaxLinkSpeed[4]; /* DWORD 0 */
421 u8 MaxLinkWidth[6]; /* DWORD 0 */
422 u8 ASPMSupport[2]; /* DWORD 0 */
423 u8 L0sExitLat[3]; /* DWORD 0 */
424 u8 L1ExitLat[3]; /* DWORD 0 */
425 u8 rsvd0[6]; /* DWORD 0 */
426 u8 PortNum[8]; /* DWORD 0 */
427} __packed;
428struct PCICFG_PCIE_LINK_CAP_CSR_AMAP {
429 u32 dw[1];
430};
431
432/* PCI Express Link Status Register. */
433struct BE_PCICFG_PCIE_LINK_STATUS_CSR_AMAP {
434 u8 ASPMCtl[2]; /* DWORD 0 */
435 u8 rsvd0; /* DWORD 0 */
436 u8 ReadCmplBndry; /* DWORD 0 */
437 u8 LinkDisable; /* DWORD 0 */
438 u8 RetrainLink; /* DWORD 0 */
439 u8 CommonClkConfig; /* DWORD 0 */
440 u8 ExtendSync; /* DWORD 0 */
441 u8 rsvd1[8]; /* DWORD 0 */
442 u8 LinkSpeed[4]; /* DWORD 0 */
443 u8 NegLinkWidth[6]; /* DWORD 0 */
444 u8 LinkTrainErr; /* DWORD 0 */
445 u8 LinkTrain; /* DWORD 0 */
446 u8 SlotClkConfig; /* DWORD 0 */
447 u8 rsvd2[3]; /* DWORD 0 */
448} __packed;
449struct PCICFG_PCIE_LINK_STATUS_CSR_AMAP {
450 u32 dw[1];
451};
452
453/* PCI Express MSI Configuration Register. */
454struct BE_PCICFG_MSI_CSR_AMAP {
455 u8 capid[8]; /* DWORD 0 */
456 u8 nextptr[8]; /* DWORD 0 */
457 u8 tablesize[11]; /* DWORD 0 */
458 u8 rsvd0[3]; /* DWORD 0 */
459 u8 funcmask; /* DWORD 0 */
460 u8 en; /* DWORD 0 */
461} __packed;
462struct PCICFG_MSI_CSR_AMAP {
463 u32 dw[1];
464};
465
466/* MSI-X Table Offset Register. */
467struct BE_PCICFG_MSIX_TABLE_CSR_AMAP {
468 u8 tablebir[3]; /* DWORD 0 */
469 u8 offset[29]; /* DWORD 0 */
470} __packed;
471struct PCICFG_MSIX_TABLE_CSR_AMAP {
472 u32 dw[1];
473};
474
475/* MSI-X PBA Offset Register. */
476struct BE_PCICFG_MSIX_PBA_CSR_AMAP {
477 u8 pbabir[3]; /* DWORD 0 */
478 u8 offset[29]; /* DWORD 0 */
479} __packed;
480struct PCICFG_MSIX_PBA_CSR_AMAP {
481 u32 dw[1];
482};
483
484/* PCI Express MSI-X Message Vector Control Register. */
485struct BE_PCICFG_MSIX_VECTOR_CONTROL_CSR_AMAP {
486 u8 vector_control; /* DWORD 0 */
487 u8 rsvd0[31]; /* DWORD 0 */
488} __packed;
489struct PCICFG_MSIX_VECTOR_CONTROL_CSR_AMAP {
490 u32 dw[1];
491};
492
493/* PCI Express MSI-X Message Data Register. */
494struct BE_PCICFG_MSIX_MSG_DATA_CSR_AMAP {
495 u8 data[16]; /* DWORD 0 */
496 u8 rsvd0[16]; /* DWORD 0 */
497} __packed;
498struct PCICFG_MSIX_MSG_DATA_CSR_AMAP {
499 u32 dw[1];
500};
501
502/* PCI Express MSI-X Message Address Register - High Part. */
503struct BE_PCICFG_MSIX_MSG_ADDR_HI_CSR_AMAP {
504 u8 addr[32]; /* DWORD 0 */
505} __packed;
506struct PCICFG_MSIX_MSG_ADDR_HI_CSR_AMAP {
507 u32 dw[1];
508};
509
510/* PCI Express MSI-X Message Address Register - Low Part. */
511struct BE_PCICFG_MSIX_MSG_ADDR_LO_CSR_AMAP {
512 u8 rsvd0[2]; /* DWORD 0 */
513 u8 addr[30]; /* DWORD 0 */
514} __packed;
515struct PCICFG_MSIX_MSG_ADDR_LO_CSR_AMAP {
516 u32 dw[1];
517};
518
519struct BE_PCICFG_ANON_18_RSVD_AMAP {
520 u8 rsvd0[32]; /* DWORD 0 */
521} __packed;
522struct PCICFG_ANON_18_RSVD_AMAP {
523 u32 dw[1];
524};
525
526struct BE_PCICFG_ANON_19_RSVD_AMAP {
527 u8 rsvd0[32]; /* DWORD 0 */
528} __packed;
529struct PCICFG_ANON_19_RSVD_AMAP {
530 u32 dw[1];
531};
532
533struct BE_PCICFG_ANON_20_RSVD_AMAP {
534 u8 rsvd0[32]; /* DWORD 0 */
535 u8 rsvd1[25][32]; /* DWORD 1 */
536} __packed;
537struct PCICFG_ANON_20_RSVD_AMAP {
538 u32 dw[26];
539};
540
541struct BE_PCICFG_ANON_21_RSVD_AMAP {
542 u8 rsvd0[32]; /* DWORD 0 */
543 u8 rsvd1[1919][32]; /* DWORD 1 */
544} __packed;
545struct PCICFG_ANON_21_RSVD_AMAP {
546 u32 dw[1920];
547};
548
549struct BE_PCICFG_ANON_22_MESSAGE_AMAP {
550 struct BE_PCICFG_MSIX_VECTOR_CONTROL_CSR_AMAP vec_ctrl;
551 struct BE_PCICFG_MSIX_MSG_DATA_CSR_AMAP msg_data;
552 struct BE_PCICFG_MSIX_MSG_ADDR_HI_CSR_AMAP addr_hi;
553 struct BE_PCICFG_MSIX_MSG_ADDR_LO_CSR_AMAP addr_low;
554} __packed;
555struct PCICFG_ANON_22_MESSAGE_AMAP {
556 u32 dw[4];
557};
558
559struct BE_PCICFG_ANON_23_RSVD_AMAP {
560 u8 rsvd0[32]; /* DWORD 0 */
561 u8 rsvd1[895][32]; /* DWORD 1 */
562} __packed;
563struct PCICFG_ANON_23_RSVD_AMAP {
564 u32 dw[896];
565};
566
567/* These PCI Configuration Space registers are for the Storage Function of
568 * BladeEngine (Function 0). The structure below gives the memory map
569 * of these registers.
570 */
571struct BE_PCICFG0_CSRMAP_AMAP {
572 struct BE_PCICFG_ID_CSR_AMAP id;
573 u8 rsvd0[32]; /* DWORD 1 */
574 u8 rsvd1[32]; /* DWORD 2 */
575 u8 rsvd2[32]; /* DWORD 3 */
576 struct BE_PCICFG_IOBAR_CSR_AMAP iobar;
577 struct BE_PCICFG_MEMBAR0_CSR_AMAP membar0;
578 struct BE_PCICFG_MEMBAR1_LO_CSR_AMAP membar1_lo;
579 struct BE_PCICFG_MEMBAR1_HI_CSR_AMAP membar1_hi;
580 struct BE_PCICFG_MEMBAR2_LO_CSR_AMAP membar2_lo;
581 struct BE_PCICFG_MEMBAR2_HI_CSR_AMAP membar2_hi;
582 u8 rsvd3[32]; /* DWORD 10 */
583 struct BE_PCICFG_SUBSYSTEM_ID_F0_CSR_AMAP subsystem_id;
584 u8 rsvd4[32]; /* DWORD 12 */
585 u8 rsvd5[32]; /* DWORD 13 */
586 u8 rsvd6[32]; /* DWORD 14 */
587 u8 rsvd7[32]; /* DWORD 15 */
588 struct BE_PCICFG_SEMAPHORE_CSR_AMAP semaphore[4];
589 struct BE_PCICFG_SOFT_RESET_CSR_AMAP soft_reset;
590 u8 rsvd8[32]; /* DWORD 21 */
591 struct BE_PCICFG_SCRATCHPAD_CSR_AMAP scratchpad;
592 u8 rsvd9[32]; /* DWORD 23 */
593 u8 rsvd10[32]; /* DWORD 24 */
594 u8 rsvd11[32]; /* DWORD 25 */
595 u8 rsvd12[32]; /* DWORD 26 */
596 u8 rsvd13[32]; /* DWORD 27 */
597 u8 rsvd14[2][32]; /* DWORD 28 */
598 u8 rsvd15[32]; /* DWORD 30 */
599 u8 rsvd16[32]; /* DWORD 31 */
600 u8 rsvd17[8][32]; /* DWORD 32 */
601 struct BE_PCICFG_UE_STATUS_LOW_CSR_AMAP ue_status_low;
602 struct BE_PCICFG_UE_STATUS_HI_CSR_AMAP ue_status_hi;
603 struct BE_PCICFG_UE_STATUS_LOW_MASK_CSR_AMAP ue_status_low_mask;
604 struct BE_PCICFG_UE_STATUS_HI_MASK_CSR_AMAP ue_status_hi_mask;
605 struct BE_PCICFG_ONLINE0_CSR_AMAP online0;
606 struct BE_PCICFG_ONLINE1_CSR_AMAP online1;
607 u8 rsvd18[32]; /* DWORD 46 */
608 u8 rsvd19[32]; /* DWORD 47 */
609 u8 rsvd20[32]; /* DWORD 48 */
610 u8 rsvd21[32]; /* DWORD 49 */
611 struct BE_PCICFG_HOST_TIMER_INT_CTRL_CSR_AMAP host_timer_int_ctrl;
612 u8 rsvd22[32]; /* DWORD 51 */
613 struct BE_PCICFG_PCIE_CAP_CSR_AMAP pcie_cap;
614 struct BE_PCICFG_PCIE_DEVCAP_CSR_AMAP pcie_devcap;
615 struct BE_PCICFG_PCIE_CONTROL_STATUS_CSR_AMAP pcie_control_status;
616 struct BE_PCICFG_PCIE_LINK_CAP_CSR_AMAP pcie_link_cap;
617 struct BE_PCICFG_PCIE_LINK_STATUS_CSR_AMAP pcie_link_status;
618 struct BE_PCICFG_MSI_CSR_AMAP msi;
619 struct BE_PCICFG_MSIX_TABLE_CSR_AMAP msix_table_offset;
620 struct BE_PCICFG_MSIX_PBA_CSR_AMAP msix_pba_offset;
621 u8 rsvd23[32]; /* DWORD 60 */
622 u8 rsvd24[32]; /* DWORD 61 */
623 u8 rsvd25[32]; /* DWORD 62 */
624 u8 rsvd26[32]; /* DWORD 63 */
625 u8 rsvd27[32]; /* DWORD 64 */
626 u8 rsvd28[32]; /* DWORD 65 */
627 u8 rsvd29[32]; /* DWORD 66 */
628 u8 rsvd30[32]; /* DWORD 67 */
629 u8 rsvd31[32]; /* DWORD 68 */
630 u8 rsvd32[32]; /* DWORD 69 */
631 u8 rsvd33[32]; /* DWORD 70 */
632 u8 rsvd34[32]; /* DWORD 71 */
633 u8 rsvd35[32]; /* DWORD 72 */
634 u8 rsvd36[32]; /* DWORD 73 */
635 u8 rsvd37[32]; /* DWORD 74 */
636 u8 rsvd38[32]; /* DWORD 75 */
637 u8 rsvd39[32]; /* DWORD 76 */
638 u8 rsvd40[32]; /* DWORD 77 */
639 u8 rsvd41[32]; /* DWORD 78 */
640 u8 rsvd42[32]; /* DWORD 79 */
641 u8 rsvd43[32]; /* DWORD 80 */
642 u8 rsvd44[32]; /* DWORD 81 */
643 u8 rsvd45[32]; /* DWORD 82 */
644 u8 rsvd46[32]; /* DWORD 83 */
645 u8 rsvd47[32]; /* DWORD 84 */
646 u8 rsvd48[32]; /* DWORD 85 */
647 u8 rsvd49[32]; /* DWORD 86 */
648 u8 rsvd50[32]; /* DWORD 87 */
649 u8 rsvd51[32]; /* DWORD 88 */
650 u8 rsvd52[32]; /* DWORD 89 */
651 u8 rsvd53[32]; /* DWORD 90 */
652 u8 rsvd54[32]; /* DWORD 91 */
653 u8 rsvd55[32]; /* DWORD 92 */
654 u8 rsvd56[832]; /* DWORD 93 */
655 u8 rsvd57[32]; /* DWORD 119 */
656 u8 rsvd58[32]; /* DWORD 120 */
657 u8 rsvd59[32]; /* DWORD 121 */
658 u8 rsvd60[32]; /* DWORD 122 */
659 u8 rsvd61[32]; /* DWORD 123 */
660 u8 rsvd62[32]; /* DWORD 124 */
661 u8 rsvd63[32]; /* DWORD 125 */
662 u8 rsvd64[32]; /* DWORD 126 */
663 u8 rsvd65[32]; /* DWORD 127 */
664 u8 rsvd66[61440]; /* DWORD 128 */
665 struct BE_PCICFG_ANON_22_MESSAGE_AMAP message[32];
666 u8 rsvd67[28672]; /* DWORD 2176 */
667 u8 rsvd68[32]; /* DWORD 3072 */
668 u8 rsvd69[1023][32]; /* DWORD 3073 */
669} __packed;
670struct PCICFG0_CSRMAP_AMAP {
671 u32 dw[4096];
672};
673
674struct BE_PCICFG_ANON_24_RSVD_AMAP {
675 u8 rsvd0[32]; /* DWORD 0 */
676} __packed;
677struct PCICFG_ANON_24_RSVD_AMAP {
678 u32 dw[1];
679};
680
681struct BE_PCICFG_ANON_25_RSVD_AMAP {
682 u8 rsvd0[32]; /* DWORD 0 */
683} __packed;
684struct PCICFG_ANON_25_RSVD_AMAP {
685 u32 dw[1];
686};
687
688struct BE_PCICFG_ANON_26_RSVD_AMAP {
689 u8 rsvd0[32]; /* DWORD 0 */
690} __packed;
691struct PCICFG_ANON_26_RSVD_AMAP {
692 u32 dw[1];
693};
694
695struct BE_PCICFG_ANON_27_RSVD_AMAP {
696 u8 rsvd0[32]; /* DWORD 0 */
697 u8 rsvd1[32]; /* DWORD 1 */
698} __packed;
699struct PCICFG_ANON_27_RSVD_AMAP {
700 u32 dw[2];
701};
702
703struct BE_PCICFG_ANON_28_RSVD_AMAP {
704 u8 rsvd0[32]; /* DWORD 0 */
705 u8 rsvd1[3][32]; /* DWORD 1 */
706} __packed;
707struct PCICFG_ANON_28_RSVD_AMAP {
708 u32 dw[4];
709};
710
711struct BE_PCICFG_ANON_29_RSVD_AMAP {
712 u8 rsvd0[32]; /* DWORD 0 */
713 u8 rsvd1[36][32]; /* DWORD 1 */
714} __packed;
715struct PCICFG_ANON_29_RSVD_AMAP {
716 u32 dw[37];
717};
718
719struct BE_PCICFG_ANON_30_RSVD_AMAP {
720 u8 rsvd0[32]; /* DWORD 0 */
721 u8 rsvd1[1930][32]; /* DWORD 1 */
722} __packed;
723struct PCICFG_ANON_30_RSVD_AMAP {
724 u32 dw[1931];
725};
726
727struct BE_PCICFG_ANON_31_MESSAGE_AMAP {
728 struct BE_PCICFG_MSIX_VECTOR_CONTROL_CSR_AMAP vec_ctrl;
729 struct BE_PCICFG_MSIX_MSG_DATA_CSR_AMAP msg_data;
730 struct BE_PCICFG_MSIX_MSG_ADDR_HI_CSR_AMAP addr_hi;
731 struct BE_PCICFG_MSIX_MSG_ADDR_LO_CSR_AMAP addr_low;
732} __packed;
733struct PCICFG_ANON_31_MESSAGE_AMAP {
734 u32 dw[4];
735};
736
737struct BE_PCICFG_ANON_32_RSVD_AMAP {
738 u8 rsvd0[32]; /* DWORD 0 */
739 u8 rsvd1[895][32]; /* DWORD 1 */
740} __packed;
741struct PCICFG_ANON_32_RSVD_AMAP {
742 u32 dw[896];
743};
744
745/* This PCI configuration space register map is for the Networking Function of
746 * BladeEngine (Function 1).
747 */
748struct BE_PCICFG1_CSRMAP_AMAP {
749 struct BE_PCICFG_ID_CSR_AMAP id;
750 u8 rsvd0[32]; /* DWORD 1 */
751 u8 rsvd1[32]; /* DWORD 2 */
752 u8 rsvd2[32]; /* DWORD 3 */
753 struct BE_PCICFG_IOBAR_CSR_AMAP iobar;
754 struct BE_PCICFG_MEMBAR0_CSR_AMAP membar0;
755 struct BE_PCICFG_MEMBAR1_LO_CSR_AMAP membar1_lo;
756 struct BE_PCICFG_MEMBAR1_HI_CSR_AMAP membar1_hi;
757 struct BE_PCICFG_MEMBAR2_LO_CSR_AMAP membar2_lo;
758 struct BE_PCICFG_MEMBAR2_HI_CSR_AMAP membar2_hi;
759 u8 rsvd3[32]; /* DWORD 10 */
760 struct BE_PCICFG_SUBSYSTEM_ID_F1_CSR_AMAP subsystem_id;
761 u8 rsvd4[32]; /* DWORD 12 */
762 u8 rsvd5[32]; /* DWORD 13 */
763 u8 rsvd6[32]; /* DWORD 14 */
764 u8 rsvd7[32]; /* DWORD 15 */
765 struct BE_PCICFG_SEMAPHORE_CSR_AMAP semaphore[4];
766 struct BE_PCICFG_SOFT_RESET_CSR_AMAP soft_reset;
767 u8 rsvd8[32]; /* DWORD 21 */
768 struct BE_PCICFG_SCRATCHPAD_CSR_AMAP scratchpad;
769 u8 rsvd9[32]; /* DWORD 23 */
770 u8 rsvd10[32]; /* DWORD 24 */
771 u8 rsvd11[32]; /* DWORD 25 */
772 u8 rsvd12[32]; /* DWORD 26 */
773 u8 rsvd13[32]; /* DWORD 27 */
774 u8 rsvd14[2][32]; /* DWORD 28 */
775 u8 rsvd15[32]; /* DWORD 30 */
776 u8 rsvd16[32]; /* DWORD 31 */
777 u8 rsvd17[8][32]; /* DWORD 32 */
778 struct BE_PCICFG_UE_STATUS_LOW_CSR_AMAP ue_status_low;
779 struct BE_PCICFG_UE_STATUS_HI_CSR_AMAP ue_status_hi;
780 struct BE_PCICFG_UE_STATUS_LOW_MASK_CSR_AMAP ue_status_low_mask;
781 struct BE_PCICFG_UE_STATUS_HI_MASK_CSR_AMAP ue_status_hi_mask;
782 struct BE_PCICFG_ONLINE0_CSR_AMAP online0;
783 struct BE_PCICFG_ONLINE1_CSR_AMAP online1;
784 u8 rsvd18[32]; /* DWORD 46 */
785 u8 rsvd19[32]; /* DWORD 47 */
786 u8 rsvd20[32]; /* DWORD 48 */
787 u8 rsvd21[32]; /* DWORD 49 */
788 struct BE_PCICFG_HOST_TIMER_INT_CTRL_CSR_AMAP host_timer_int_ctrl;
789 u8 rsvd22[32]; /* DWORD 51 */
790 struct BE_PCICFG_PCIE_CAP_CSR_AMAP pcie_cap;
791 struct BE_PCICFG_PCIE_DEVCAP_CSR_AMAP pcie_devcap;
792 struct BE_PCICFG_PCIE_CONTROL_STATUS_CSR_AMAP pcie_control_status;
793 struct BE_PCICFG_PCIE_LINK_CAP_CSR_AMAP pcie_link_cap;
794 struct BE_PCICFG_PCIE_LINK_STATUS_CSR_AMAP pcie_link_status;
795 struct BE_PCICFG_MSI_CSR_AMAP msi;
796 struct BE_PCICFG_MSIX_TABLE_CSR_AMAP msix_table_offset;
797 struct BE_PCICFG_MSIX_PBA_CSR_AMAP msix_pba_offset;
798 u8 rsvd23[64]; /* DWORD 60 */
799 u8 rsvd24[32]; /* DWORD 62 */
800 u8 rsvd25[32]; /* DWORD 63 */
801 u8 rsvd26[32]; /* DWORD 64 */
802 u8 rsvd27[32]; /* DWORD 65 */
803 u8 rsvd28[32]; /* DWORD 66 */
804 u8 rsvd29[32]; /* DWORD 67 */
805 u8 rsvd30[32]; /* DWORD 68 */
806 u8 rsvd31[32]; /* DWORD 69 */
807 u8 rsvd32[32]; /* DWORD 70 */
808 u8 rsvd33[32]; /* DWORD 71 */
809 u8 rsvd34[32]; /* DWORD 72 */
810 u8 rsvd35[32]; /* DWORD 73 */
811 u8 rsvd36[32]; /* DWORD 74 */
812 u8 rsvd37[128]; /* DWORD 75 */
813 u8 rsvd38[32]; /* DWORD 79 */
814 u8 rsvd39[1184]; /* DWORD 80 */
815 u8 rsvd40[61792]; /* DWORD 117 */
816 struct BE_PCICFG_ANON_31_MESSAGE_AMAP message[32];
817 u8 rsvd41[28672]; /* DWORD 2176 */
818 u8 rsvd42[32]; /* DWORD 3072 */
819 u8 rsvd43[1023][32]; /* DWORD 3073 */
820} __packed;
821struct PCICFG1_CSRMAP_AMAP {
822 u32 dw[4096];
823};
824
825#endif /* __pcicfg_amap_h__ */
diff --git a/drivers/staging/benet/post_codes.h b/drivers/staging/benet/post_codes.h
deleted file mode 100644
index 6d1621f5f5fb..000000000000
--- a/drivers/staging/benet/post_codes.h
+++ /dev/null
@@ -1,111 +0,0 @@
1/*
2 * Copyright (C) 2005 - 2008 ServerEngines
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@serverengines.com
12 *
13 * ServerEngines
14 * 209 N. Fair Oaks Ave
15 * Sunnyvale, CA 94085
16 */
17/*
18 * Autogenerated by srcgen version: 0127
19 */
20#ifndef __post_codes_amap_h__
21#define __post_codes_amap_h__
22
23/* --- MGMT_HBA_POST_STAGE_ENUM --- */
24#define POST_STAGE_POWER_ON_RESET (0) /* State after a cold or warm boot. */
25#define POST_STAGE_AWAITING_HOST_RDY (1) /* ARM boot code awaiting a
26 	go-ahead from the host. */
27#define POST_STAGE_HOST_RDY (2) /* Host has given go-ahead to ARM. */
28#define POST_STAGE_BE_RESET (3) /* Host wants to reset chip, this is a chip
29 workaround */
30#define POST_STAGE_SEEPROM_CS_START (256) /* SEEPROM checksum
31 test start. */
32#define POST_STAGE_SEEPROM_CS_DONE (257) /* SEEPROM checksum test
33 done. */
34#define POST_STAGE_DDR_CONFIG_START (512) /* DDR configuration start. */
35#define POST_STAGE_DDR_CONFIG_DONE (513) /* DDR configuration done. */
36#define POST_STAGE_DDR_CALIBRATE_START (768) /* DDR calibration start. */
37#define POST_STAGE_DDR_CALIBRATE_DONE (769) /* DDR calibration done. */
38#define POST_STAGE_DDR_TEST_START (1024) /* DDR memory test start. */
39#define POST_STAGE_DDR_TEST_DONE (1025) /* DDR memory test done. */
40#define POST_STAGE_REDBOOT_INIT_START (1536) /* Redboot starts execution. */
41#define POST_STAGE_REDBOOT_INIT_DONE (1537) /* Redboot finished execution. */
42#define POST_STAGE_FW_IMAGE_LOAD_START (1792) /* Firmware image load to
43 DDR start. */
44#define POST_STAGE_FW_IMAGE_LOAD_DONE (1793) /* Firmware image load
45 to DDR done. */
46#define POST_STAGE_ARMFW_START (2048) /* ARMfw runtime code
47 starts execution. */
48#define POST_STAGE_DHCP_QUERY_START (2304) /* DHCP server query start. */
49#define POST_STAGE_DHCP_QUERY_DONE (2305) /* DHCP server query done. */
50#define POST_STAGE_BOOT_TARGET_DISCOVERY_START (2560) /* Boot Target
51 Discovery Start. */
52#define POST_STAGE_BOOT_TARGET_DISCOVERY_DONE (2561) /* Boot Target
53 Discovery Done. */
54#define POST_STAGE_RC_OPTION_SET (2816) /* Remote configuration
55 option is set in SEEPROM */
56#define POST_STAGE_SWITCH_LINK (2817) /* Wait for link up on switch */
57#define POST_STAGE_SEND_ICDS_MESSAGE (2818) /* Send the ICDS message
58 to switch */
59#define POST_STAGE_PERFROM_TFTP (2819) /* Download xml using TFTP */
60#define POST_STAGE_PARSE_XML (2820) /* Parse XML file */
61#define POST_STAGE_DOWNLOAD_IMAGE (2821) /* Download IMAGE from
62 TFTP server */
63#define POST_STAGE_FLASH_IMAGE (2822) /* Flash the IMAGE */
64#define POST_STAGE_RC_DONE (2823) /* Remote configuration
65 complete */
66#define POST_STAGE_REBOOT_SYSTEM (2824) /* Upgrade IMAGE done,
67 reboot required */
68#define POST_STAGE_MAC_ADDRESS (3072) /* MAC Address Check */
69#define POST_STAGE_ARMFW_READY (49152) /* ARMfw is done with POST
70 and ready. */
71#define POST_STAGE_ARMFW_UE (61440) /* ARMfw has asserted an
72 unrecoverable error. The
73 lower 3 hex digits of the
74 stage code identify the
75 unique error code.
76 */
77
78/* This structure defines the format of the MPU semaphore
79 * register when used for POST.
80 */
81struct BE_MGMT_HBA_POST_STATUS_STRUCT_AMAP {
82 u8 stage[16]; /* DWORD 0 */
83 u8 rsvd0[10]; /* DWORD 0 */
84 u8 iscsi_driver_loaded; /* DWORD 0 */
85 u8 option_rom_installed; /* DWORD 0 */
86 u8 iscsi_ip_conflict; /* DWORD 0 */
87 u8 iscsi_no_ip; /* DWORD 0 */
88 u8 backup_fw; /* DWORD 0 */
89 u8 error; /* DWORD 0 */
90} __packed;
91struct MGMT_HBA_POST_STATUS_STRUCT_AMAP {
92 u32 dw[1];
93};
94
95/* --- MGMT_HBA_POST_DUMMY_BITS_ENUM --- */
96#define POST_BIT_ISCSI_LOADED (26)
97#define POST_BIT_OPTROM_INST (27)
98#define POST_BIT_BAD_IP_ADDR (28)
99#define POST_BIT_NO_IP_ADDR (29)
100#define POST_BIT_BACKUP_FW (30)
101#define POST_BIT_ERROR (31)
102
103/* --- MGMT_HBA_POST_DUMMY_VALUES_ENUM --- */
104#define POST_ISCSI_DRIVER_LOADED (67108864)
105#define POST_OPTROM_INSTALLED (134217728)
106#define POST_ISCSI_IP_ADDRESS_CONFLICT (268435456)
107#define POST_ISCSI_NO_IP_ADDRESS (536870912)
108#define POST_BACKUP_FW_LOADED (1073741824)
109#define POST_FATAL_ERROR (2147483648)
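/*
 * Sketch (illustration only): the DUMMY_VALUES are the DUMMY_BITS
 * expressed as masks, e.g. POST_FATAL_ERROR == 1u << POST_BIT_ERROR,
 * so a raw semaphore read can be tested directly:
 */
static int example_post_fatal(u32 semaphore_dw0)
{
	return (semaphore_dw0 & (1u << POST_BIT_ERROR)) != 0;
}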
110
111#endif /* __post_codes_amap_h__ */
diff --git a/drivers/staging/benet/regmap.h b/drivers/staging/benet/regmap.h
deleted file mode 100644
index e816ba210e83..000000000000
--- a/drivers/staging/benet/regmap.h
+++ /dev/null
@@ -1,68 +0,0 @@
1/*
2 * Copyright (C) 2005 - 2008 ServerEngines
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@serverengines.com
12 *
13 * ServerEngines
14 * 209 N. Fair Oaks Ave
15 * Sunnyvale, CA 94085
16 */
17/*
18 * Autogenerated by srcgen version: 0127
19 */
20#ifndef __regmap_amap_h__
21#define __regmap_amap_h__
22#include "pcicfg.h"
23#include "ep.h"
24#include "cev.h"
25#include "mpu.h"
26#include "doorbells.h"
27
28/*
29 * This is the control and status register map for BladeEngine, showing
30 * the relative size and offset of each sub-module. The CSR registers
31 * are identical for the network and storage PCI functions. The
32 * CSR map is shown below, followed by details of each block,
33 * in sub-sections. The sub-sections begin with a description
34 * of CSRs that are instantiated in multiple blocks.
35 */
36struct BE_BLADE_ENGINE_CSRMAP_AMAP {
37 struct BE_MPU_CSRMAP_AMAP mpu;
38 u8 rsvd0[8192]; /* DWORD 256 */
39 u8 rsvd1[8192]; /* DWORD 512 */
40 struct BE_CEV_CSRMAP_AMAP cev;
41 u8 rsvd2[8192]; /* DWORD 1024 */
42 u8 rsvd3[8192]; /* DWORD 1280 */
43 u8 rsvd4[8192]; /* DWORD 1536 */
44 u8 rsvd5[8192]; /* DWORD 1792 */
45 u8 rsvd6[8192]; /* DWORD 2048 */
46 u8 rsvd7[8192]; /* DWORD 2304 */
47 u8 rsvd8[8192]; /* DWORD 2560 */
48 u8 rsvd9[8192]; /* DWORD 2816 */
49 u8 rsvd10[8192]; /* DWORD 3072 */
50 u8 rsvd11[8192]; /* DWORD 3328 */
51 u8 rsvd12[8192]; /* DWORD 3584 */
52 u8 rsvd13[8192]; /* DWORD 3840 */
53 u8 rsvd14[8192]; /* DWORD 4096 */
54 u8 rsvd15[8192]; /* DWORD 4352 */
55 u8 rsvd16[8192]; /* DWORD 4608 */
56 u8 rsvd17[8192]; /* DWORD 4864 */
57 u8 rsvd18[8192]; /* DWORD 5120 */
58 u8 rsvd19[8192]; /* DWORD 5376 */
59 u8 rsvd20[8192]; /* DWORD 5632 */
60 u8 rsvd21[8192]; /* DWORD 5888 */
61 u8 rsvd22[8192]; /* DWORD 6144 */
62 u8 rsvd23[17152][32]; /* DWORD 6400 */
63} __packed;
64struct BLADE_ENGINE_CSRMAP_AMAP {
65 u32 dw[23552];
66};
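/*
 * Sketch (illustration only): sub-module offsets fall straight out of
 * this layout; e.g. the CEV block begins at DWORD 768 (MPU's 256
 * dwords plus two 256-dword reserved gaps), i.e. byte offset 0xC00
 * from the CSR BAR.
 */
static void __iomem *example_cev_base(void __iomem *csr_va)
{
	return (u8 __iomem *)csr_va + 768 * sizeof(u32);	/* 0xC00 */
}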
67
68#endif /* __regmap_amap_h__ */