diff options
author | Tejun Heo <tj@kernel.org> | 2009-07-03 18:13:18 -0400 |
---|---|---|
committer | Tejun Heo <tj@kernel.org> | 2009-07-03 18:13:18 -0400 |
commit | c43768cbb7655ea5ff782ae250f6e2ef4297cf98 (patch) | |
tree | 3982e41dde3eecaa3739a5d1a8ed18d04bd74f01 /drivers/net | |
parent | 1a8dd307cc0a2119be4e578c517795464e6dabba (diff) | |
parent | 746a99a5af60ee676afa2ba469ccd1373493c7e7 (diff) |
Merge branch 'master' into for-next
Pull linus#master to merge PER_CPU_DEF_ATTRIBUTES and alpha build fix
changes. As alpha in percpu tree uses 'weak' attribute instead of
inline assembly, there's no need for __used attribute.
Conflicts:
arch/alpha/include/asm/percpu.h
arch/mn10300/kernel/vmlinux.lds.S
include/linux/percpu-defs.h
Diffstat (limited to 'drivers/net')
60 files changed, 2137 insertions, 569 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 892a9e4e275f..c155bd3ec9f1 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig | |||
@@ -1725,6 +1725,7 @@ config TLAN | |||
1725 | 1725 | ||
1726 | config KS8842 | 1726 | config KS8842 |
1727 | tristate "Micrel KSZ8842" | 1727 | tristate "Micrel KSZ8842" |
1728 | depends on HAS_IOMEM | ||
1728 | help | 1729 | help |
1729 | This platform driver is for Micrel KSZ8842 chip. | 1730 | This platform driver is for Micrel KSZ8842 chip. |
1730 | 1731 | ||
@@ -2443,6 +2444,17 @@ config JME | |||
2443 | To compile this driver as a module, choose M here. The module | 2444 | To compile this driver as a module, choose M here. The module |
2444 | will be called jme. | 2445 | will be called jme. |
2445 | 2446 | ||
2447 | config S6GMAC | ||
2448 | tristate "S6105 GMAC ethernet support" | ||
2449 | depends on XTENSA_VARIANT_S6000 | ||
2450 | select PHYLIB | ||
2451 | help | ||
2452 | This driver supports the on chip ethernet device on the | ||
2453 | S6105 xtensa processor. | ||
2454 | |||
2455 | To compile this driver as a module, choose M here. The module | ||
2456 | will be called s6gmac. | ||
2457 | |||
2446 | endif # NETDEV_1000 | 2458 | endif # NETDEV_1000 |
2447 | 2459 | ||
2448 | # | 2460 | # |
diff --git a/drivers/net/Makefile b/drivers/net/Makefile index d366fb2b40e9..4b58a59f211b 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile | |||
@@ -245,6 +245,7 @@ obj-$(CONFIG_XTENSA_XT2000_SONIC) += xtsonic.o | |||
245 | 245 | ||
246 | obj-$(CONFIG_DNET) += dnet.o | 246 | obj-$(CONFIG_DNET) += dnet.o |
247 | obj-$(CONFIG_MACB) += macb.o | 247 | obj-$(CONFIG_MACB) += macb.o |
248 | obj-$(CONFIG_S6GMAC) += s6gmac.o | ||
248 | 249 | ||
249 | obj-$(CONFIG_ARM) += arm/ | 250 | obj-$(CONFIG_ARM) += arm/ |
250 | obj-$(CONFIG_DEV_APPLETALK) += appletalk/ | 251 | obj-$(CONFIG_DEV_APPLETALK) += appletalk/ |
diff --git a/drivers/net/atl1c/atl1c_ethtool.c b/drivers/net/atl1c/atl1c_ethtool.c index e4afbd628c23..607007d75b6f 100644 --- a/drivers/net/atl1c/atl1c_ethtool.c +++ b/drivers/net/atl1c/atl1c_ethtool.c | |||
@@ -281,6 +281,8 @@ static int atl1c_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) | |||
281 | if (wol->wolopts & WAKE_PHY) | 281 | if (wol->wolopts & WAKE_PHY) |
282 | adapter->wol |= AT_WUFC_LNKC; | 282 | adapter->wol |= AT_WUFC_LNKC; |
283 | 283 | ||
284 | device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); | ||
285 | |||
284 | return 0; | 286 | return 0; |
285 | } | 287 | } |
286 | 288 | ||
diff --git a/drivers/net/atl1e/atl1e_ethtool.c b/drivers/net/atl1e/atl1e_ethtool.c index 619c6583e1aa..4003955d7a96 100644 --- a/drivers/net/atl1e/atl1e_ethtool.c +++ b/drivers/net/atl1e/atl1e_ethtool.c | |||
@@ -365,6 +365,8 @@ static int atl1e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) | |||
365 | if (wol->wolopts & WAKE_PHY) | 365 | if (wol->wolopts & WAKE_PHY) |
366 | adapter->wol |= AT_WUFC_LNKC; | 366 | adapter->wol |= AT_WUFC_LNKC; |
367 | 367 | ||
368 | device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); | ||
369 | |||
368 | return 0; | 370 | return 0; |
369 | } | 371 | } |
370 | 372 | ||
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h index b4bb06fdf307..5b4bf3d2cdc2 100644 --- a/drivers/net/benet/be.h +++ b/drivers/net/benet/be.h | |||
@@ -65,7 +65,7 @@ static inline char *nic_name(struct pci_dev *pdev) | |||
65 | #define TX_CQ_LEN 1024 | 65 | #define TX_CQ_LEN 1024 |
66 | #define RX_Q_LEN 1024 /* Does not support any other value */ | 66 | #define RX_Q_LEN 1024 /* Does not support any other value */ |
67 | #define RX_CQ_LEN 1024 | 67 | #define RX_CQ_LEN 1024 |
68 | #define MCC_Q_LEN 64 /* total size not to exceed 8 pages */ | 68 | #define MCC_Q_LEN 128 /* total size not to exceed 8 pages */ |
69 | #define MCC_CQ_LEN 256 | 69 | #define MCC_CQ_LEN 256 |
70 | 70 | ||
71 | #define BE_NAPI_WEIGHT 64 | 71 | #define BE_NAPI_WEIGHT 64 |
@@ -73,7 +73,7 @@ static inline char *nic_name(struct pci_dev *pdev) | |||
73 | #define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST) | 73 | #define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST) |
74 | 74 | ||
75 | #define BE_MAX_LRO_DESCRIPTORS 16 | 75 | #define BE_MAX_LRO_DESCRIPTORS 16 |
76 | #define BE_MAX_FRAGS_PER_FRAME 16 | 76 | #define BE_MAX_FRAGS_PER_FRAME (min((u32) 16, (u32) MAX_SKB_FRAGS)) |
77 | 77 | ||
78 | struct be_dma_mem { | 78 | struct be_dma_mem { |
79 | void *va; | 79 | void *va; |
@@ -91,6 +91,61 @@ struct be_queue_info { | |||
91 | atomic_t used; /* Number of valid elements in the queue */ | 91 | atomic_t used; /* Number of valid elements in the queue */ |
92 | }; | 92 | }; |
93 | 93 | ||
94 | static inline u32 MODULO(u16 val, u16 limit) | ||
95 | { | ||
96 | BUG_ON(limit & (limit - 1)); | ||
97 | return val & (limit - 1); | ||
98 | } | ||
99 | |||
100 | static inline void index_adv(u16 *index, u16 val, u16 limit) | ||
101 | { | ||
102 | *index = MODULO((*index + val), limit); | ||
103 | } | ||
104 | |||
105 | static inline void index_inc(u16 *index, u16 limit) | ||
106 | { | ||
107 | *index = MODULO((*index + 1), limit); | ||
108 | } | ||
109 | |||
110 | static inline void *queue_head_node(struct be_queue_info *q) | ||
111 | { | ||
112 | return q->dma_mem.va + q->head * q->entry_size; | ||
113 | } | ||
114 | |||
115 | static inline void *queue_tail_node(struct be_queue_info *q) | ||
116 | { | ||
117 | return q->dma_mem.va + q->tail * q->entry_size; | ||
118 | } | ||
119 | |||
120 | static inline void queue_head_inc(struct be_queue_info *q) | ||
121 | { | ||
122 | index_inc(&q->head, q->len); | ||
123 | } | ||
124 | |||
125 | static inline void queue_tail_inc(struct be_queue_info *q) | ||
126 | { | ||
127 | index_inc(&q->tail, q->len); | ||
128 | } | ||
129 | |||
130 | |||
131 | struct be_eq_obj { | ||
132 | struct be_queue_info q; | ||
133 | char desc[32]; | ||
134 | |||
135 | /* Adaptive interrupt coalescing (AIC) info */ | ||
136 | bool enable_aic; | ||
137 | u16 min_eqd; /* in usecs */ | ||
138 | u16 max_eqd; /* in usecs */ | ||
139 | u16 cur_eqd; /* in usecs */ | ||
140 | |||
141 | struct napi_struct napi; | ||
142 | }; | ||
143 | |||
144 | struct be_mcc_obj { | ||
145 | struct be_queue_info q; | ||
146 | struct be_queue_info cq; | ||
147 | }; | ||
148 | |||
94 | struct be_ctrl_info { | 149 | struct be_ctrl_info { |
95 | u8 __iomem *csr; | 150 | u8 __iomem *csr; |
96 | u8 __iomem *db; /* Door Bell */ | 151 | u8 __iomem *db; /* Door Bell */ |
@@ -98,11 +153,20 @@ struct be_ctrl_info { | |||
98 | int pci_func; | 153 | int pci_func; |
99 | 154 | ||
100 | /* Mbox used for cmd request/response */ | 155 | /* Mbox used for cmd request/response */ |
101 | spinlock_t cmd_lock; /* For serializing cmds to BE card */ | 156 | spinlock_t mbox_lock; /* For serializing mbox cmds to BE card */ |
102 | struct be_dma_mem mbox_mem; | 157 | struct be_dma_mem mbox_mem; |
103 | /* Mbox mem is adjusted to align to 16 bytes. The allocated addr | 158 | /* Mbox mem is adjusted to align to 16 bytes. The allocated addr |
104 | * is stored for freeing purpose */ | 159 | * is stored for freeing purpose */ |
105 | struct be_dma_mem mbox_mem_alloced; | 160 | struct be_dma_mem mbox_mem_alloced; |
161 | |||
162 | /* MCC Rings */ | ||
163 | struct be_mcc_obj mcc_obj; | ||
164 | spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */ | ||
165 | spinlock_t mcc_cq_lock; | ||
166 | |||
167 | /* MCC Async callback */ | ||
168 | void (*async_cb)(void *adapter, bool link_up); | ||
169 | void *adapter_ctxt; | ||
106 | }; | 170 | }; |
107 | 171 | ||
108 | #include "be_cmds.h" | 172 | #include "be_cmds.h" |
@@ -150,19 +214,6 @@ struct be_stats_obj { | |||
150 | struct be_dma_mem cmd; | 214 | struct be_dma_mem cmd; |
151 | }; | 215 | }; |
152 | 216 | ||
153 | struct be_eq_obj { | ||
154 | struct be_queue_info q; | ||
155 | char desc[32]; | ||
156 | |||
157 | /* Adaptive interrupt coalescing (AIC) info */ | ||
158 | bool enable_aic; | ||
159 | u16 min_eqd; /* in usecs */ | ||
160 | u16 max_eqd; /* in usecs */ | ||
161 | u16 cur_eqd; /* in usecs */ | ||
162 | |||
163 | struct napi_struct napi; | ||
164 | }; | ||
165 | |||
166 | struct be_tx_obj { | 217 | struct be_tx_obj { |
167 | struct be_queue_info q; | 218 | struct be_queue_info q; |
168 | struct be_queue_info cq; | 219 | struct be_queue_info cq; |
@@ -225,8 +276,9 @@ struct be_adapter { | |||
225 | u32 if_handle; /* Used to configure filtering */ | 276 | u32 if_handle; /* Used to configure filtering */ |
226 | u32 pmac_id; /* MAC addr handle used by BE card */ | 277 | u32 pmac_id; /* MAC addr handle used by BE card */ |
227 | 278 | ||
228 | struct be_link_info link; | 279 | bool link_up; |
229 | u32 port_num; | 280 | u32 port_num; |
281 | bool promiscuous; | ||
230 | }; | 282 | }; |
231 | 283 | ||
232 | extern struct ethtool_ops be_ethtool_ops; | 284 | extern struct ethtool_ops be_ethtool_ops; |
@@ -235,22 +287,6 @@ extern struct ethtool_ops be_ethtool_ops; | |||
235 | 287 | ||
236 | #define BE_SET_NETDEV_OPS(netdev, ops) (netdev->netdev_ops = ops) | 288 | #define BE_SET_NETDEV_OPS(netdev, ops) (netdev->netdev_ops = ops) |
237 | 289 | ||
238 | static inline u32 MODULO(u16 val, u16 limit) | ||
239 | { | ||
240 | BUG_ON(limit & (limit - 1)); | ||
241 | return val & (limit - 1); | ||
242 | } | ||
243 | |||
244 | static inline void index_adv(u16 *index, u16 val, u16 limit) | ||
245 | { | ||
246 | *index = MODULO((*index + val), limit); | ||
247 | } | ||
248 | |||
249 | static inline void index_inc(u16 *index, u16 limit) | ||
250 | { | ||
251 | *index = MODULO((*index + 1), limit); | ||
252 | } | ||
253 | |||
254 | #define PAGE_SHIFT_4K 12 | 290 | #define PAGE_SHIFT_4K 12 |
255 | #define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K) | 291 | #define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K) |
256 | 292 | ||
@@ -339,4 +375,6 @@ static inline u8 is_udp_pkt(struct sk_buff *skb) | |||
339 | return val; | 375 | return val; |
340 | } | 376 | } |
341 | 377 | ||
378 | extern void be_cq_notify(struct be_ctrl_info *ctrl, u16 qid, bool arm, | ||
379 | u16 num_popped); | ||
342 | #endif /* BE_H */ | 380 | #endif /* BE_H */ |
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c index d444aed962bc..583517ed56f0 100644 --- a/drivers/net/benet/be_cmds.c +++ b/drivers/net/benet/be_cmds.c | |||
@@ -17,6 +17,133 @@ | |||
17 | 17 | ||
18 | #include "be.h" | 18 | #include "be.h" |
19 | 19 | ||
20 | static void be_mcc_notify(struct be_ctrl_info *ctrl) | ||
21 | { | ||
22 | struct be_queue_info *mccq = &ctrl->mcc_obj.q; | ||
23 | u32 val = 0; | ||
24 | |||
25 | val |= mccq->id & DB_MCCQ_RING_ID_MASK; | ||
26 | val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT; | ||
27 | iowrite32(val, ctrl->db + DB_MCCQ_OFFSET); | ||
28 | } | ||
29 | |||
30 | /* To check if valid bit is set, check the entire word as we don't know | ||
31 | * the endianness of the data (old entry is host endian while a new entry is | ||
32 | * little endian) */ | ||
33 | static inline bool be_mcc_compl_is_new(struct be_mcc_cq_entry *compl) | ||
34 | { | ||
35 | if (compl->flags != 0) { | ||
36 | compl->flags = le32_to_cpu(compl->flags); | ||
37 | BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0); | ||
38 | return true; | ||
39 | } else { | ||
40 | return false; | ||
41 | } | ||
42 | } | ||
43 | |||
44 | /* Need to reset the entire word that houses the valid bit */ | ||
45 | static inline void be_mcc_compl_use(struct be_mcc_cq_entry *compl) | ||
46 | { | ||
47 | compl->flags = 0; | ||
48 | } | ||
49 | |||
50 | static int be_mcc_compl_process(struct be_ctrl_info *ctrl, | ||
51 | struct be_mcc_cq_entry *compl) | ||
52 | { | ||
53 | u16 compl_status, extd_status; | ||
54 | |||
55 | /* Just swap the status to host endian; mcc tag is opaquely copied | ||
56 | * from mcc_wrb */ | ||
57 | be_dws_le_to_cpu(compl, 4); | ||
58 | |||
59 | compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) & | ||
60 | CQE_STATUS_COMPL_MASK; | ||
61 | if (compl_status != MCC_STATUS_SUCCESS) { | ||
62 | extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) & | ||
63 | CQE_STATUS_EXTD_MASK; | ||
64 | printk(KERN_WARNING DRV_NAME | ||
65 | " error in cmd completion: status(compl/extd)=%d/%d\n", | ||
66 | compl_status, extd_status); | ||
67 | return -1; | ||
68 | } | ||
69 | return 0; | ||
70 | } | ||
71 | |||
72 | /* Link state evt is a string of bytes; no need for endian swapping */ | ||
73 | static void be_async_link_state_process(struct be_ctrl_info *ctrl, | ||
74 | struct be_async_event_link_state *evt) | ||
75 | { | ||
76 | ctrl->async_cb(ctrl->adapter_ctxt, | ||
77 | evt->port_link_status == ASYNC_EVENT_LINK_UP ? true : false); | ||
78 | } | ||
79 | |||
80 | static inline bool is_link_state_evt(u32 trailer) | ||
81 | { | ||
82 | return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) & | ||
83 | ASYNC_TRAILER_EVENT_CODE_MASK) == | ||
84 | ASYNC_EVENT_CODE_LINK_STATE); | ||
85 | } | ||
86 | |||
87 | static struct be_mcc_cq_entry *be_mcc_compl_get(struct be_ctrl_info *ctrl) | ||
88 | { | ||
89 | struct be_queue_info *mcc_cq = &ctrl->mcc_obj.cq; | ||
90 | struct be_mcc_cq_entry *compl = queue_tail_node(mcc_cq); | ||
91 | |||
92 | if (be_mcc_compl_is_new(compl)) { | ||
93 | queue_tail_inc(mcc_cq); | ||
94 | return compl; | ||
95 | } | ||
96 | return NULL; | ||
97 | } | ||
98 | |||
99 | void be_process_mcc(struct be_ctrl_info *ctrl) | ||
100 | { | ||
101 | struct be_mcc_cq_entry *compl; | ||
102 | int num = 0; | ||
103 | |||
104 | spin_lock_bh(&ctrl->mcc_cq_lock); | ||
105 | while ((compl = be_mcc_compl_get(ctrl))) { | ||
106 | if (compl->flags & CQE_FLAGS_ASYNC_MASK) { | ||
107 | /* Interpret flags as an async trailer */ | ||
108 | BUG_ON(!is_link_state_evt(compl->flags)); | ||
109 | |||
110 | /* Interpret compl as a async link evt */ | ||
111 | be_async_link_state_process(ctrl, | ||
112 | (struct be_async_event_link_state *) compl); | ||
113 | } else { | ||
114 | be_mcc_compl_process(ctrl, compl); | ||
115 | atomic_dec(&ctrl->mcc_obj.q.used); | ||
116 | } | ||
117 | be_mcc_compl_use(compl); | ||
118 | num++; | ||
119 | } | ||
120 | if (num) | ||
121 | be_cq_notify(ctrl, ctrl->mcc_obj.cq.id, true, num); | ||
122 | spin_unlock_bh(&ctrl->mcc_cq_lock); | ||
123 | } | ||
124 | |||
125 | /* Wait till no more pending mcc requests are present */ | ||
126 | static void be_mcc_wait_compl(struct be_ctrl_info *ctrl) | ||
127 | { | ||
128 | #define mcc_timeout 50000 /* 5s timeout */ | ||
129 | int i; | ||
130 | for (i = 0; i < mcc_timeout; i++) { | ||
131 | be_process_mcc(ctrl); | ||
132 | if (atomic_read(&ctrl->mcc_obj.q.used) == 0) | ||
133 | break; | ||
134 | udelay(100); | ||
135 | } | ||
136 | if (i == mcc_timeout) | ||
137 | printk(KERN_WARNING DRV_NAME "mcc poll timed out\n"); | ||
138 | } | ||
139 | |||
140 | /* Notify MCC requests and wait for completion */ | ||
141 | static void be_mcc_notify_wait(struct be_ctrl_info *ctrl) | ||
142 | { | ||
143 | be_mcc_notify(ctrl); | ||
144 | be_mcc_wait_compl(ctrl); | ||
145 | } | ||
146 | |||
20 | static int be_mbox_db_ready_wait(void __iomem *db) | 147 | static int be_mbox_db_ready_wait(void __iomem *db) |
21 | { | 148 | { |
22 | int cnt = 0, wait = 5; | 149 | int cnt = 0, wait = 5; |
@@ -44,11 +171,11 @@ static int be_mbox_db_ready_wait(void __iomem *db) | |||
44 | 171 | ||
45 | /* | 172 | /* |
46 | * Insert the mailbox address into the doorbell in two steps | 173 | * Insert the mailbox address into the doorbell in two steps |
174 | * Polls on the mbox doorbell till a command completion (or a timeout) occurs | ||
47 | */ | 175 | */ |
48 | static int be_mbox_db_ring(struct be_ctrl_info *ctrl) | 176 | static int be_mbox_db_ring(struct be_ctrl_info *ctrl) |
49 | { | 177 | { |
50 | int status; | 178 | int status; |
51 | u16 compl_status, extd_status; | ||
52 | u32 val = 0; | 179 | u32 val = 0; |
53 | void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET; | 180 | void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET; |
54 | struct be_dma_mem *mbox_mem = &ctrl->mbox_mem; | 181 | struct be_dma_mem *mbox_mem = &ctrl->mbox_mem; |
@@ -79,24 +206,17 @@ static int be_mbox_db_ring(struct be_ctrl_info *ctrl) | |||
79 | if (status != 0) | 206 | if (status != 0) |
80 | return status; | 207 | return status; |
81 | 208 | ||
82 | /* compl entry has been made now */ | 209 | /* A cq entry has been made now */ |
83 | be_dws_le_to_cpu(cqe, sizeof(*cqe)); | 210 | if (be_mcc_compl_is_new(cqe)) { |
84 | if (!(cqe->flags & CQE_FLAGS_VALID_MASK)) { | 211 | status = be_mcc_compl_process(ctrl, &mbox->cqe); |
85 | printk(KERN_WARNING DRV_NAME ": ERROR invalid mbox compl\n"); | 212 | be_mcc_compl_use(cqe); |
213 | if (status) | ||
214 | return status; | ||
215 | } else { | ||
216 | printk(KERN_WARNING DRV_NAME "invalid mailbox completion\n"); | ||
86 | return -1; | 217 | return -1; |
87 | } | 218 | } |
88 | 219 | return 0; | |
89 | compl_status = (cqe->status >> CQE_STATUS_COMPL_SHIFT) & | ||
90 | CQE_STATUS_COMPL_MASK; | ||
91 | if (compl_status != MCC_STATUS_SUCCESS) { | ||
92 | extd_status = (cqe->status >> CQE_STATUS_EXTD_SHIFT) & | ||
93 | CQE_STATUS_EXTD_MASK; | ||
94 | printk(KERN_WARNING DRV_NAME | ||
95 | ": ERROR in cmd compl. status(compl/extd)=%d/%d\n", | ||
96 | compl_status, extd_status); | ||
97 | } | ||
98 | |||
99 | return compl_status; | ||
100 | } | 220 | } |
101 | 221 | ||
102 | static int be_POST_stage_get(struct be_ctrl_info *ctrl, u16 *stage) | 222 | static int be_POST_stage_get(struct be_ctrl_info *ctrl, u16 *stage) |
@@ -235,6 +355,18 @@ static inline struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem) | |||
235 | return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb; | 355 | return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb; |
236 | } | 356 | } |
237 | 357 | ||
358 | static inline struct be_mcc_wrb *wrb_from_mcc(struct be_queue_info *mccq) | ||
359 | { | ||
360 | struct be_mcc_wrb *wrb = NULL; | ||
361 | if (atomic_read(&mccq->used) < mccq->len) { | ||
362 | wrb = queue_head_node(mccq); | ||
363 | queue_head_inc(mccq); | ||
364 | atomic_inc(&mccq->used); | ||
365 | memset(wrb, 0, sizeof(*wrb)); | ||
366 | } | ||
367 | return wrb; | ||
368 | } | ||
369 | |||
238 | int be_cmd_eq_create(struct be_ctrl_info *ctrl, | 370 | int be_cmd_eq_create(struct be_ctrl_info *ctrl, |
239 | struct be_queue_info *eq, int eq_delay) | 371 | struct be_queue_info *eq, int eq_delay) |
240 | { | 372 | { |
@@ -244,7 +376,7 @@ int be_cmd_eq_create(struct be_ctrl_info *ctrl, | |||
244 | struct be_dma_mem *q_mem = &eq->dma_mem; | 376 | struct be_dma_mem *q_mem = &eq->dma_mem; |
245 | int status; | 377 | int status; |
246 | 378 | ||
247 | spin_lock(&ctrl->cmd_lock); | 379 | spin_lock(&ctrl->mbox_lock); |
248 | memset(wrb, 0, sizeof(*wrb)); | 380 | memset(wrb, 0, sizeof(*wrb)); |
249 | 381 | ||
250 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | 382 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); |
@@ -272,7 +404,7 @@ int be_cmd_eq_create(struct be_ctrl_info *ctrl, | |||
272 | eq->id = le16_to_cpu(resp->eq_id); | 404 | eq->id = le16_to_cpu(resp->eq_id); |
273 | eq->created = true; | 405 | eq->created = true; |
274 | } | 406 | } |
275 | spin_unlock(&ctrl->cmd_lock); | 407 | spin_unlock(&ctrl->mbox_lock); |
276 | return status; | 408 | return status; |
277 | } | 409 | } |
278 | 410 | ||
@@ -284,7 +416,7 @@ int be_cmd_mac_addr_query(struct be_ctrl_info *ctrl, u8 *mac_addr, | |||
284 | struct be_cmd_resp_mac_query *resp = embedded_payload(wrb); | 416 | struct be_cmd_resp_mac_query *resp = embedded_payload(wrb); |
285 | int status; | 417 | int status; |
286 | 418 | ||
287 | spin_lock(&ctrl->cmd_lock); | 419 | spin_lock(&ctrl->mbox_lock); |
288 | memset(wrb, 0, sizeof(*wrb)); | 420 | memset(wrb, 0, sizeof(*wrb)); |
289 | 421 | ||
290 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | 422 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); |
@@ -304,7 +436,7 @@ int be_cmd_mac_addr_query(struct be_ctrl_info *ctrl, u8 *mac_addr, | |||
304 | if (!status) | 436 | if (!status) |
305 | memcpy(mac_addr, resp->mac.addr, ETH_ALEN); | 437 | memcpy(mac_addr, resp->mac.addr, ETH_ALEN); |
306 | 438 | ||
307 | spin_unlock(&ctrl->cmd_lock); | 439 | spin_unlock(&ctrl->mbox_lock); |
308 | return status; | 440 | return status; |
309 | } | 441 | } |
310 | 442 | ||
@@ -315,7 +447,7 @@ int be_cmd_pmac_add(struct be_ctrl_info *ctrl, u8 *mac_addr, | |||
315 | struct be_cmd_req_pmac_add *req = embedded_payload(wrb); | 447 | struct be_cmd_req_pmac_add *req = embedded_payload(wrb); |
316 | int status; | 448 | int status; |
317 | 449 | ||
318 | spin_lock(&ctrl->cmd_lock); | 450 | spin_lock(&ctrl->mbox_lock); |
319 | memset(wrb, 0, sizeof(*wrb)); | 451 | memset(wrb, 0, sizeof(*wrb)); |
320 | 452 | ||
321 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | 453 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); |
@@ -332,7 +464,7 @@ int be_cmd_pmac_add(struct be_ctrl_info *ctrl, u8 *mac_addr, | |||
332 | *pmac_id = le32_to_cpu(resp->pmac_id); | 464 | *pmac_id = le32_to_cpu(resp->pmac_id); |
333 | } | 465 | } |
334 | 466 | ||
335 | spin_unlock(&ctrl->cmd_lock); | 467 | spin_unlock(&ctrl->mbox_lock); |
336 | return status; | 468 | return status; |
337 | } | 469 | } |
338 | 470 | ||
@@ -342,7 +474,7 @@ int be_cmd_pmac_del(struct be_ctrl_info *ctrl, u32 if_id, u32 pmac_id) | |||
342 | struct be_cmd_req_pmac_del *req = embedded_payload(wrb); | 474 | struct be_cmd_req_pmac_del *req = embedded_payload(wrb); |
343 | int status; | 475 | int status; |
344 | 476 | ||
345 | spin_lock(&ctrl->cmd_lock); | 477 | spin_lock(&ctrl->mbox_lock); |
346 | memset(wrb, 0, sizeof(*wrb)); | 478 | memset(wrb, 0, sizeof(*wrb)); |
347 | 479 | ||
348 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | 480 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); |
@@ -354,7 +486,7 @@ int be_cmd_pmac_del(struct be_ctrl_info *ctrl, u32 if_id, u32 pmac_id) | |||
354 | req->pmac_id = cpu_to_le32(pmac_id); | 486 | req->pmac_id = cpu_to_le32(pmac_id); |
355 | 487 | ||
356 | status = be_mbox_db_ring(ctrl); | 488 | status = be_mbox_db_ring(ctrl); |
357 | spin_unlock(&ctrl->cmd_lock); | 489 | spin_unlock(&ctrl->mbox_lock); |
358 | 490 | ||
359 | return status; | 491 | return status; |
360 | } | 492 | } |
@@ -370,7 +502,7 @@ int be_cmd_cq_create(struct be_ctrl_info *ctrl, | |||
370 | void *ctxt = &req->context; | 502 | void *ctxt = &req->context; |
371 | int status; | 503 | int status; |
372 | 504 | ||
373 | spin_lock(&ctrl->cmd_lock); | 505 | spin_lock(&ctrl->mbox_lock); |
374 | memset(wrb, 0, sizeof(*wrb)); | 506 | memset(wrb, 0, sizeof(*wrb)); |
375 | 507 | ||
376 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | 508 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); |
@@ -388,7 +520,7 @@ int be_cmd_cq_create(struct be_ctrl_info *ctrl, | |||
388 | AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts); | 520 | AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts); |
389 | AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1); | 521 | AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1); |
390 | AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id); | 522 | AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id); |
391 | AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 0); | 523 | AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1); |
392 | AMAP_SET_BITS(struct amap_cq_context, func, ctxt, ctrl->pci_func); | 524 | AMAP_SET_BITS(struct amap_cq_context, func, ctxt, ctrl->pci_func); |
393 | be_dws_cpu_to_le(ctxt, sizeof(req->context)); | 525 | be_dws_cpu_to_le(ctxt, sizeof(req->context)); |
394 | 526 | ||
@@ -399,7 +531,56 @@ int be_cmd_cq_create(struct be_ctrl_info *ctrl, | |||
399 | cq->id = le16_to_cpu(resp->cq_id); | 531 | cq->id = le16_to_cpu(resp->cq_id); |
400 | cq->created = true; | 532 | cq->created = true; |
401 | } | 533 | } |
402 | spin_unlock(&ctrl->cmd_lock); | 534 | spin_unlock(&ctrl->mbox_lock); |
535 | |||
536 | return status; | ||
537 | } | ||
538 | |||
539 | static u32 be_encoded_q_len(int q_len) | ||
540 | { | ||
541 | u32 len_encoded = fls(q_len); /* log2(len) + 1 */ | ||
542 | if (len_encoded == 16) | ||
543 | len_encoded = 0; | ||
544 | return len_encoded; | ||
545 | } | ||
546 | |||
547 | int be_cmd_mccq_create(struct be_ctrl_info *ctrl, | ||
548 | struct be_queue_info *mccq, | ||
549 | struct be_queue_info *cq) | ||
550 | { | ||
551 | struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); | ||
552 | struct be_cmd_req_mcc_create *req = embedded_payload(wrb); | ||
553 | struct be_dma_mem *q_mem = &mccq->dma_mem; | ||
554 | void *ctxt = &req->context; | ||
555 | int status; | ||
556 | |||
557 | spin_lock(&ctrl->mbox_lock); | ||
558 | memset(wrb, 0, sizeof(*wrb)); | ||
559 | |||
560 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | ||
561 | |||
562 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | ||
563 | OPCODE_COMMON_MCC_CREATE, sizeof(*req)); | ||
564 | |||
565 | req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); | ||
566 | |||
567 | AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt, ctrl->pci_func); | ||
568 | AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1); | ||
569 | AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt, | ||
570 | be_encoded_q_len(mccq->len)); | ||
571 | AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id); | ||
572 | |||
573 | be_dws_cpu_to_le(ctxt, sizeof(req->context)); | ||
574 | |||
575 | be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); | ||
576 | |||
577 | status = be_mbox_db_ring(ctrl); | ||
578 | if (!status) { | ||
579 | struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb); | ||
580 | mccq->id = le16_to_cpu(resp->id); | ||
581 | mccq->created = true; | ||
582 | } | ||
583 | spin_unlock(&ctrl->mbox_lock); | ||
403 | 584 | ||
404 | return status; | 585 | return status; |
405 | } | 586 | } |
@@ -415,7 +596,7 @@ int be_cmd_txq_create(struct be_ctrl_info *ctrl, | |||
415 | int status; | 596 | int status; |
416 | u32 len_encoded; | 597 | u32 len_encoded; |
417 | 598 | ||
418 | spin_lock(&ctrl->cmd_lock); | 599 | spin_lock(&ctrl->mbox_lock); |
419 | memset(wrb, 0, sizeof(*wrb)); | 600 | memset(wrb, 0, sizeof(*wrb)); |
420 | 601 | ||
421 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | 602 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); |
@@ -446,7 +627,7 @@ int be_cmd_txq_create(struct be_ctrl_info *ctrl, | |||
446 | txq->id = le16_to_cpu(resp->cid); | 627 | txq->id = le16_to_cpu(resp->cid); |
447 | txq->created = true; | 628 | txq->created = true; |
448 | } | 629 | } |
449 | spin_unlock(&ctrl->cmd_lock); | 630 | spin_unlock(&ctrl->mbox_lock); |
450 | 631 | ||
451 | return status; | 632 | return status; |
452 | } | 633 | } |
@@ -460,7 +641,7 @@ int be_cmd_rxq_create(struct be_ctrl_info *ctrl, | |||
460 | struct be_dma_mem *q_mem = &rxq->dma_mem; | 641 | struct be_dma_mem *q_mem = &rxq->dma_mem; |
461 | int status; | 642 | int status; |
462 | 643 | ||
463 | spin_lock(&ctrl->cmd_lock); | 644 | spin_lock(&ctrl->mbox_lock); |
464 | memset(wrb, 0, sizeof(*wrb)); | 645 | memset(wrb, 0, sizeof(*wrb)); |
465 | 646 | ||
466 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | 647 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); |
@@ -482,7 +663,7 @@ int be_cmd_rxq_create(struct be_ctrl_info *ctrl, | |||
482 | rxq->id = le16_to_cpu(resp->id); | 663 | rxq->id = le16_to_cpu(resp->id); |
483 | rxq->created = true; | 664 | rxq->created = true; |
484 | } | 665 | } |
485 | spin_unlock(&ctrl->cmd_lock); | 666 | spin_unlock(&ctrl->mbox_lock); |
486 | 667 | ||
487 | return status; | 668 | return status; |
488 | } | 669 | } |
@@ -496,7 +677,7 @@ int be_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q, | |||
496 | u8 subsys = 0, opcode = 0; | 677 | u8 subsys = 0, opcode = 0; |
497 | int status; | 678 | int status; |
498 | 679 | ||
499 | spin_lock(&ctrl->cmd_lock); | 680 | spin_lock(&ctrl->mbox_lock); |
500 | 681 | ||
501 | memset(wrb, 0, sizeof(*wrb)); | 682 | memset(wrb, 0, sizeof(*wrb)); |
502 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | 683 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); |
@@ -518,6 +699,10 @@ int be_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q, | |||
518 | subsys = CMD_SUBSYSTEM_ETH; | 699 | subsys = CMD_SUBSYSTEM_ETH; |
519 | opcode = OPCODE_ETH_RX_DESTROY; | 700 | opcode = OPCODE_ETH_RX_DESTROY; |
520 | break; | 701 | break; |
702 | case QTYPE_MCCQ: | ||
703 | subsys = CMD_SUBSYSTEM_COMMON; | ||
704 | opcode = OPCODE_COMMON_MCC_DESTROY; | ||
705 | break; | ||
521 | default: | 706 | default: |
522 | printk(KERN_WARNING DRV_NAME ":bad Q type in Q destroy cmd\n"); | 707 | printk(KERN_WARNING DRV_NAME ":bad Q type in Q destroy cmd\n"); |
523 | status = -1; | 708 | status = -1; |
@@ -528,7 +713,7 @@ int be_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q, | |||
528 | 713 | ||
529 | status = be_mbox_db_ring(ctrl); | 714 | status = be_mbox_db_ring(ctrl); |
530 | err: | 715 | err: |
531 | spin_unlock(&ctrl->cmd_lock); | 716 | spin_unlock(&ctrl->mbox_lock); |
532 | 717 | ||
533 | return status; | 718 | return status; |
534 | } | 719 | } |
@@ -541,7 +726,7 @@ int be_cmd_if_create(struct be_ctrl_info *ctrl, u32 flags, u8 *mac, | |||
541 | struct be_cmd_req_if_create *req = embedded_payload(wrb); | 726 | struct be_cmd_req_if_create *req = embedded_payload(wrb); |
542 | int status; | 727 | int status; |
543 | 728 | ||
544 | spin_lock(&ctrl->cmd_lock); | 729 | spin_lock(&ctrl->mbox_lock); |
545 | memset(wrb, 0, sizeof(*wrb)); | 730 | memset(wrb, 0, sizeof(*wrb)); |
546 | 731 | ||
547 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | 732 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); |
@@ -562,7 +747,7 @@ int be_cmd_if_create(struct be_ctrl_info *ctrl, u32 flags, u8 *mac, | |||
562 | *pmac_id = le32_to_cpu(resp->pmac_id); | 747 | *pmac_id = le32_to_cpu(resp->pmac_id); |
563 | } | 748 | } |
564 | 749 | ||
565 | spin_unlock(&ctrl->cmd_lock); | 750 | spin_unlock(&ctrl->mbox_lock); |
566 | return status; | 751 | return status; |
567 | } | 752 | } |
568 | 753 | ||
@@ -572,7 +757,7 @@ int be_cmd_if_destroy(struct be_ctrl_info *ctrl, u32 interface_id) | |||
572 | struct be_cmd_req_if_destroy *req = embedded_payload(wrb); | 757 | struct be_cmd_req_if_destroy *req = embedded_payload(wrb); |
573 | int status; | 758 | int status; |
574 | 759 | ||
575 | spin_lock(&ctrl->cmd_lock); | 760 | spin_lock(&ctrl->mbox_lock); |
576 | memset(wrb, 0, sizeof(*wrb)); | 761 | memset(wrb, 0, sizeof(*wrb)); |
577 | 762 | ||
578 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | 763 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); |
@@ -583,7 +768,7 @@ int be_cmd_if_destroy(struct be_ctrl_info *ctrl, u32 interface_id) | |||
583 | req->interface_id = cpu_to_le32(interface_id); | 768 | req->interface_id = cpu_to_le32(interface_id); |
584 | status = be_mbox_db_ring(ctrl); | 769 | status = be_mbox_db_ring(ctrl); |
585 | 770 | ||
586 | spin_unlock(&ctrl->cmd_lock); | 771 | spin_unlock(&ctrl->mbox_lock); |
587 | 772 | ||
588 | return status; | 773 | return status; |
589 | } | 774 | } |
@@ -598,7 +783,7 @@ int be_cmd_get_stats(struct be_ctrl_info *ctrl, struct be_dma_mem *nonemb_cmd) | |||
598 | struct be_sge *sge = nonembedded_sgl(wrb); | 783 | struct be_sge *sge = nonembedded_sgl(wrb); |
599 | int status; | 784 | int status; |
600 | 785 | ||
601 | spin_lock(&ctrl->cmd_lock); | 786 | spin_lock(&ctrl->mbox_lock); |
602 | memset(wrb, 0, sizeof(*wrb)); | 787 | memset(wrb, 0, sizeof(*wrb)); |
603 | 788 | ||
604 | memset(req, 0, sizeof(*req)); | 789 | memset(req, 0, sizeof(*req)); |
@@ -617,18 +802,20 @@ int be_cmd_get_stats(struct be_ctrl_info *ctrl, struct be_dma_mem *nonemb_cmd) | |||
617 | be_dws_le_to_cpu(&resp->hw_stats, sizeof(resp->hw_stats)); | 802 | be_dws_le_to_cpu(&resp->hw_stats, sizeof(resp->hw_stats)); |
618 | } | 803 | } |
619 | 804 | ||
620 | spin_unlock(&ctrl->cmd_lock); | 805 | spin_unlock(&ctrl->mbox_lock); |
621 | return status; | 806 | return status; |
622 | } | 807 | } |
623 | 808 | ||
624 | int be_cmd_link_status_query(struct be_ctrl_info *ctrl, | 809 | int be_cmd_link_status_query(struct be_ctrl_info *ctrl, |
625 | struct be_link_info *link) | 810 | bool *link_up) |
626 | { | 811 | { |
627 | struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); | 812 | struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); |
628 | struct be_cmd_req_link_status *req = embedded_payload(wrb); | 813 | struct be_cmd_req_link_status *req = embedded_payload(wrb); |
629 | int status; | 814 | int status; |
630 | 815 | ||
631 | spin_lock(&ctrl->cmd_lock); | 816 | spin_lock(&ctrl->mbox_lock); |
817 | |||
818 | *link_up = false; | ||
632 | memset(wrb, 0, sizeof(*wrb)); | 819 | memset(wrb, 0, sizeof(*wrb)); |
633 | 820 | ||
634 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | 821 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); |
@@ -639,14 +826,11 @@ int be_cmd_link_status_query(struct be_ctrl_info *ctrl, | |||
639 | status = be_mbox_db_ring(ctrl); | 826 | status = be_mbox_db_ring(ctrl); |
640 | if (!status) { | 827 | if (!status) { |
641 | struct be_cmd_resp_link_status *resp = embedded_payload(wrb); | 828 | struct be_cmd_resp_link_status *resp = embedded_payload(wrb); |
642 | link->speed = resp->mac_speed; | 829 | if (resp->mac_speed != PHY_LINK_SPEED_ZERO) |
643 | link->duplex = resp->mac_duplex; | 830 | *link_up = true; |
644 | link->fault = resp->mac_fault; | ||
645 | } else { | ||
646 | link->speed = PHY_LINK_SPEED_ZERO; | ||
647 | } | 831 | } |
648 | 832 | ||
649 | spin_unlock(&ctrl->cmd_lock); | 833 | spin_unlock(&ctrl->mbox_lock); |
650 | return status; | 834 | return status; |
651 | } | 835 | } |
652 | 836 | ||
@@ -656,7 +840,7 @@ int be_cmd_get_fw_ver(struct be_ctrl_info *ctrl, char *fw_ver) | |||
656 | struct be_cmd_req_get_fw_version *req = embedded_payload(wrb); | 840 | struct be_cmd_req_get_fw_version *req = embedded_payload(wrb); |
657 | int status; | 841 | int status; |
658 | 842 | ||
659 | spin_lock(&ctrl->cmd_lock); | 843 | spin_lock(&ctrl->mbox_lock); |
660 | memset(wrb, 0, sizeof(*wrb)); | 844 | memset(wrb, 0, sizeof(*wrb)); |
661 | 845 | ||
662 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | 846 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); |
@@ -670,7 +854,7 @@ int be_cmd_get_fw_ver(struct be_ctrl_info *ctrl, char *fw_ver) | |||
670 | strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN); | 854 | strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN); |
671 | } | 855 | } |
672 | 856 | ||
673 | spin_unlock(&ctrl->cmd_lock); | 857 | spin_unlock(&ctrl->mbox_lock); |
674 | return status; | 858 | return status; |
675 | } | 859 | } |
676 | 860 | ||
@@ -681,7 +865,7 @@ int be_cmd_modify_eqd(struct be_ctrl_info *ctrl, u32 eq_id, u32 eqd) | |||
681 | struct be_cmd_req_modify_eq_delay *req = embedded_payload(wrb); | 865 | struct be_cmd_req_modify_eq_delay *req = embedded_payload(wrb); |
682 | int status; | 866 | int status; |
683 | 867 | ||
684 | spin_lock(&ctrl->cmd_lock); | 868 | spin_lock(&ctrl->mbox_lock); |
685 | memset(wrb, 0, sizeof(*wrb)); | 869 | memset(wrb, 0, sizeof(*wrb)); |
686 | 870 | ||
687 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | 871 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); |
@@ -696,7 +880,7 @@ int be_cmd_modify_eqd(struct be_ctrl_info *ctrl, u32 eq_id, u32 eqd) | |||
696 | 880 | ||
697 | status = be_mbox_db_ring(ctrl); | 881 | status = be_mbox_db_ring(ctrl); |
698 | 882 | ||
699 | spin_unlock(&ctrl->cmd_lock); | 883 | spin_unlock(&ctrl->mbox_lock); |
700 | return status; | 884 | return status; |
701 | } | 885 | } |
702 | 886 | ||
@@ -707,7 +891,7 @@ int be_cmd_vlan_config(struct be_ctrl_info *ctrl, u32 if_id, u16 *vtag_array, | |||
707 | struct be_cmd_req_vlan_config *req = embedded_payload(wrb); | 891 | struct be_cmd_req_vlan_config *req = embedded_payload(wrb); |
708 | int status; | 892 | int status; |
709 | 893 | ||
710 | spin_lock(&ctrl->cmd_lock); | 894 | spin_lock(&ctrl->mbox_lock); |
711 | memset(wrb, 0, sizeof(*wrb)); | 895 | memset(wrb, 0, sizeof(*wrb)); |
712 | 896 | ||
713 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | 897 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); |
@@ -726,18 +910,22 @@ int be_cmd_vlan_config(struct be_ctrl_info *ctrl, u32 if_id, u16 *vtag_array, | |||
726 | 910 | ||
727 | status = be_mbox_db_ring(ctrl); | 911 | status = be_mbox_db_ring(ctrl); |
728 | 912 | ||
729 | spin_unlock(&ctrl->cmd_lock); | 913 | spin_unlock(&ctrl->mbox_lock); |
730 | return status; | 914 | return status; |
731 | } | 915 | } |
732 | 916 | ||
917 | /* Use MCC for this command as it may be called in BH context */ | ||
733 | int be_cmd_promiscuous_config(struct be_ctrl_info *ctrl, u8 port_num, bool en) | 918 | int be_cmd_promiscuous_config(struct be_ctrl_info *ctrl, u8 port_num, bool en) |
734 | { | 919 | { |
735 | struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); | 920 | struct be_mcc_wrb *wrb; |
736 | struct be_cmd_req_promiscuous_config *req = embedded_payload(wrb); | 921 | struct be_cmd_req_promiscuous_config *req; |
737 | int status; | ||
738 | 922 | ||
739 | spin_lock(&ctrl->cmd_lock); | 923 | spin_lock_bh(&ctrl->mcc_lock); |
740 | memset(wrb, 0, sizeof(*wrb)); | 924 | |
925 | wrb = wrb_from_mcc(&ctrl->mcc_obj.q); | ||
926 | BUG_ON(!wrb); | ||
927 | |||
928 | req = embedded_payload(wrb); | ||
741 | 929 | ||
742 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | 930 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); |
743 | 931 | ||
@@ -749,21 +937,29 @@ int be_cmd_promiscuous_config(struct be_ctrl_info *ctrl, u8 port_num, bool en) | |||
749 | else | 937 | else |
750 | req->port0_promiscuous = en; | 938 | req->port0_promiscuous = en; |
751 | 939 | ||
752 | status = be_mbox_db_ring(ctrl); | 940 | be_mcc_notify_wait(ctrl); |
753 | 941 | ||
754 | spin_unlock(&ctrl->cmd_lock); | 942 | spin_unlock_bh(&ctrl->mcc_lock); |
755 | return status; | 943 | return 0; |
756 | } | 944 | } |
757 | 945 | ||
758 | int be_cmd_mcast_mac_set(struct be_ctrl_info *ctrl, u32 if_id, u8 *mac_table, | 946 | /* |
759 | u32 num, bool promiscuous) | 947 | * Use MCC for this command as it may be called in BH context |
948 | * (mc == NULL) => multicast promiscous | ||
949 | */ | ||
950 | int be_cmd_multicast_set(struct be_ctrl_info *ctrl, u32 if_id, | ||
951 | struct dev_mc_list *mc_list, u32 mc_count) | ||
760 | { | 952 | { |
761 | struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); | 953 | #define BE_MAX_MC 32 /* set mcast promisc if > 32 */ |
762 | struct be_cmd_req_mcast_mac_config *req = embedded_payload(wrb); | 954 | struct be_mcc_wrb *wrb; |
763 | int status; | 955 | struct be_cmd_req_mcast_mac_config *req; |
764 | 956 | ||
765 | spin_lock(&ctrl->cmd_lock); | 957 | spin_lock_bh(&ctrl->mcc_lock); |
766 | memset(wrb, 0, sizeof(*wrb)); | 958 | |
959 | wrb = wrb_from_mcc(&ctrl->mcc_obj.q); | ||
960 | BUG_ON(!wrb); | ||
961 | |||
962 | req = embedded_payload(wrb); | ||
767 | 963 | ||
768 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | 964 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); |
769 | 965 | ||
@@ -771,17 +967,23 @@ int be_cmd_mcast_mac_set(struct be_ctrl_info *ctrl, u32 if_id, u8 *mac_table, | |||
771 | OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req)); | 967 | OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req)); |
772 | 968 | ||
773 | req->interface_id = if_id; | 969 | req->interface_id = if_id; |
774 | req->promiscuous = promiscuous; | 970 | if (mc_list && mc_count <= BE_MAX_MC) { |
775 | if (!promiscuous) { | 971 | int i; |
776 | req->num_mac = cpu_to_le16(num); | 972 | struct dev_mc_list *mc; |
777 | if (num) | 973 | |
778 | memcpy(req->mac, mac_table, ETH_ALEN * num); | 974 | req->num_mac = cpu_to_le16(mc_count); |
975 | |||
976 | for (mc = mc_list, i = 0; mc; mc = mc->next, i++) | ||
977 | memcpy(req->mac[i].byte, mc->dmi_addr, ETH_ALEN); | ||
978 | } else { | ||
979 | req->promiscuous = 1; | ||
779 | } | 980 | } |
780 | 981 | ||
781 | status = be_mbox_db_ring(ctrl); | 982 | be_mcc_notify_wait(ctrl); |
782 | 983 | ||
783 | spin_unlock(&ctrl->cmd_lock); | 984 | spin_unlock_bh(&ctrl->mcc_lock); |
784 | return status; | 985 | |
986 | return 0; | ||
785 | } | 987 | } |
786 | 988 | ||
787 | int be_cmd_set_flow_control(struct be_ctrl_info *ctrl, u32 tx_fc, u32 rx_fc) | 989 | int be_cmd_set_flow_control(struct be_ctrl_info *ctrl, u32 tx_fc, u32 rx_fc) |
@@ -790,7 +992,7 @@ int be_cmd_set_flow_control(struct be_ctrl_info *ctrl, u32 tx_fc, u32 rx_fc) | |||
790 | struct be_cmd_req_set_flow_control *req = embedded_payload(wrb); | 992 | struct be_cmd_req_set_flow_control *req = embedded_payload(wrb); |
791 | int status; | 993 | int status; |
792 | 994 | ||
793 | spin_lock(&ctrl->cmd_lock); | 995 | spin_lock(&ctrl->mbox_lock); |
794 | 996 | ||
795 | memset(wrb, 0, sizeof(*wrb)); | 997 | memset(wrb, 0, sizeof(*wrb)); |
796 | 998 | ||
@@ -804,7 +1006,7 @@ int be_cmd_set_flow_control(struct be_ctrl_info *ctrl, u32 tx_fc, u32 rx_fc) | |||
804 | 1006 | ||
805 | status = be_mbox_db_ring(ctrl); | 1007 | status = be_mbox_db_ring(ctrl); |
806 | 1008 | ||
807 | spin_unlock(&ctrl->cmd_lock); | 1009 | spin_unlock(&ctrl->mbox_lock); |
808 | return status; | 1010 | return status; |
809 | } | 1011 | } |
810 | 1012 | ||
@@ -814,7 +1016,7 @@ int be_cmd_get_flow_control(struct be_ctrl_info *ctrl, u32 *tx_fc, u32 *rx_fc) | |||
814 | struct be_cmd_req_get_flow_control *req = embedded_payload(wrb); | 1016 | struct be_cmd_req_get_flow_control *req = embedded_payload(wrb); |
815 | int status; | 1017 | int status; |
816 | 1018 | ||
817 | spin_lock(&ctrl->cmd_lock); | 1019 | spin_lock(&ctrl->mbox_lock); |
818 | 1020 | ||
819 | memset(wrb, 0, sizeof(*wrb)); | 1021 | memset(wrb, 0, sizeof(*wrb)); |
820 | 1022 | ||
@@ -831,7 +1033,7 @@ int be_cmd_get_flow_control(struct be_ctrl_info *ctrl, u32 *tx_fc, u32 *rx_fc) | |||
831 | *rx_fc = le16_to_cpu(resp->rx_flow_control); | 1033 | *rx_fc = le16_to_cpu(resp->rx_flow_control); |
832 | } | 1034 | } |
833 | 1035 | ||
834 | spin_unlock(&ctrl->cmd_lock); | 1036 | spin_unlock(&ctrl->mbox_lock); |
835 | return status; | 1037 | return status; |
836 | } | 1038 | } |
837 | 1039 | ||
@@ -841,7 +1043,7 @@ int be_cmd_query_fw_cfg(struct be_ctrl_info *ctrl, u32 *port_num) | |||
841 | struct be_cmd_req_query_fw_cfg *req = embedded_payload(wrb); | 1043 | struct be_cmd_req_query_fw_cfg *req = embedded_payload(wrb); |
842 | int status; | 1044 | int status; |
843 | 1045 | ||
844 | spin_lock(&ctrl->cmd_lock); | 1046 | spin_lock(&ctrl->mbox_lock); |
845 | 1047 | ||
846 | memset(wrb, 0, sizeof(*wrb)); | 1048 | memset(wrb, 0, sizeof(*wrb)); |
847 | 1049 | ||
@@ -856,6 +1058,6 @@ int be_cmd_query_fw_cfg(struct be_ctrl_info *ctrl, u32 *port_num) | |||
856 | *port_num = le32_to_cpu(resp->phys_port); | 1058 | *port_num = le32_to_cpu(resp->phys_port); |
857 | } | 1059 | } |
858 | 1060 | ||
859 | spin_unlock(&ctrl->cmd_lock); | 1061 | spin_unlock(&ctrl->mbox_lock); |
860 | return status; | 1062 | return status; |
861 | } | 1063 | } |
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h index e499e2d5b8c3..747626da7b4e 100644 --- a/drivers/net/benet/be_cmds.h +++ b/drivers/net/benet/be_cmds.h | |||
@@ -76,6 +76,34 @@ struct be_mcc_cq_entry { | |||
76 | u32 flags; /* dword 3 */ | 76 | u32 flags; /* dword 3 */ |
77 | }; | 77 | }; |
78 | 78 | ||
79 | /* When the async bit of mcc_compl is set, the last 4 bytes of | ||
80 | * mcc_compl is interpreted as follows: | ||
81 | */ | ||
82 | #define ASYNC_TRAILER_EVENT_CODE_SHIFT 8 /* bits 8 - 15 */ | ||
83 | #define ASYNC_TRAILER_EVENT_CODE_MASK 0xFF | ||
84 | #define ASYNC_EVENT_CODE_LINK_STATE 0x1 | ||
85 | struct be_async_event_trailer { | ||
86 | u32 code; | ||
87 | }; | ||
88 | |||
89 | enum { | ||
90 | ASYNC_EVENT_LINK_DOWN = 0x0, | ||
91 | ASYNC_EVENT_LINK_UP = 0x1 | ||
92 | }; | ||
93 | |||
94 | /* When the event code of an async trailer is link-state, the mcc_compl | ||
95 | * must be interpreted as follows | ||
96 | */ | ||
97 | struct be_async_event_link_state { | ||
98 | u8 physical_port; | ||
99 | u8 port_link_status; | ||
100 | u8 port_duplex; | ||
101 | u8 port_speed; | ||
102 | u8 port_fault; | ||
103 | u8 rsvd0[7]; | ||
104 | struct be_async_event_trailer trailer; | ||
105 | } __packed; | ||
106 | |||
79 | struct be_mcc_mailbox { | 107 | struct be_mcc_mailbox { |
80 | struct be_mcc_wrb wrb; | 108 | struct be_mcc_wrb wrb; |
81 | struct be_mcc_cq_entry cqe; | 109 | struct be_mcc_cq_entry cqe; |
@@ -101,6 +129,7 @@ struct be_mcc_mailbox { | |||
101 | #define OPCODE_COMMON_FIRMWARE_CONFIG 42 | 129 | #define OPCODE_COMMON_FIRMWARE_CONFIG 42 |
102 | #define OPCODE_COMMON_NTWK_INTERFACE_CREATE 50 | 130 | #define OPCODE_COMMON_NTWK_INTERFACE_CREATE 50 |
103 | #define OPCODE_COMMON_NTWK_INTERFACE_DESTROY 51 | 131 | #define OPCODE_COMMON_NTWK_INTERFACE_DESTROY 51 |
132 | #define OPCODE_COMMON_MCC_DESTROY 53 | ||
104 | #define OPCODE_COMMON_CQ_DESTROY 54 | 133 | #define OPCODE_COMMON_CQ_DESTROY 54 |
105 | #define OPCODE_COMMON_EQ_DESTROY 55 | 134 | #define OPCODE_COMMON_EQ_DESTROY 55 |
106 | #define OPCODE_COMMON_QUERY_FIRMWARE_CONFIG 58 | 135 | #define OPCODE_COMMON_QUERY_FIRMWARE_CONFIG 58 |
@@ -269,6 +298,38 @@ struct be_cmd_resp_cq_create { | |||
269 | u16 rsvd0; | 298 | u16 rsvd0; |
270 | } __packed; | 299 | } __packed; |
271 | 300 | ||
301 | /******************** Create MCCQ ***************************/ | ||
302 | /* Pseudo amap definition in which each bit of the actual structure is defined | ||
303 | * as a byte: used to calculate offset/shift/mask of each field */ | ||
304 | struct amap_mcc_context { | ||
305 | u8 con_index[14]; | ||
306 | u8 rsvd0[2]; | ||
307 | u8 ring_size[4]; | ||
308 | u8 fetch_wrb; | ||
309 | u8 fetch_r2t; | ||
310 | u8 cq_id[10]; | ||
311 | u8 prod_index[14]; | ||
312 | u8 fid[8]; | ||
313 | u8 pdid[9]; | ||
314 | u8 valid; | ||
315 | u8 rsvd1[32]; | ||
316 | u8 rsvd2[32]; | ||
317 | } __packed; | ||
318 | |||
319 | struct be_cmd_req_mcc_create { | ||
320 | struct be_cmd_req_hdr hdr; | ||
321 | u16 num_pages; | ||
322 | u16 rsvd0; | ||
323 | u8 context[sizeof(struct amap_mcc_context) / 8]; | ||
324 | struct phys_addr pages[8]; | ||
325 | } __packed; | ||
326 | |||
327 | struct be_cmd_resp_mcc_create { | ||
328 | struct be_cmd_resp_hdr hdr; | ||
329 | u16 id; | ||
330 | u16 rsvd0; | ||
331 | } __packed; | ||
332 | |||
272 | /******************** Create TxQ ***************************/ | 333 | /******************** Create TxQ ***************************/ |
273 | #define BE_ETH_TX_RING_TYPE_STANDARD 2 | 334 | #define BE_ETH_TX_RING_TYPE_STANDARD 2 |
274 | #define BE_ULP1_NUM 1 | 335 | #define BE_ULP1_NUM 1 |
@@ -341,7 +402,8 @@ enum { | |||
341 | QTYPE_EQ = 1, | 402 | QTYPE_EQ = 1, |
342 | QTYPE_CQ, | 403 | QTYPE_CQ, |
343 | QTYPE_TXQ, | 404 | QTYPE_TXQ, |
344 | QTYPE_RXQ | 405 | QTYPE_RXQ, |
406 | QTYPE_MCCQ | ||
345 | }; | 407 | }; |
346 | 408 | ||
347 | struct be_cmd_req_q_destroy { | 409 | struct be_cmd_req_q_destroy { |
@@ -546,12 +608,6 @@ struct be_cmd_req_link_status { | |||
546 | u32 rsvd; | 608 | u32 rsvd; |
547 | }; | 609 | }; |
548 | 610 | ||
549 | struct be_link_info { | ||
550 | u8 duplex; | ||
551 | u8 speed; | ||
552 | u8 fault; | ||
553 | }; | ||
554 | |||
555 | enum { | 611 | enum { |
556 | PHY_LINK_DUPLEX_NONE = 0x0, | 612 | PHY_LINK_DUPLEX_NONE = 0x0, |
557 | PHY_LINK_DUPLEX_HALF = 0x1, | 613 | PHY_LINK_DUPLEX_HALF = 0x1, |
@@ -657,6 +713,9 @@ extern int be_cmd_cq_create(struct be_ctrl_info *ctrl, | |||
657 | struct be_queue_info *cq, struct be_queue_info *eq, | 713 | struct be_queue_info *cq, struct be_queue_info *eq, |
658 | bool sol_evts, bool no_delay, | 714 | bool sol_evts, bool no_delay, |
659 | int num_cqe_dma_coalesce); | 715 | int num_cqe_dma_coalesce); |
716 | extern int be_cmd_mccq_create(struct be_ctrl_info *ctrl, | ||
717 | struct be_queue_info *mccq, | ||
718 | struct be_queue_info *cq); | ||
660 | extern int be_cmd_txq_create(struct be_ctrl_info *ctrl, | 719 | extern int be_cmd_txq_create(struct be_ctrl_info *ctrl, |
661 | struct be_queue_info *txq, | 720 | struct be_queue_info *txq, |
662 | struct be_queue_info *cq); | 721 | struct be_queue_info *cq); |
@@ -667,7 +726,7 @@ extern int be_cmd_rxq_create(struct be_ctrl_info *ctrl, | |||
667 | extern int be_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q, | 726 | extern int be_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q, |
668 | int type); | 727 | int type); |
669 | extern int be_cmd_link_status_query(struct be_ctrl_info *ctrl, | 728 | extern int be_cmd_link_status_query(struct be_ctrl_info *ctrl, |
670 | struct be_link_info *link); | 729 | bool *link_up); |
671 | extern int be_cmd_reset(struct be_ctrl_info *ctrl); | 730 | extern int be_cmd_reset(struct be_ctrl_info *ctrl); |
672 | extern int be_cmd_get_stats(struct be_ctrl_info *ctrl, | 731 | extern int be_cmd_get_stats(struct be_ctrl_info *ctrl, |
673 | struct be_dma_mem *nonemb_cmd); | 732 | struct be_dma_mem *nonemb_cmd); |
@@ -679,10 +738,11 @@ extern int be_cmd_vlan_config(struct be_ctrl_info *ctrl, u32 if_id, | |||
679 | bool promiscuous); | 738 | bool promiscuous); |
680 | extern int be_cmd_promiscuous_config(struct be_ctrl_info *ctrl, | 739 | extern int be_cmd_promiscuous_config(struct be_ctrl_info *ctrl, |
681 | u8 port_num, bool en); | 740 | u8 port_num, bool en); |
682 | extern int be_cmd_mcast_mac_set(struct be_ctrl_info *ctrl, u32 if_id, | 741 | extern int be_cmd_multicast_set(struct be_ctrl_info *ctrl, u32 if_id, |
683 | u8 *mac_table, u32 num, bool promiscuous); | 742 | struct dev_mc_list *mc_list, u32 mc_count); |
684 | extern int be_cmd_set_flow_control(struct be_ctrl_info *ctrl, | 743 | extern int be_cmd_set_flow_control(struct be_ctrl_info *ctrl, |
685 | u32 tx_fc, u32 rx_fc); | 744 | u32 tx_fc, u32 rx_fc); |
686 | extern int be_cmd_get_flow_control(struct be_ctrl_info *ctrl, | 745 | extern int be_cmd_get_flow_control(struct be_ctrl_info *ctrl, |
687 | u32 *tx_fc, u32 *rx_fc); | 746 | u32 *tx_fc, u32 *rx_fc); |
688 | extern int be_cmd_query_fw_cfg(struct be_ctrl_info *ctrl, u32 *port_num); | 747 | extern int be_cmd_query_fw_cfg(struct be_ctrl_info *ctrl, u32 *port_num); |
748 | extern void be_process_mcc(struct be_ctrl_info *ctrl); | ||
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c index 9592f22e4c8c..cccc5419ad72 100644 --- a/drivers/net/benet/be_ethtool.c +++ b/drivers/net/benet/be_ethtool.c | |||
@@ -162,8 +162,8 @@ be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce) | |||
162 | return -EINVAL; | 162 | return -EINVAL; |
163 | 163 | ||
164 | adapter->max_rx_coal = coalesce->rx_max_coalesced_frames; | 164 | adapter->max_rx_coal = coalesce->rx_max_coalesced_frames; |
165 | if (adapter->max_rx_coal > MAX_SKB_FRAGS) | 165 | if (adapter->max_rx_coal > BE_MAX_FRAGS_PER_FRAME) |
166 | adapter->max_rx_coal = MAX_SKB_FRAGS - 1; | 166 | adapter->max_rx_coal = BE_MAX_FRAGS_PER_FRAME; |
167 | 167 | ||
168 | /* if AIC is being turned on now, start with an EQD of 0 */ | 168 | /* if AIC is being turned on now, start with an EQD of 0 */ |
169 | if (rx_eq->enable_aic == 0 && | 169 | if (rx_eq->enable_aic == 0 && |
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h index b132aa4893ca..b02e805c1db3 100644 --- a/drivers/net/benet/be_hw.h +++ b/drivers/net/benet/be_hw.h | |||
@@ -61,7 +61,7 @@ | |||
61 | /* Clear the interrupt for this eq */ | 61 | /* Clear the interrupt for this eq */ |
62 | #define DB_EQ_CLR_SHIFT (9) /* bit 9 */ | 62 | #define DB_EQ_CLR_SHIFT (9) /* bit 9 */ |
63 | /* Must be 1 */ | 63 | /* Must be 1 */ |
64 | #define DB_EQ_EVNT_SHIFT (10) /* bit 10 */ | 64 | #define DB_EQ_EVNT_SHIFT (10) /* bit 10 */ |
65 | /* Number of event entries processed */ | 65 | /* Number of event entries processed */ |
66 | #define DB_EQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */ | 66 | #define DB_EQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */ |
67 | /* Rearm bit */ | 67 | /* Rearm bit */ |
@@ -88,6 +88,12 @@ | |||
88 | /* Number of rx frags posted */ | 88 | /* Number of rx frags posted */ |
89 | #define DB_RQ_NUM_POSTED_SHIFT (24) /* bits 24 - 31 */ | 89 | #define DB_RQ_NUM_POSTED_SHIFT (24) /* bits 24 - 31 */ |
90 | 90 | ||
91 | /********** MCC door bell ************/ | ||
92 | #define DB_MCCQ_OFFSET 0x140 | ||
93 | #define DB_MCCQ_RING_ID_MASK 0x7FF /* bits 0 - 10 */ | ||
94 | /* Number of entries posted */ | ||
95 | #define DB_MCCQ_NUM_POSTED_SHIFT (16) /* bits 16 - 29 */ | ||
96 | |||
91 | /* | 97 | /* |
92 | * BE descriptors: host memory data structures whose formats | 98 | * BE descriptors: host memory data structures whose formats |
93 | * are hardwired in BE silicon. | 99 | * are hardwired in BE silicon. |
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c index 66bb56874d9b..308eb09ca56b 100644 --- a/drivers/net/benet/be_main.c +++ b/drivers/net/benet/be_main.c | |||
@@ -60,26 +60,6 @@ static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q, | |||
60 | return 0; | 60 | return 0; |
61 | } | 61 | } |
62 | 62 | ||
63 | static inline void *queue_head_node(struct be_queue_info *q) | ||
64 | { | ||
65 | return q->dma_mem.va + q->head * q->entry_size; | ||
66 | } | ||
67 | |||
68 | static inline void *queue_tail_node(struct be_queue_info *q) | ||
69 | { | ||
70 | return q->dma_mem.va + q->tail * q->entry_size; | ||
71 | } | ||
72 | |||
73 | static inline void queue_head_inc(struct be_queue_info *q) | ||
74 | { | ||
75 | index_inc(&q->head, q->len); | ||
76 | } | ||
77 | |||
78 | static inline void queue_tail_inc(struct be_queue_info *q) | ||
79 | { | ||
80 | index_inc(&q->tail, q->len); | ||
81 | } | ||
82 | |||
83 | static void be_intr_set(struct be_ctrl_info *ctrl, bool enable) | 63 | static void be_intr_set(struct be_ctrl_info *ctrl, bool enable) |
84 | { | 64 | { |
85 | u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET; | 65 | u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET; |
@@ -127,7 +107,7 @@ static void be_eq_notify(struct be_ctrl_info *ctrl, u16 qid, | |||
127 | iowrite32(val, ctrl->db + DB_EQ_OFFSET); | 107 | iowrite32(val, ctrl->db + DB_EQ_OFFSET); |
128 | } | 108 | } |
129 | 109 | ||
130 | static void be_cq_notify(struct be_ctrl_info *ctrl, u16 qid, | 110 | void be_cq_notify(struct be_ctrl_info *ctrl, u16 qid, |
131 | bool arm, u16 num_popped) | 111 | bool arm, u16 num_popped) |
132 | { | 112 | { |
133 | u32 val = 0; | 113 | u32 val = 0; |
@@ -234,28 +214,24 @@ static void netdev_stats_update(struct be_adapter *adapter) | |||
234 | dev_stats->tx_window_errors = 0; | 214 | dev_stats->tx_window_errors = 0; |
235 | } | 215 | } |
236 | 216 | ||
237 | static void be_link_status_update(struct be_adapter *adapter) | 217 | void be_link_status_update(void *ctxt, bool link_up) |
238 | { | 218 | { |
239 | struct be_link_info *prev = &adapter->link; | 219 | struct be_adapter *adapter = ctxt; |
240 | struct be_link_info now = { 0 }; | ||
241 | struct net_device *netdev = adapter->netdev; | 220 | struct net_device *netdev = adapter->netdev; |
242 | 221 | ||
243 | be_cmd_link_status_query(&adapter->ctrl, &now); | ||
244 | |||
245 | /* If link came up or went down */ | 222 | /* If link came up or went down */ |
246 | if (now.speed != prev->speed && (now.speed == PHY_LINK_SPEED_ZERO || | 223 | if (adapter->link_up != link_up) { |
247 | prev->speed == PHY_LINK_SPEED_ZERO)) { | 224 | if (link_up) { |
248 | if (now.speed == PHY_LINK_SPEED_ZERO) { | ||
249 | netif_stop_queue(netdev); | ||
250 | netif_carrier_off(netdev); | ||
251 | printk(KERN_INFO "%s: Link down\n", netdev->name); | ||
252 | } else { | ||
253 | netif_start_queue(netdev); | 225 | netif_start_queue(netdev); |
254 | netif_carrier_on(netdev); | 226 | netif_carrier_on(netdev); |
255 | printk(KERN_INFO "%s: Link up\n", netdev->name); | 227 | printk(KERN_INFO "%s: Link up\n", netdev->name); |
228 | } else { | ||
229 | netif_stop_queue(netdev); | ||
230 | netif_carrier_off(netdev); | ||
231 | printk(KERN_INFO "%s: Link down\n", netdev->name); | ||
256 | } | 232 | } |
233 | adapter->link_up = link_up; | ||
257 | } | 234 | } |
258 | *prev = now; | ||
259 | } | 235 | } |
260 | 236 | ||
261 | /* Update the EQ delay n BE based on the RX frags consumed / sec */ | 237 | /* Update the EQ delay n BE based on the RX frags consumed / sec */ |
@@ -569,47 +545,32 @@ static void be_vlan_rem_vid(struct net_device *netdev, u16 vid) | |||
569 | be_vid_config(netdev); | 545 | be_vid_config(netdev); |
570 | } | 546 | } |
571 | 547 | ||
572 | static void be_set_multicast_filter(struct net_device *netdev) | 548 | static void be_set_multicast_list(struct net_device *netdev) |
573 | { | 549 | { |
574 | struct be_adapter *adapter = netdev_priv(netdev); | 550 | struct be_adapter *adapter = netdev_priv(netdev); |
575 | struct dev_mc_list *mc_ptr; | 551 | struct be_ctrl_info *ctrl = &adapter->ctrl; |
576 | u8 mac_addr[32][ETH_ALEN]; | ||
577 | int i = 0; | ||
578 | 552 | ||
579 | if (netdev->flags & IFF_ALLMULTI) { | 553 | if (netdev->flags & IFF_PROMISC) { |
580 | /* set BE in Multicast promiscuous */ | 554 | be_cmd_promiscuous_config(ctrl, adapter->port_num, 1); |
581 | be_cmd_mcast_mac_set(&adapter->ctrl, | 555 | adapter->promiscuous = true; |
582 | adapter->if_handle, NULL, 0, true); | 556 | goto done; |
583 | return; | ||
584 | } | 557 | } |
585 | 558 | ||
586 | for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) { | 559 | /* BE was previously in promiscous mode; disable it */ |
587 | memcpy(&mac_addr[i][0], mc_ptr->dmi_addr, ETH_ALEN); | 560 | if (adapter->promiscuous) { |
588 | if (++i >= 32) { | 561 | adapter->promiscuous = false; |
589 | be_cmd_mcast_mac_set(&adapter->ctrl, | 562 | be_cmd_promiscuous_config(ctrl, adapter->port_num, 0); |
590 | adapter->if_handle, &mac_addr[0][0], i, false); | ||
591 | i = 0; | ||
592 | } | ||
593 | |||
594 | } | 563 | } |
595 | 564 | ||
596 | if (i) { | 565 | if (netdev->flags & IFF_ALLMULTI) { |
597 | /* reset the promiscuous mode also. */ | 566 | be_cmd_multicast_set(ctrl, adapter->if_handle, NULL, 0); |
598 | be_cmd_mcast_mac_set(&adapter->ctrl, | 567 | goto done; |
599 | adapter->if_handle, &mac_addr[0][0], i, false); | ||
600 | } | 568 | } |
601 | } | ||
602 | |||
603 | static void be_set_multicast_list(struct net_device *netdev) | ||
604 | { | ||
605 | struct be_adapter *adapter = netdev_priv(netdev); | ||
606 | 569 | ||
607 | if (netdev->flags & IFF_PROMISC) { | 570 | be_cmd_multicast_set(ctrl, adapter->if_handle, netdev->mc_list, |
608 | be_cmd_promiscuous_config(&adapter->ctrl, adapter->port_num, 1); | 571 | netdev->mc_count); |
609 | } else { | 572 | done: |
610 | be_cmd_promiscuous_config(&adapter->ctrl, adapter->port_num, 0); | 573 | return; |
611 | be_set_multicast_filter(netdev); | ||
612 | } | ||
613 | } | 574 | } |
614 | 575 | ||
615 | static void be_rx_rate_update(struct be_adapter *adapter) | 576 | static void be_rx_rate_update(struct be_adapter *adapter) |
@@ -705,7 +666,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter, | |||
705 | { | 666 | { |
706 | struct be_queue_info *rxq = &adapter->rx_obj.q; | 667 | struct be_queue_info *rxq = &adapter->rx_obj.q; |
707 | struct be_rx_page_info *page_info; | 668 | struct be_rx_page_info *page_info; |
708 | u16 rxq_idx, i, num_rcvd; | 669 | u16 rxq_idx, i, num_rcvd, j; |
709 | u32 pktsize, hdr_len, curr_frag_len; | 670 | u32 pktsize, hdr_len, curr_frag_len; |
710 | u8 *start; | 671 | u8 *start; |
711 | 672 | ||
@@ -748,22 +709,33 @@ static void skb_fill_rx_data(struct be_adapter *adapter, | |||
748 | 709 | ||
749 | /* More frags present for this completion */ | 710 | /* More frags present for this completion */ |
750 | pktsize -= curr_frag_len; /* account for above copied frag */ | 711 | pktsize -= curr_frag_len; /* account for above copied frag */ |
751 | for (i = 1; i < num_rcvd; i++) { | 712 | for (i = 1, j = 0; i < num_rcvd; i++) { |
752 | index_inc(&rxq_idx, rxq->len); | 713 | index_inc(&rxq_idx, rxq->len); |
753 | page_info = get_rx_page_info(adapter, rxq_idx); | 714 | page_info = get_rx_page_info(adapter, rxq_idx); |
754 | 715 | ||
755 | curr_frag_len = min(pktsize, rx_frag_size); | 716 | curr_frag_len = min(pktsize, rx_frag_size); |
756 | 717 | ||
757 | skb_shinfo(skb)->frags[i].page = page_info->page; | 718 | /* Coalesce all frags from the same physical page in one slot */ |
758 | skb_shinfo(skb)->frags[i].page_offset = page_info->page_offset; | 719 | if (page_info->page_offset == 0) { |
759 | skb_shinfo(skb)->frags[i].size = curr_frag_len; | 720 | /* Fresh page */ |
721 | j++; | ||
722 | skb_shinfo(skb)->frags[j].page = page_info->page; | ||
723 | skb_shinfo(skb)->frags[j].page_offset = | ||
724 | page_info->page_offset; | ||
725 | skb_shinfo(skb)->frags[j].size = 0; | ||
726 | skb_shinfo(skb)->nr_frags++; | ||
727 | } else { | ||
728 | put_page(page_info->page); | ||
729 | } | ||
730 | |||
731 | skb_shinfo(skb)->frags[j].size += curr_frag_len; | ||
760 | skb->len += curr_frag_len; | 732 | skb->len += curr_frag_len; |
761 | skb->data_len += curr_frag_len; | 733 | skb->data_len += curr_frag_len; |
762 | skb_shinfo(skb)->nr_frags++; | ||
763 | pktsize -= curr_frag_len; | 734 | pktsize -= curr_frag_len; |
764 | 735 | ||
765 | memset(page_info, 0, sizeof(*page_info)); | 736 | memset(page_info, 0, sizeof(*page_info)); |
766 | } | 737 | } |
738 | BUG_ON(j > MAX_SKB_FRAGS); | ||
767 | 739 | ||
768 | done: | 740 | done: |
769 | be_rx_stats_update(adapter, pktsize, num_rcvd); | 741 | be_rx_stats_update(adapter, pktsize, num_rcvd); |
@@ -825,7 +797,7 @@ static void be_rx_compl_process_lro(struct be_adapter *adapter, | |||
825 | struct skb_frag_struct rx_frags[BE_MAX_FRAGS_PER_FRAME]; | 797 | struct skb_frag_struct rx_frags[BE_MAX_FRAGS_PER_FRAME]; |
826 | struct be_queue_info *rxq = &adapter->rx_obj.q; | 798 | struct be_queue_info *rxq = &adapter->rx_obj.q; |
827 | u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len; | 799 | u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len; |
828 | u16 i, rxq_idx = 0, vid; | 800 | u16 i, rxq_idx = 0, vid, j; |
829 | 801 | ||
830 | num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp); | 802 | num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp); |
831 | pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp); | 803 | pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp); |
@@ -833,20 +805,28 @@ static void be_rx_compl_process_lro(struct be_adapter *adapter, | |||
833 | rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); | 805 | rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); |
834 | 806 | ||
835 | remaining = pkt_size; | 807 | remaining = pkt_size; |
836 | for (i = 0; i < num_rcvd; i++) { | 808 | for (i = 0, j = -1; i < num_rcvd; i++) { |
837 | page_info = get_rx_page_info(adapter, rxq_idx); | 809 | page_info = get_rx_page_info(adapter, rxq_idx); |
838 | 810 | ||
839 | curr_frag_len = min(remaining, rx_frag_size); | 811 | curr_frag_len = min(remaining, rx_frag_size); |
840 | 812 | ||
841 | rx_frags[i].page = page_info->page; | 813 | /* Coalesce all frags from the same physical page in one slot */ |
842 | rx_frags[i].page_offset = page_info->page_offset; | 814 | if (i == 0 || page_info->page_offset == 0) { |
843 | rx_frags[i].size = curr_frag_len; | 815 | /* First frag or Fresh page */ |
844 | remaining -= curr_frag_len; | 816 | j++; |
817 | rx_frags[j].page = page_info->page; | ||
818 | rx_frags[j].page_offset = page_info->page_offset; | ||
819 | rx_frags[j].size = 0; | ||
820 | } else { | ||
821 | put_page(page_info->page); | ||
822 | } | ||
823 | rx_frags[j].size += curr_frag_len; | ||
845 | 824 | ||
825 | remaining -= curr_frag_len; | ||
846 | index_inc(&rxq_idx, rxq->len); | 826 | index_inc(&rxq_idx, rxq->len); |
847 | |||
848 | memset(page_info, 0, sizeof(*page_info)); | 827 | memset(page_info, 0, sizeof(*page_info)); |
849 | } | 828 | } |
829 | BUG_ON(j > MAX_SKB_FRAGS); | ||
850 | 830 | ||
851 | if (likely(!vlanf)) { | 831 | if (likely(!vlanf)) { |
852 | lro_receive_frags(&adapter->rx_obj.lro_mgr, rx_frags, pkt_size, | 832 | lro_receive_frags(&adapter->rx_obj.lro_mgr, rx_frags, pkt_size, |
@@ -960,10 +940,8 @@ static void be_post_rx_frags(struct be_adapter *adapter) | |||
960 | return; | 940 | return; |
961 | } | 941 | } |
962 | 942 | ||
963 | static struct be_eth_tx_compl * | 943 | static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq) |
964 | be_tx_compl_get(struct be_adapter *adapter) | ||
965 | { | 944 | { |
966 | struct be_queue_info *tx_cq = &adapter->tx_obj.cq; | ||
967 | struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq); | 945 | struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq); |
968 | 946 | ||
969 | if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0) | 947 | if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0) |
@@ -1051,6 +1029,59 @@ static void be_tx_q_clean(struct be_adapter *adapter) | |||
1051 | } | 1029 | } |
1052 | } | 1030 | } |
1053 | 1031 | ||
1032 | static void be_mcc_queues_destroy(struct be_adapter *adapter) | ||
1033 | { | ||
1034 | struct be_queue_info *q; | ||
1035 | struct be_ctrl_info *ctrl = &adapter->ctrl; | ||
1036 | |||
1037 | q = &ctrl->mcc_obj.q; | ||
1038 | if (q->created) | ||
1039 | be_cmd_q_destroy(ctrl, q, QTYPE_MCCQ); | ||
1040 | be_queue_free(adapter, q); | ||
1041 | |||
1042 | q = &ctrl->mcc_obj.cq; | ||
1043 | if (q->created) | ||
1044 | be_cmd_q_destroy(ctrl, q, QTYPE_CQ); | ||
1045 | be_queue_free(adapter, q); | ||
1046 | } | ||
1047 | |||
1048 | /* Must be called only after TX qs are created as MCC shares TX EQ */ | ||
1049 | static int be_mcc_queues_create(struct be_adapter *adapter) | ||
1050 | { | ||
1051 | struct be_queue_info *q, *cq; | ||
1052 | struct be_ctrl_info *ctrl = &adapter->ctrl; | ||
1053 | |||
1054 | /* Alloc MCC compl queue */ | ||
1055 | cq = &ctrl->mcc_obj.cq; | ||
1056 | if (be_queue_alloc(adapter, cq, MCC_CQ_LEN, | ||
1057 | sizeof(struct be_mcc_cq_entry))) | ||
1058 | goto err; | ||
1059 | |||
1060 | /* Ask BE to create MCC compl queue; share TX's eq */ | ||
1061 | if (be_cmd_cq_create(ctrl, cq, &adapter->tx_eq.q, false, true, 0)) | ||
1062 | goto mcc_cq_free; | ||
1063 | |||
1064 | /* Alloc MCC queue */ | ||
1065 | q = &ctrl->mcc_obj.q; | ||
1066 | if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb))) | ||
1067 | goto mcc_cq_destroy; | ||
1068 | |||
1069 | /* Ask BE to create MCC queue */ | ||
1070 | if (be_cmd_mccq_create(ctrl, q, cq)) | ||
1071 | goto mcc_q_free; | ||
1072 | |||
1073 | return 0; | ||
1074 | |||
1075 | mcc_q_free: | ||
1076 | be_queue_free(adapter, q); | ||
1077 | mcc_cq_destroy: | ||
1078 | be_cmd_q_destroy(ctrl, cq, QTYPE_CQ); | ||
1079 | mcc_cq_free: | ||
1080 | be_queue_free(adapter, cq); | ||
1081 | err: | ||
1082 | return -1; | ||
1083 | } | ||
1084 | |||
1054 | static void be_tx_queues_destroy(struct be_adapter *adapter) | 1085 | static void be_tx_queues_destroy(struct be_adapter *adapter) |
1055 | { | 1086 | { |
1056 | struct be_queue_info *q; | 1087 | struct be_queue_info *q; |
@@ -1263,7 +1294,7 @@ static irqreturn_t be_msix_rx(int irq, void *dev) | |||
1263 | return IRQ_HANDLED; | 1294 | return IRQ_HANDLED; |
1264 | } | 1295 | } |
1265 | 1296 | ||
1266 | static irqreturn_t be_msix_tx(int irq, void *dev) | 1297 | static irqreturn_t be_msix_tx_mcc(int irq, void *dev) |
1267 | { | 1298 | { |
1268 | struct be_adapter *adapter = dev; | 1299 | struct be_adapter *adapter = dev; |
1269 | 1300 | ||
@@ -1324,40 +1355,51 @@ int be_poll_rx(struct napi_struct *napi, int budget) | |||
1324 | return work_done; | 1355 | return work_done; |
1325 | } | 1356 | } |
1326 | 1357 | ||
1327 | /* For TX we don't honour budget; consume everything */ | 1358 | void be_process_tx(struct be_adapter *adapter) |
1328 | int be_poll_tx(struct napi_struct *napi, int budget) | ||
1329 | { | 1359 | { |
1330 | struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi); | 1360 | struct be_queue_info *txq = &adapter->tx_obj.q; |
1331 | struct be_adapter *adapter = | 1361 | struct be_queue_info *tx_cq = &adapter->tx_obj.cq; |
1332 | container_of(tx_eq, struct be_adapter, tx_eq); | ||
1333 | struct be_tx_obj *tx_obj = &adapter->tx_obj; | ||
1334 | struct be_queue_info *tx_cq = &tx_obj->cq; | ||
1335 | struct be_queue_info *txq = &tx_obj->q; | ||
1336 | struct be_eth_tx_compl *txcp; | 1362 | struct be_eth_tx_compl *txcp; |
1337 | u32 num_cmpl = 0; | 1363 | u32 num_cmpl = 0; |
1338 | u16 end_idx; | 1364 | u16 end_idx; |
1339 | 1365 | ||
1340 | while ((txcp = be_tx_compl_get(adapter))) { | 1366 | while ((txcp = be_tx_compl_get(tx_cq))) { |
1341 | end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl, | 1367 | end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl, |
1342 | wrb_index, txcp); | 1368 | wrb_index, txcp); |
1343 | be_tx_compl_process(adapter, end_idx); | 1369 | be_tx_compl_process(adapter, end_idx); |
1344 | num_cmpl++; | 1370 | num_cmpl++; |
1345 | } | 1371 | } |
1346 | 1372 | ||
1347 | /* As Tx wrbs have been freed up, wake up netdev queue if | 1373 | if (num_cmpl) { |
1348 | * it was stopped due to lack of tx wrbs. | 1374 | be_cq_notify(&adapter->ctrl, tx_cq->id, true, num_cmpl); |
1349 | */ | 1375 | |
1350 | if (netif_queue_stopped(adapter->netdev) && | 1376 | /* As Tx wrbs have been freed up, wake up netdev queue if |
1377 | * it was stopped due to lack of tx wrbs. | ||
1378 | */ | ||
1379 | if (netif_queue_stopped(adapter->netdev) && | ||
1351 | atomic_read(&txq->used) < txq->len / 2) { | 1380 | atomic_read(&txq->used) < txq->len / 2) { |
1352 | netif_wake_queue(adapter->netdev); | 1381 | netif_wake_queue(adapter->netdev); |
1382 | } | ||
1383 | |||
1384 | drvr_stats(adapter)->be_tx_events++; | ||
1385 | drvr_stats(adapter)->be_tx_compl += num_cmpl; | ||
1353 | } | 1386 | } |
1387 | } | ||
1388 | |||
1389 | /* As TX and MCC share the same EQ check for both TX and MCC completions. | ||
1390 | * For TX/MCC we don't honour budget; consume everything | ||
1391 | */ | ||
1392 | static int be_poll_tx_mcc(struct napi_struct *napi, int budget) | ||
1393 | { | ||
1394 | struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi); | ||
1395 | struct be_adapter *adapter = | ||
1396 | container_of(tx_eq, struct be_adapter, tx_eq); | ||
1354 | 1397 | ||
1355 | napi_complete(napi); | 1398 | napi_complete(napi); |
1356 | 1399 | ||
1357 | be_cq_notify(&adapter->ctrl, tx_cq->id, true, num_cmpl); | 1400 | be_process_tx(adapter); |
1358 | 1401 | ||
1359 | drvr_stats(adapter)->be_tx_events++; | 1402 | be_process_mcc(&adapter->ctrl); |
1360 | drvr_stats(adapter)->be_tx_compl += num_cmpl; | ||
1361 | 1403 | ||
1362 | return 1; | 1404 | return 1; |
1363 | } | 1405 | } |
@@ -1368,9 +1410,6 @@ static void be_worker(struct work_struct *work) | |||
1368 | container_of(work, struct be_adapter, work.work); | 1410 | container_of(work, struct be_adapter, work.work); |
1369 | int status; | 1411 | int status; |
1370 | 1412 | ||
1371 | /* Check link */ | ||
1372 | be_link_status_update(adapter); | ||
1373 | |||
1374 | /* Get Stats */ | 1413 | /* Get Stats */ |
1375 | status = be_cmd_get_stats(&adapter->ctrl, &adapter->stats.cmd); | 1414 | status = be_cmd_get_stats(&adapter->ctrl, &adapter->stats.cmd); |
1376 | if (!status) | 1415 | if (!status) |
@@ -1419,7 +1458,7 @@ static int be_msix_register(struct be_adapter *adapter) | |||
1419 | 1458 | ||
1420 | sprintf(tx_eq->desc, "%s-tx", netdev->name); | 1459 | sprintf(tx_eq->desc, "%s-tx", netdev->name); |
1421 | vec = be_msix_vec_get(adapter, tx_eq->q.id); | 1460 | vec = be_msix_vec_get(adapter, tx_eq->q.id); |
1422 | status = request_irq(vec, be_msix_tx, 0, tx_eq->desc, adapter); | 1461 | status = request_irq(vec, be_msix_tx_mcc, 0, tx_eq->desc, adapter); |
1423 | if (status) | 1462 | if (status) |
1424 | goto err; | 1463 | goto err; |
1425 | 1464 | ||
@@ -1495,6 +1534,39 @@ static int be_open(struct net_device *netdev) | |||
1495 | struct be_ctrl_info *ctrl = &adapter->ctrl; | 1534 | struct be_ctrl_info *ctrl = &adapter->ctrl; |
1496 | struct be_eq_obj *rx_eq = &adapter->rx_eq; | 1535 | struct be_eq_obj *rx_eq = &adapter->rx_eq; |
1497 | struct be_eq_obj *tx_eq = &adapter->tx_eq; | 1536 | struct be_eq_obj *tx_eq = &adapter->tx_eq; |
1537 | bool link_up; | ||
1538 | int status; | ||
1539 | |||
1540 | /* First time posting */ | ||
1541 | be_post_rx_frags(adapter); | ||
1542 | |||
1543 | napi_enable(&rx_eq->napi); | ||
1544 | napi_enable(&tx_eq->napi); | ||
1545 | |||
1546 | be_irq_register(adapter); | ||
1547 | |||
1548 | be_intr_set(ctrl, true); | ||
1549 | |||
1550 | /* The evt queues are created in unarmed state; arm them */ | ||
1551 | be_eq_notify(ctrl, rx_eq->q.id, true, false, 0); | ||
1552 | be_eq_notify(ctrl, tx_eq->q.id, true, false, 0); | ||
1553 | |||
1554 | /* Rx compl queue may be in unarmed state; rearm it */ | ||
1555 | be_cq_notify(ctrl, adapter->rx_obj.cq.id, true, 0); | ||
1556 | |||
1557 | status = be_cmd_link_status_query(ctrl, &link_up); | ||
1558 | if (status) | ||
1559 | return status; | ||
1560 | be_link_status_update(adapter, link_up); | ||
1561 | |||
1562 | schedule_delayed_work(&adapter->work, msecs_to_jiffies(100)); | ||
1563 | return 0; | ||
1564 | } | ||
1565 | |||
1566 | static int be_setup(struct be_adapter *adapter) | ||
1567 | { | ||
1568 | struct be_ctrl_info *ctrl = &adapter->ctrl; | ||
1569 | struct net_device *netdev = adapter->netdev; | ||
1498 | u32 if_flags; | 1570 | u32 if_flags; |
1499 | int status; | 1571 | int status; |
1500 | 1572 | ||
@@ -1521,29 +1593,14 @@ static int be_open(struct net_device *netdev) | |||
1521 | if (status != 0) | 1593 | if (status != 0) |
1522 | goto tx_qs_destroy; | 1594 | goto tx_qs_destroy; |
1523 | 1595 | ||
1524 | /* First time posting */ | 1596 | status = be_mcc_queues_create(adapter); |
1525 | be_post_rx_frags(adapter); | 1597 | if (status != 0) |
1526 | 1598 | goto rx_qs_destroy; | |
1527 | napi_enable(&rx_eq->napi); | ||
1528 | napi_enable(&tx_eq->napi); | ||
1529 | |||
1530 | be_irq_register(adapter); | ||
1531 | |||
1532 | be_intr_set(ctrl, true); | ||
1533 | |||
1534 | /* The evt queues are created in the unarmed state; arm them */ | ||
1535 | be_eq_notify(ctrl, rx_eq->q.id, true, false, 0); | ||
1536 | be_eq_notify(ctrl, tx_eq->q.id, true, false, 0); | ||
1537 | |||
1538 | /* The compl queues are created in the unarmed state; arm them */ | ||
1539 | be_cq_notify(ctrl, adapter->rx_obj.cq.id, true, 0); | ||
1540 | be_cq_notify(ctrl, adapter->tx_obj.cq.id, true, 0); | ||
1541 | |||
1542 | be_link_status_update(adapter); | ||
1543 | 1599 | ||
1544 | schedule_delayed_work(&adapter->work, msecs_to_jiffies(100)); | ||
1545 | return 0; | 1600 | return 0; |
1546 | 1601 | ||
1602 | rx_qs_destroy: | ||
1603 | be_rx_queues_destroy(adapter); | ||
1547 | tx_qs_destroy: | 1604 | tx_qs_destroy: |
1548 | be_tx_queues_destroy(adapter); | 1605 | be_tx_queues_destroy(adapter); |
1549 | if_destroy: | 1606 | if_destroy: |
@@ -1552,6 +1609,19 @@ do_none: | |||
1552 | return status; | 1609 | return status; |
1553 | } | 1610 | } |
1554 | 1611 | ||
1612 | static int be_clear(struct be_adapter *adapter) | ||
1613 | { | ||
1614 | struct be_ctrl_info *ctrl = &adapter->ctrl; | ||
1615 | |||
1616 | be_rx_queues_destroy(adapter); | ||
1617 | be_tx_queues_destroy(adapter); | ||
1618 | |||
1619 | be_cmd_if_destroy(ctrl, adapter->if_handle); | ||
1620 | |||
1621 | be_mcc_queues_destroy(adapter); | ||
1622 | return 0; | ||
1623 | } | ||
1624 | |||
1555 | static int be_close(struct net_device *netdev) | 1625 | static int be_close(struct net_device *netdev) |
1556 | { | 1626 | { |
1557 | struct be_adapter *adapter = netdev_priv(netdev); | 1627 | struct be_adapter *adapter = netdev_priv(netdev); |
@@ -1564,7 +1634,7 @@ static int be_close(struct net_device *netdev) | |||
1564 | 1634 | ||
1565 | netif_stop_queue(netdev); | 1635 | netif_stop_queue(netdev); |
1566 | netif_carrier_off(netdev); | 1636 | netif_carrier_off(netdev); |
1567 | adapter->link.speed = PHY_LINK_SPEED_ZERO; | 1637 | adapter->link_up = false; |
1568 | 1638 | ||
1569 | be_intr_set(ctrl, false); | 1639 | be_intr_set(ctrl, false); |
1570 | 1640 | ||
@@ -1581,10 +1651,6 @@ static int be_close(struct net_device *netdev) | |||
1581 | napi_disable(&rx_eq->napi); | 1651 | napi_disable(&rx_eq->napi); |
1582 | napi_disable(&tx_eq->napi); | 1652 | napi_disable(&tx_eq->napi); |
1583 | 1653 | ||
1584 | be_rx_queues_destroy(adapter); | ||
1585 | be_tx_queues_destroy(adapter); | ||
1586 | |||
1587 | be_cmd_if_destroy(ctrl, adapter->if_handle); | ||
1588 | return 0; | 1654 | return 0; |
1589 | } | 1655 | } |
1590 | 1656 | ||
@@ -1673,7 +1739,7 @@ static void be_netdev_init(struct net_device *netdev) | |||
1673 | 1739 | ||
1674 | netif_napi_add(netdev, &adapter->rx_eq.napi, be_poll_rx, | 1740 | netif_napi_add(netdev, &adapter->rx_eq.napi, be_poll_rx, |
1675 | BE_NAPI_WEIGHT); | 1741 | BE_NAPI_WEIGHT); |
1676 | netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx, | 1742 | netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc, |
1677 | BE_NAPI_WEIGHT); | 1743 | BE_NAPI_WEIGHT); |
1678 | 1744 | ||
1679 | netif_carrier_off(netdev); | 1745 | netif_carrier_off(netdev); |
@@ -1755,7 +1821,12 @@ static int be_ctrl_init(struct be_adapter *adapter) | |||
1755 | mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16); | 1821 | mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16); |
1756 | mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16); | 1822 | mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16); |
1757 | memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox)); | 1823 | memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox)); |
1758 | spin_lock_init(&ctrl->cmd_lock); | 1824 | spin_lock_init(&ctrl->mbox_lock); |
1825 | spin_lock_init(&ctrl->mcc_lock); | ||
1826 | spin_lock_init(&ctrl->mcc_cq_lock); | ||
1827 | |||
1828 | ctrl->async_cb = be_link_status_update; | ||
1829 | ctrl->adapter_ctxt = adapter; | ||
1759 | 1830 | ||
1760 | val = ioread32(ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET); | 1831 | val = ioread32(ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET); |
1761 | ctrl->pci_func = (val >> MEMBAR_CTRL_INT_CTRL_PFUNC_SHIFT) & | 1832 | ctrl->pci_func = (val >> MEMBAR_CTRL_INT_CTRL_PFUNC_SHIFT) & |
@@ -1793,6 +1864,8 @@ static void __devexit be_remove(struct pci_dev *pdev) | |||
1793 | 1864 | ||
1794 | unregister_netdev(adapter->netdev); | 1865 | unregister_netdev(adapter->netdev); |
1795 | 1866 | ||
1867 | be_clear(adapter); | ||
1868 | |||
1796 | be_stats_cleanup(adapter); | 1869 | be_stats_cleanup(adapter); |
1797 | 1870 | ||
1798 | be_ctrl_cleanup(adapter); | 1871 | be_ctrl_cleanup(adapter); |
@@ -1890,13 +1963,18 @@ static int __devinit be_probe(struct pci_dev *pdev, | |||
1890 | be_netdev_init(netdev); | 1963 | be_netdev_init(netdev); |
1891 | SET_NETDEV_DEV(netdev, &adapter->pdev->dev); | 1964 | SET_NETDEV_DEV(netdev, &adapter->pdev->dev); |
1892 | 1965 | ||
1966 | status = be_setup(adapter); | ||
1967 | if (status) | ||
1968 | goto stats_clean; | ||
1893 | status = register_netdev(netdev); | 1969 | status = register_netdev(netdev); |
1894 | if (status != 0) | 1970 | if (status != 0) |
1895 | goto stats_clean; | 1971 | goto unsetup; |
1896 | 1972 | ||
1897 | dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num); | 1973 | dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num); |
1898 | return 0; | 1974 | return 0; |
1899 | 1975 | ||
1976 | unsetup: | ||
1977 | be_clear(adapter); | ||
1900 | stats_clean: | 1978 | stats_clean: |
1901 | be_stats_cleanup(adapter); | 1979 | be_stats_cleanup(adapter); |
1902 | ctrl_clean: | 1980 | ctrl_clean: |
@@ -1921,6 +1999,7 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state) | |||
1921 | if (netif_running(netdev)) { | 1999 | if (netif_running(netdev)) { |
1922 | rtnl_lock(); | 2000 | rtnl_lock(); |
1923 | be_close(netdev); | 2001 | be_close(netdev); |
2002 | be_clear(adapter); | ||
1924 | rtnl_unlock(); | 2003 | rtnl_unlock(); |
1925 | } | 2004 | } |
1926 | 2005 | ||
@@ -1947,6 +2026,7 @@ static int be_resume(struct pci_dev *pdev) | |||
1947 | 2026 | ||
1948 | if (netif_running(netdev)) { | 2027 | if (netif_running(netdev)) { |
1949 | rtnl_lock(); | 2028 | rtnl_lock(); |
2029 | be_setup(adapter); | ||
1950 | be_open(netdev); | 2030 | be_open(netdev); |
1951 | rtnl_unlock(); | 2031 | rtnl_unlock(); |
1952 | } | 2032 | } |
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index 38f1c3375d7f..b70cc99962fc 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c | |||
@@ -6825,6 +6825,14 @@ bnx2_nway_reset(struct net_device *dev) | |||
6825 | return 0; | 6825 | return 0; |
6826 | } | 6826 | } |
6827 | 6827 | ||
6828 | static u32 | ||
6829 | bnx2_get_link(struct net_device *dev) | ||
6830 | { | ||
6831 | struct bnx2 *bp = netdev_priv(dev); | ||
6832 | |||
6833 | return bp->link_up; | ||
6834 | } | ||
6835 | |||
6828 | static int | 6836 | static int |
6829 | bnx2_get_eeprom_len(struct net_device *dev) | 6837 | bnx2_get_eeprom_len(struct net_device *dev) |
6830 | { | 6838 | { |
@@ -7392,7 +7400,7 @@ static const struct ethtool_ops bnx2_ethtool_ops = { | |||
7392 | .get_wol = bnx2_get_wol, | 7400 | .get_wol = bnx2_get_wol, |
7393 | .set_wol = bnx2_set_wol, | 7401 | .set_wol = bnx2_set_wol, |
7394 | .nway_reset = bnx2_nway_reset, | 7402 | .nway_reset = bnx2_nway_reset, |
7395 | .get_link = ethtool_op_get_link, | 7403 | .get_link = bnx2_get_link, |
7396 | .get_eeprom_len = bnx2_get_eeprom_len, | 7404 | .get_eeprom_len = bnx2_get_eeprom_len, |
7397 | .get_eeprom = bnx2_get_eeprom, | 7405 | .get_eeprom = bnx2_get_eeprom, |
7398 | .set_eeprom = bnx2_set_eeprom, | 7406 | .set_eeprom = bnx2_set_eeprom, |
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c index fbf1352e9c1c..951714a7f90a 100644 --- a/drivers/net/bnx2x_main.c +++ b/drivers/net/bnx2x_main.c | |||
@@ -8637,6 +8637,14 @@ static int bnx2x_nway_reset(struct net_device *dev) | |||
8637 | return 0; | 8637 | return 0; |
8638 | } | 8638 | } |
8639 | 8639 | ||
8640 | static u32 | ||
8641 | bnx2x_get_link(struct net_device *dev) | ||
8642 | { | ||
8643 | struct bnx2x *bp = netdev_priv(dev); | ||
8644 | |||
8645 | return bp->link_vars.link_up; | ||
8646 | } | ||
8647 | |||
8640 | static int bnx2x_get_eeprom_len(struct net_device *dev) | 8648 | static int bnx2x_get_eeprom_len(struct net_device *dev) |
8641 | { | 8649 | { |
8642 | struct bnx2x *bp = netdev_priv(dev); | 8650 | struct bnx2x *bp = netdev_priv(dev); |
@@ -10034,7 +10042,7 @@ static struct ethtool_ops bnx2x_ethtool_ops = { | |||
10034 | .get_msglevel = bnx2x_get_msglevel, | 10042 | .get_msglevel = bnx2x_get_msglevel, |
10035 | .set_msglevel = bnx2x_set_msglevel, | 10043 | .set_msglevel = bnx2x_set_msglevel, |
10036 | .nway_reset = bnx2x_nway_reset, | 10044 | .nway_reset = bnx2x_nway_reset, |
10037 | .get_link = ethtool_op_get_link, | 10045 | .get_link = bnx2x_get_link, |
10038 | .get_eeprom_len = bnx2x_get_eeprom_len, | 10046 | .get_eeprom_len = bnx2x_get_eeprom_len, |
10039 | .get_eeprom = bnx2x_get_eeprom, | 10047 | .get_eeprom = bnx2x_get_eeprom, |
10040 | .set_eeprom = bnx2x_set_eeprom, | 10048 | .set_eeprom = bnx2x_set_eeprom, |
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig index d5e18812bf49..33821a81cbf8 100644 --- a/drivers/net/can/Kconfig +++ b/drivers/net/can/Kconfig | |||
@@ -36,7 +36,7 @@ config CAN_CALC_BITTIMING | |||
36 | If unsure, say Y. | 36 | If unsure, say Y. |
37 | 37 | ||
38 | config CAN_SJA1000 | 38 | config CAN_SJA1000 |
39 | depends on CAN_DEV | 39 | depends on CAN_DEV && HAS_IOMEM |
40 | tristate "Philips SJA1000" | 40 | tristate "Philips SJA1000" |
41 | ---help--- | 41 | ---help--- |
42 | Driver for the SJA1000 CAN controllers from Philips or NXP | 42 | Driver for the SJA1000 CAN controllers from Philips or NXP |
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c index 44f77eb1180f..4d1515f45ba2 100644 --- a/drivers/net/cnic.c +++ b/drivers/net/cnic.c | |||
@@ -25,8 +25,6 @@ | |||
25 | #include <linux/delay.h> | 25 | #include <linux/delay.h> |
26 | #include <linux/ethtool.h> | 26 | #include <linux/ethtool.h> |
27 | #include <linux/if_vlan.h> | 27 | #include <linux/if_vlan.h> |
28 | #include <linux/module.h> | ||
29 | |||
30 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) | 28 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) |
31 | #define BCM_VLAN 1 | 29 | #define BCM_VLAN 1 |
32 | #endif | 30 | #endif |
@@ -2521,9 +2519,9 @@ static struct cnic_dev *init_bnx2_cnic(struct net_device *dev) | |||
2521 | struct cnic_dev *cdev; | 2519 | struct cnic_dev *cdev; |
2522 | struct cnic_local *cp; | 2520 | struct cnic_local *cp; |
2523 | struct cnic_eth_dev *ethdev = NULL; | 2521 | struct cnic_eth_dev *ethdev = NULL; |
2524 | struct cnic_eth_dev *(*probe)(void *) = NULL; | 2522 | struct cnic_eth_dev *(*probe)(struct net_device *) = NULL; |
2525 | 2523 | ||
2526 | probe = __symbol_get("bnx2_cnic_probe"); | 2524 | probe = symbol_get(bnx2_cnic_probe); |
2527 | if (probe) { | 2525 | if (probe) { |
2528 | ethdev = (*probe)(dev); | 2526 | ethdev = (*probe)(dev); |
2529 | symbol_put_addr(probe); | 2527 | symbol_put_addr(probe); |
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h index 06380963a34e..d1bce27ee99e 100644 --- a/drivers/net/cnic_if.h +++ b/drivers/net/cnic_if.h | |||
@@ -296,4 +296,6 @@ extern int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops); | |||
296 | 296 | ||
297 | extern int cnic_unregister_driver(int ulp_type); | 297 | extern int cnic_unregister_driver(int ulp_type); |
298 | 298 | ||
299 | extern struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev); | ||
300 | |||
299 | #endif | 301 | #endif |
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c index 58afafbd3b9c..fd5e32cbcb87 100644 --- a/drivers/net/cpmac.c +++ b/drivers/net/cpmac.c | |||
@@ -1097,7 +1097,7 @@ static const struct net_device_ops cpmac_netdev_ops = { | |||
1097 | .ndo_start_xmit = cpmac_start_xmit, | 1097 | .ndo_start_xmit = cpmac_start_xmit, |
1098 | .ndo_tx_timeout = cpmac_tx_timeout, | 1098 | .ndo_tx_timeout = cpmac_tx_timeout, |
1099 | .ndo_set_multicast_list = cpmac_set_multicast_list, | 1099 | .ndo_set_multicast_list = cpmac_set_multicast_list, |
1100 | .ndo_so_ioctl = cpmac_ioctl, | 1100 | .ndo_do_ioctl = cpmac_ioctl, |
1101 | .ndo_set_config = cpmac_config, | 1101 | .ndo_set_config = cpmac_config, |
1102 | .ndo_change_mtu = eth_change_mtu, | 1102 | .ndo_change_mtu = eth_change_mtu, |
1103 | .ndo_validate_addr = eth_validate_addr, | 1103 | .ndo_validate_addr = eth_validate_addr, |
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index 5e3356f8eb5a..5b8cbdb4b520 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c | |||
@@ -2185,12 +2185,16 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter, | |||
2185 | /* Free all the Rx ring sk_buffs */ | 2185 | /* Free all the Rx ring sk_buffs */ |
2186 | for (i = 0; i < rx_ring->count; i++) { | 2186 | for (i = 0; i < rx_ring->count; i++) { |
2187 | buffer_info = &rx_ring->buffer_info[i]; | 2187 | buffer_info = &rx_ring->buffer_info[i]; |
2188 | if (buffer_info->skb) { | 2188 | if (buffer_info->dma) { |
2189 | pci_unmap_single(pdev, | 2189 | pci_unmap_single(pdev, |
2190 | buffer_info->dma, | 2190 | buffer_info->dma, |
2191 | buffer_info->length, | 2191 | buffer_info->length, |
2192 | PCI_DMA_FROMDEVICE); | 2192 | PCI_DMA_FROMDEVICE); |
2193 | } | ||
2194 | |||
2195 | buffer_info->dma = 0; | ||
2193 | 2196 | ||
2197 | if (buffer_info->skb) { | ||
2194 | dev_kfree_skb(buffer_info->skb); | 2198 | dev_kfree_skb(buffer_info->skb); |
2195 | buffer_info->skb = NULL; | 2199 | buffer_info->skb = NULL; |
2196 | } | 2200 | } |
@@ -4033,6 +4037,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
4033 | buffer_info->dma, | 4037 | buffer_info->dma, |
4034 | buffer_info->length, | 4038 | buffer_info->length, |
4035 | PCI_DMA_FROMDEVICE); | 4039 | PCI_DMA_FROMDEVICE); |
4040 | buffer_info->dma = 0; | ||
4036 | 4041 | ||
4037 | length = le16_to_cpu(rx_desc->length); | 4042 | length = le16_to_cpu(rx_desc->length); |
4038 | /* !EOP means multiple descriptors were used to store a single | 4043 | /* !EOP means multiple descriptors were used to store a single |
@@ -4222,6 +4227,7 @@ map_skb: | |||
4222 | pci_unmap_single(pdev, buffer_info->dma, | 4227 | pci_unmap_single(pdev, buffer_info->dma, |
4223 | adapter->rx_buffer_len, | 4228 | adapter->rx_buffer_len, |
4224 | PCI_DMA_FROMDEVICE); | 4229 | PCI_DMA_FROMDEVICE); |
4230 | buffer_info->dma = 0; | ||
4225 | 4231 | ||
4226 | break; /* while !buffer_info->skb */ | 4232 | break; /* while !buffer_info->skb */ |
4227 | } | 4233 | } |
@@ -4817,6 +4823,9 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, | |||
4817 | 4823 | ||
4818 | netif_device_detach(netdev); | 4824 | netif_device_detach(netdev); |
4819 | 4825 | ||
4826 | if (state == pci_channel_io_perm_failure) | ||
4827 | return PCI_ERS_RESULT_DISCONNECT; | ||
4828 | |||
4820 | if (netif_running(netdev)) | 4829 | if (netif_running(netdev)) |
4821 | e1000_down(adapter); | 4830 | e1000_down(adapter); |
4822 | pci_disable_device(pdev); | 4831 | pci_disable_device(pdev); |
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index 677f60490f67..63415bb6f48f 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c | |||
@@ -1997,7 +1997,7 @@ static int e1000_clean(struct napi_struct *napi, int budget) | |||
1997 | struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi); | 1997 | struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi); |
1998 | struct e1000_hw *hw = &adapter->hw; | 1998 | struct e1000_hw *hw = &adapter->hw; |
1999 | struct net_device *poll_dev = adapter->netdev; | 1999 | struct net_device *poll_dev = adapter->netdev; |
2000 | int tx_cleaned = 0, work_done = 0; | 2000 | int tx_cleaned = 1, work_done = 0; |
2001 | 2001 | ||
2002 | adapter = netdev_priv(poll_dev); | 2002 | adapter = netdev_priv(poll_dev); |
2003 | 2003 | ||
@@ -4785,6 +4785,9 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, | |||
4785 | 4785 | ||
4786 | netif_device_detach(netdev); | 4786 | netif_device_detach(netdev); |
4787 | 4787 | ||
4788 | if (state == pci_channel_io_perm_failure) | ||
4789 | return PCI_ERS_RESULT_DISCONNECT; | ||
4790 | |||
4788 | if (netif_running(netdev)) | 4791 | if (netif_running(netdev)) |
4789 | e1000e_down(adapter); | 4792 | e1000e_down(adapter); |
4790 | pci_disable_device(pdev); | 4793 | pci_disable_device(pdev); |
diff --git a/drivers/net/fsl_pq_mdio.c b/drivers/net/fsl_pq_mdio.c index 3af581303ca2..d167090248e2 100644 --- a/drivers/net/fsl_pq_mdio.c +++ b/drivers/net/fsl_pq_mdio.c | |||
@@ -188,7 +188,7 @@ static int fsl_pq_mdio_find_free(struct mii_bus *new_bus) | |||
188 | } | 188 | } |
189 | 189 | ||
190 | 190 | ||
191 | #ifdef CONFIG_GIANFAR | 191 | #if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE) |
192 | static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs) | 192 | static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs) |
193 | { | 193 | { |
194 | struct gfar __iomem *enet_regs; | 194 | struct gfar __iomem *enet_regs; |
@@ -206,7 +206,7 @@ static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs) | |||
206 | #endif | 206 | #endif |
207 | 207 | ||
208 | 208 | ||
209 | #ifdef CONFIG_UCC_GETH | 209 | #if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE) |
210 | static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id) | 210 | static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id) |
211 | { | 211 | { |
212 | struct device_node *np = NULL; | 212 | struct device_node *np = NULL; |
@@ -291,7 +291,7 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev, | |||
291 | if (of_device_is_compatible(np, "fsl,gianfar-mdio") || | 291 | if (of_device_is_compatible(np, "fsl,gianfar-mdio") || |
292 | of_device_is_compatible(np, "fsl,gianfar-tbi") || | 292 | of_device_is_compatible(np, "fsl,gianfar-tbi") || |
293 | of_device_is_compatible(np, "gianfar")) { | 293 | of_device_is_compatible(np, "gianfar")) { |
294 | #ifdef CONFIG_GIANFAR | 294 | #if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE) |
295 | tbipa = get_gfar_tbipa(regs); | 295 | tbipa = get_gfar_tbipa(regs); |
296 | #else | 296 | #else |
297 | err = -ENODEV; | 297 | err = -ENODEV; |
@@ -299,7 +299,7 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev, | |||
299 | #endif | 299 | #endif |
300 | } else if (of_device_is_compatible(np, "fsl,ucc-mdio") || | 300 | } else if (of_device_is_compatible(np, "fsl,ucc-mdio") || |
301 | of_device_is_compatible(np, "ucc_geth_phy")) { | 301 | of_device_is_compatible(np, "ucc_geth_phy")) { |
302 | #ifdef CONFIG_UCC_GETH | 302 | #if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE) |
303 | u32 id; | 303 | u32 id; |
304 | static u32 mii_mng_master; | 304 | static u32 mii_mng_master; |
305 | 305 | ||
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index ea17319624aa..be480292aba1 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c | |||
@@ -4549,11 +4549,12 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring, | |||
4549 | cleaned = true; | 4549 | cleaned = true; |
4550 | cleaned_count++; | 4550 | cleaned_count++; |
4551 | 4551 | ||
4552 | /* this is the fast path for the non-packet split case */ | ||
4552 | if (!adapter->rx_ps_hdr_size) { | 4553 | if (!adapter->rx_ps_hdr_size) { |
4553 | pci_unmap_single(pdev, buffer_info->dma, | 4554 | pci_unmap_single(pdev, buffer_info->dma, |
4554 | adapter->rx_buffer_len + | 4555 | adapter->rx_buffer_len, |
4555 | NET_IP_ALIGN, | ||
4556 | PCI_DMA_FROMDEVICE); | 4556 | PCI_DMA_FROMDEVICE); |
4557 | buffer_info->dma = 0; | ||
4557 | skb_put(skb, length); | 4558 | skb_put(skb, length); |
4558 | goto send_up; | 4559 | goto send_up; |
4559 | } | 4560 | } |
@@ -4570,8 +4571,9 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring, | |||
4570 | 4571 | ||
4571 | if (!skb_shinfo(skb)->nr_frags) { | 4572 | if (!skb_shinfo(skb)->nr_frags) { |
4572 | pci_unmap_single(pdev, buffer_info->dma, | 4573 | pci_unmap_single(pdev, buffer_info->dma, |
4573 | adapter->rx_ps_hdr_size + NET_IP_ALIGN, | 4574 | adapter->rx_ps_hdr_size, |
4574 | PCI_DMA_FROMDEVICE); | 4575 | PCI_DMA_FROMDEVICE); |
4576 | buffer_info->dma = 0; | ||
4575 | skb_put(skb, hlen); | 4577 | skb_put(skb, hlen); |
4576 | } | 4578 | } |
4577 | 4579 | ||
@@ -4713,7 +4715,6 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, | |||
4713 | bufsz = adapter->rx_ps_hdr_size; | 4715 | bufsz = adapter->rx_ps_hdr_size; |
4714 | else | 4716 | else |
4715 | bufsz = adapter->rx_buffer_len; | 4717 | bufsz = adapter->rx_buffer_len; |
4716 | bufsz += NET_IP_ALIGN; | ||
4717 | 4718 | ||
4718 | while (cleaned_count--) { | 4719 | while (cleaned_count--) { |
4719 | rx_desc = E1000_RX_DESC_ADV(*rx_ring, i); | 4720 | rx_desc = E1000_RX_DESC_ADV(*rx_ring, i); |
@@ -4737,7 +4738,7 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, | |||
4737 | } | 4738 | } |
4738 | 4739 | ||
4739 | if (!buffer_info->skb) { | 4740 | if (!buffer_info->skb) { |
4740 | skb = netdev_alloc_skb(netdev, bufsz); | 4741 | skb = netdev_alloc_skb(netdev, bufsz + NET_IP_ALIGN); |
4741 | if (!skb) { | 4742 | if (!skb) { |
4742 | adapter->alloc_rx_buff_failed++; | 4743 | adapter->alloc_rx_buff_failed++; |
4743 | goto no_buffers; | 4744 | goto no_buffers; |
@@ -5338,6 +5339,9 @@ static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev, | |||
5338 | 5339 | ||
5339 | netif_device_detach(netdev); | 5340 | netif_device_detach(netdev); |
5340 | 5341 | ||
5342 | if (state == pci_channel_io_perm_failure) | ||
5343 | return PCI_ERS_RESULT_DISCONNECT; | ||
5344 | |||
5341 | if (netif_running(netdev)) | 5345 | if (netif_running(netdev)) |
5342 | igb_down(adapter); | 5346 | igb_down(adapter); |
5343 | pci_disable_device(pdev); | 5347 | pci_disable_device(pdev); |
diff --git a/drivers/net/irda/bfin_sir.c b/drivers/net/irda/bfin_sir.c index f3eed6a8fba5..911c082cee5a 100644 --- a/drivers/net/irda/bfin_sir.c +++ b/drivers/net/irda/bfin_sir.c | |||
@@ -677,6 +677,14 @@ static int bfin_sir_init_iobuf(iobuff_t *io, int size) | |||
677 | return 0; | 677 | return 0; |
678 | } | 678 | } |
679 | 679 | ||
680 | static const struct net_device_ops bfin_sir_ndo = { | ||
681 | .ndo_open = bfin_sir_open, | ||
682 | .ndo_stop = bfin_sir_stop, | ||
683 | .ndo_start_xmit = bfin_sir_hard_xmit, | ||
684 | .ndo_do_ioctl = bfin_sir_ioctl, | ||
685 | .ndo_get_stats = bfin_sir_stats, | ||
686 | }; | ||
687 | |||
680 | static int __devinit bfin_sir_probe(struct platform_device *pdev) | 688 | static int __devinit bfin_sir_probe(struct platform_device *pdev) |
681 | { | 689 | { |
682 | struct net_device *dev; | 690 | struct net_device *dev; |
@@ -718,12 +726,8 @@ static int __devinit bfin_sir_probe(struct platform_device *pdev) | |||
718 | if (err) | 726 | if (err) |
719 | goto err_mem_3; | 727 | goto err_mem_3; |
720 | 728 | ||
721 | dev->hard_start_xmit = bfin_sir_hard_xmit; | 729 | dev->netdev_ops = &bfin_sir_ndo; |
722 | dev->open = bfin_sir_open; | 730 | dev->irq = sir_port->irq; |
723 | dev->stop = bfin_sir_stop; | ||
724 | dev->do_ioctl = bfin_sir_ioctl; | ||
725 | dev->get_stats = bfin_sir_stats; | ||
726 | dev->irq = sir_port->irq; | ||
727 | 731 | ||
728 | irda_init_max_qos_capabilies(&self->qos); | 732 | irda_init_max_qos_capabilies(&self->qos); |
729 | 733 | ||
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index 86f4f3e36f27..0f7b6a3a2e68 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c | |||
@@ -139,7 +139,7 @@ static int ixgbe_get_settings(struct net_device *netdev, | |||
139 | ecmd->autoneg = AUTONEG_ENABLE; | 139 | ecmd->autoneg = AUTONEG_ENABLE; |
140 | ecmd->transceiver = XCVR_EXTERNAL; | 140 | ecmd->transceiver = XCVR_EXTERNAL; |
141 | if ((hw->phy.media_type == ixgbe_media_type_copper) || | 141 | if ((hw->phy.media_type == ixgbe_media_type_copper) || |
142 | (hw->mac.type == ixgbe_mac_82599EB)) { | 142 | (hw->phy.multispeed_fiber)) { |
143 | ecmd->supported |= (SUPPORTED_1000baseT_Full | | 143 | ecmd->supported |= (SUPPORTED_1000baseT_Full | |
144 | SUPPORTED_Autoneg); | 144 | SUPPORTED_Autoneg); |
145 | 145 | ||
@@ -217,7 +217,7 @@ static int ixgbe_set_settings(struct net_device *netdev, | |||
217 | s32 err = 0; | 217 | s32 err = 0; |
218 | 218 | ||
219 | if ((hw->phy.media_type == ixgbe_media_type_copper) || | 219 | if ((hw->phy.media_type == ixgbe_media_type_copper) || |
220 | (hw->mac.type == ixgbe_mac_82599EB)) { | 220 | (hw->phy.multispeed_fiber)) { |
221 | /* 10000/copper and 1000/copper must autoneg | 221 | /* 10000/copper and 1000/copper must autoneg |
222 | * this function does not support any duplex forcing, but can | 222 | * this function does not support any duplex forcing, but can |
223 | * limit the advertising of the adapter to only 10000 or 1000 */ | 223 | * limit the advertising of the adapter to only 10000 or 1000 */ |
@@ -245,6 +245,7 @@ static int ixgbe_set_settings(struct net_device *netdev, | |||
245 | } else { | 245 | } else { |
246 | /* in this case we currently only support 10Gb/FULL */ | 246 | /* in this case we currently only support 10Gb/FULL */ |
247 | if ((ecmd->autoneg == AUTONEG_ENABLE) || | 247 | if ((ecmd->autoneg == AUTONEG_ENABLE) || |
248 | (ecmd->advertising != ADVERTISED_10000baseT_Full) || | ||
248 | (ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)) | 249 | (ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)) |
249 | return -EINVAL; | 250 | return -EINVAL; |
250 | } | 251 | } |
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index e756e220db32..5588ef493a3d 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c | |||
@@ -563,7 +563,6 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, | |||
563 | union ixgbe_adv_rx_desc *rx_desc; | 563 | union ixgbe_adv_rx_desc *rx_desc; |
564 | struct ixgbe_rx_buffer *bi; | 564 | struct ixgbe_rx_buffer *bi; |
565 | unsigned int i; | 565 | unsigned int i; |
566 | unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN; | ||
567 | 566 | ||
568 | i = rx_ring->next_to_use; | 567 | i = rx_ring->next_to_use; |
569 | bi = &rx_ring->rx_buffer_info[i]; | 568 | bi = &rx_ring->rx_buffer_info[i]; |
@@ -593,7 +592,9 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, | |||
593 | 592 | ||
594 | if (!bi->skb) { | 593 | if (!bi->skb) { |
595 | struct sk_buff *skb; | 594 | struct sk_buff *skb; |
596 | skb = netdev_alloc_skb(adapter->netdev, bufsz); | 595 | skb = netdev_alloc_skb(adapter->netdev, |
596 | (rx_ring->rx_buf_len + | ||
597 | NET_IP_ALIGN)); | ||
597 | 598 | ||
598 | if (!skb) { | 599 | if (!skb) { |
599 | adapter->alloc_rx_buff_failed++; | 600 | adapter->alloc_rx_buff_failed++; |
@@ -608,7 +609,8 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, | |||
608 | skb_reserve(skb, NET_IP_ALIGN); | 609 | skb_reserve(skb, NET_IP_ALIGN); |
609 | 610 | ||
610 | bi->skb = skb; | 611 | bi->skb = skb; |
611 | bi->dma = pci_map_single(pdev, skb->data, bufsz, | 612 | bi->dma = pci_map_single(pdev, skb->data, |
613 | rx_ring->rx_buf_len, | ||
612 | PCI_DMA_FROMDEVICE); | 614 | PCI_DMA_FROMDEVICE); |
613 | } | 615 | } |
614 | /* Refresh the desc even if buffer_addrs didn't change because | 616 | /* Refresh the desc even if buffer_addrs didn't change because |
@@ -732,6 +734,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, | |||
732 | pci_unmap_single(pdev, rx_buffer_info->dma, | 734 | pci_unmap_single(pdev, rx_buffer_info->dma, |
733 | rx_ring->rx_buf_len, | 735 | rx_ring->rx_buf_len, |
734 | PCI_DMA_FROMDEVICE); | 736 | PCI_DMA_FROMDEVICE); |
737 | rx_buffer_info->dma = 0; | ||
735 | skb_put(skb, len); | 738 | skb_put(skb, len); |
736 | } | 739 | } |
737 | 740 | ||
@@ -2701,7 +2704,10 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter) | |||
2701 | */ | 2704 | */ |
2702 | err = hw->phy.ops.identify(hw); | 2705 | err = hw->phy.ops.identify(hw); |
2703 | if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { | 2706 | if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { |
2704 | DPRINTK(PROBE, ERR, "PHY not supported on this NIC %d\n", err); | 2707 | dev_err(&adapter->pdev->dev, "failed to initialize because " |
2708 | "an unsupported SFP+ module type was detected.\n" | ||
2709 | "Reload the driver after installing a supported " | ||
2710 | "module.\n"); | ||
2705 | ixgbe_down(adapter); | 2711 | ixgbe_down(adapter); |
2706 | return err; | 2712 | return err; |
2707 | } | 2713 | } |
@@ -2812,9 +2818,11 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter, | |||
2812 | } | 2818 | } |
2813 | if (!rx_buffer_info->page) | 2819 | if (!rx_buffer_info->page) |
2814 | continue; | 2820 | continue; |
2815 | pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE / 2, | 2821 | if (rx_buffer_info->page_dma) { |
2816 | PCI_DMA_FROMDEVICE); | 2822 | pci_unmap_page(pdev, rx_buffer_info->page_dma, |
2817 | rx_buffer_info->page_dma = 0; | 2823 | PAGE_SIZE / 2, PCI_DMA_FROMDEVICE); |
2824 | rx_buffer_info->page_dma = 0; | ||
2825 | } | ||
2818 | put_page(rx_buffer_info->page); | 2826 | put_page(rx_buffer_info->page); |
2819 | rx_buffer_info->page = NULL; | 2827 | rx_buffer_info->page = NULL; |
2820 | rx_buffer_info->page_offset = 0; | 2828 | rx_buffer_info->page_offset = 0; |
@@ -3720,10 +3728,11 @@ static void ixgbe_sfp_task(struct work_struct *work) | |||
3720 | goto reschedule; | 3728 | goto reschedule; |
3721 | ret = hw->phy.ops.reset(hw); | 3729 | ret = hw->phy.ops.reset(hw); |
3722 | if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) { | 3730 | if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) { |
3723 | DPRINTK(PROBE, ERR, "failed to initialize because an " | 3731 | dev_err(&adapter->pdev->dev, "failed to initialize " |
3724 | "unsupported SFP+ module type was detected.\n" | 3732 | "because an unsupported SFP+ module type " |
3725 | "Reload the driver after installing a " | 3733 | "was detected.\n" |
3726 | "supported module.\n"); | 3734 | "Reload the driver after installing a " |
3735 | "supported module.\n"); | ||
3727 | unregister_netdev(adapter->netdev); | 3736 | unregister_netdev(adapter->netdev); |
3728 | } else { | 3737 | } else { |
3729 | DPRINTK(PROBE, INFO, "detected SFP+: %d\n", | 3738 | DPRINTK(PROBE, INFO, "detected SFP+: %d\n", |
@@ -4502,7 +4511,8 @@ static void ixgbe_multispeed_fiber_task(struct work_struct *work) | |||
4502 | u32 autoneg; | 4511 | u32 autoneg; |
4503 | 4512 | ||
4504 | adapter->flags |= IXGBE_FLAG_IN_SFP_LINK_TASK; | 4513 | adapter->flags |= IXGBE_FLAG_IN_SFP_LINK_TASK; |
4505 | if (hw->mac.ops.get_link_capabilities) | 4514 | autoneg = hw->phy.autoneg_advertised; |
4515 | if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) | ||
4506 | hw->mac.ops.get_link_capabilities(hw, &autoneg, | 4516 | hw->mac.ops.get_link_capabilities(hw, &autoneg, |
4507 | &hw->mac.autoneg); | 4517 | &hw->mac.autoneg); |
4508 | if (hw->mac.ops.setup_link_speed) | 4518 | if (hw->mac.ops.setup_link_speed) |
@@ -4526,7 +4536,10 @@ static void ixgbe_sfp_config_module_task(struct work_struct *work) | |||
4526 | adapter->flags |= IXGBE_FLAG_IN_SFP_MOD_TASK; | 4536 | adapter->flags |= IXGBE_FLAG_IN_SFP_MOD_TASK; |
4527 | err = hw->phy.ops.identify_sfp(hw); | 4537 | err = hw->phy.ops.identify_sfp(hw); |
4528 | if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { | 4538 | if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { |
4529 | DPRINTK(PROBE, ERR, "PHY not supported on this NIC %d\n", err); | 4539 | dev_err(&adapter->pdev->dev, "failed to initialize because " |
4540 | "an unsupported SFP+ module type was detected.\n" | ||
4541 | "Reload the driver after installing a supported " | ||
4542 | "module.\n"); | ||
4530 | ixgbe_down(adapter); | 4543 | ixgbe_down(adapter); |
4531 | return; | 4544 | return; |
4532 | } | 4545 | } |
@@ -5513,8 +5526,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
5513 | round_jiffies(jiffies + (2 * HZ))); | 5526 | round_jiffies(jiffies + (2 * HZ))); |
5514 | err = 0; | 5527 | err = 0; |
5515 | } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { | 5528 | } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { |
5516 | dev_err(&adapter->pdev->dev, "failed to load because an " | 5529 | dev_err(&adapter->pdev->dev, "failed to initialize because " |
5517 | "unsupported SFP+ module type was detected.\n"); | 5530 | "an unsupported SFP+ module type was detected.\n" |
5531 | "Reload the driver after installing a supported " | ||
5532 | "module.\n"); | ||
5518 | goto err_sw_init; | 5533 | goto err_sw_init; |
5519 | } else if (err) { | 5534 | } else if (err) { |
5520 | dev_err(&adapter->pdev->dev, "HW Init failed: %d\n", err); | 5535 | dev_err(&adapter->pdev->dev, "HW Init failed: %d\n", err); |
diff --git a/drivers/net/mdio.c b/drivers/net/mdio.c index dc45e9856c35..6851bdb2ce29 100644 --- a/drivers/net/mdio.c +++ b/drivers/net/mdio.c | |||
@@ -14,6 +14,10 @@ | |||
14 | #include <linux/mdio.h> | 14 | #include <linux/mdio.h> |
15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
16 | 16 | ||
17 | MODULE_DESCRIPTION("Generic support for MDIO-compatible transceivers"); | ||
18 | MODULE_AUTHOR("Copyright 2006-2009 Solarflare Communications Inc."); | ||
19 | MODULE_LICENSE("GPL"); | ||
20 | |||
17 | /** | 21 | /** |
18 | * mdio45_probe - probe for an MDIO (clause 45) device | 22 | * mdio45_probe - probe for an MDIO (clause 45) device |
19 | * @mdio: MDIO interface | 23 | * @mdio: MDIO interface |
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c index e02bafdd3682..93f4abd990a9 100644 --- a/drivers/net/mlx4/en_netdev.c +++ b/drivers/net/mlx4/en_netdev.c | |||
@@ -668,7 +668,7 @@ int mlx4_en_start_port(struct net_device *dev) | |||
668 | queue_work(mdev->workqueue, &priv->mcast_task); | 668 | queue_work(mdev->workqueue, &priv->mcast_task); |
669 | 669 | ||
670 | priv->port_up = true; | 670 | priv->port_up = true; |
671 | netif_start_queue(dev); | 671 | netif_tx_start_all_queues(dev); |
672 | return 0; | 672 | return 0; |
673 | 673 | ||
674 | mac_err: | 674 | mac_err: |
@@ -700,14 +700,14 @@ void mlx4_en_stop_port(struct net_device *dev) | |||
700 | en_dbg(DRV, priv, "stop port called while port already down\n"); | 700 | en_dbg(DRV, priv, "stop port called while port already down\n"); |
701 | return; | 701 | return; |
702 | } | 702 | } |
703 | netif_stop_queue(dev); | ||
704 | 703 | ||
705 | /* Synchronize with tx routine */ | 704 | /* Synchronize with tx routine */ |
706 | netif_tx_lock_bh(dev); | 705 | netif_tx_lock_bh(dev); |
707 | priv->port_up = false; | 706 | netif_tx_stop_all_queues(dev); |
708 | netif_tx_unlock_bh(dev); | 707 | netif_tx_unlock_bh(dev); |
709 | 708 | ||
710 | /* close port*/ | 709 | /* close port*/ |
710 | priv->port_up = false; | ||
711 | mlx4_CLOSE_PORT(mdev->dev, priv->port); | 711 | mlx4_CLOSE_PORT(mdev->dev, priv->port); |
712 | 712 | ||
713 | /* Unregister Mac address for the port */ | 713 | /* Unregister Mac address for the port */ |
@@ -881,7 +881,6 @@ void mlx4_en_destroy_netdev(struct net_device *dev) | |||
881 | mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE); | 881 | mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE); |
882 | 882 | ||
883 | cancel_delayed_work(&priv->stats_task); | 883 | cancel_delayed_work(&priv->stats_task); |
884 | cancel_delayed_work(&priv->refill_task); | ||
885 | /* flush any pending task for this netdev */ | 884 | /* flush any pending task for this netdev */ |
886 | flush_workqueue(mdev->workqueue); | 885 | flush_workqueue(mdev->workqueue); |
887 | 886 | ||
@@ -986,7 +985,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, | |||
986 | spin_lock_init(&priv->stats_lock); | 985 | spin_lock_init(&priv->stats_lock); |
987 | INIT_WORK(&priv->mcast_task, mlx4_en_do_set_multicast); | 986 | INIT_WORK(&priv->mcast_task, mlx4_en_do_set_multicast); |
988 | INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac); | 987 | INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac); |
989 | INIT_DELAYED_WORK(&priv->refill_task, mlx4_en_rx_refill); | ||
990 | INIT_WORK(&priv->watchdog_task, mlx4_en_restart); | 988 | INIT_WORK(&priv->watchdog_task, mlx4_en_restart); |
991 | INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); | 989 | INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); |
992 | INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); | 990 | INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); |
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c index 5a14899c1e25..91bdfdfd431f 100644 --- a/drivers/net/mlx4/en_rx.c +++ b/drivers/net/mlx4/en_rx.c | |||
@@ -269,31 +269,6 @@ reduce_rings: | |||
269 | return 0; | 269 | return 0; |
270 | } | 270 | } |
271 | 271 | ||
272 | static int mlx4_en_fill_rx_buf(struct net_device *dev, | ||
273 | struct mlx4_en_rx_ring *ring) | ||
274 | { | ||
275 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
276 | int num = 0; | ||
277 | int err; | ||
278 | |||
279 | while ((u32) (ring->prod - ring->cons) < ring->actual_size) { | ||
280 | err = mlx4_en_prepare_rx_desc(priv, ring, ring->prod & | ||
281 | ring->size_mask); | ||
282 | if (err) { | ||
283 | if (netif_msg_rx_err(priv)) | ||
284 | en_warn(priv, "Failed preparing rx descriptor\n"); | ||
285 | priv->port_stats.rx_alloc_failed++; | ||
286 | break; | ||
287 | } | ||
288 | ++num; | ||
289 | ++ring->prod; | ||
290 | } | ||
291 | if ((u32) (ring->prod - ring->cons) == ring->actual_size) | ||
292 | ring->full = 1; | ||
293 | |||
294 | return num; | ||
295 | } | ||
296 | |||
297 | static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv, | 272 | static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv, |
298 | struct mlx4_en_rx_ring *ring) | 273 | struct mlx4_en_rx_ring *ring) |
299 | { | 274 | { |
@@ -312,42 +287,6 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv, | |||
312 | } | 287 | } |
313 | } | 288 | } |
314 | 289 | ||
315 | |||
316 | void mlx4_en_rx_refill(struct work_struct *work) | ||
317 | { | ||
318 | struct delayed_work *delay = to_delayed_work(work); | ||
319 | struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv, | ||
320 | refill_task); | ||
321 | struct mlx4_en_dev *mdev = priv->mdev; | ||
322 | struct net_device *dev = priv->dev; | ||
323 | struct mlx4_en_rx_ring *ring; | ||
324 | int need_refill = 0; | ||
325 | int i; | ||
326 | |||
327 | mutex_lock(&mdev->state_lock); | ||
328 | if (!mdev->device_up || !priv->port_up) | ||
329 | goto out; | ||
330 | |||
331 | /* We only get here if there are no receive buffers, so we can't race | ||
332 | * with Rx interrupts while filling buffers */ | ||
333 | for (i = 0; i < priv->rx_ring_num; i++) { | ||
334 | ring = &priv->rx_ring[i]; | ||
335 | if (ring->need_refill) { | ||
336 | if (mlx4_en_fill_rx_buf(dev, ring)) { | ||
337 | ring->need_refill = 0; | ||
338 | mlx4_en_update_rx_prod_db(ring); | ||
339 | } else | ||
340 | need_refill = 1; | ||
341 | } | ||
342 | } | ||
343 | if (need_refill) | ||
344 | queue_delayed_work(mdev->workqueue, &priv->refill_task, HZ); | ||
345 | |||
346 | out: | ||
347 | mutex_unlock(&mdev->state_lock); | ||
348 | } | ||
349 | |||
350 | |||
351 | int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, | 290 | int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, |
352 | struct mlx4_en_rx_ring *ring, u32 size, u16 stride) | 291 | struct mlx4_en_rx_ring *ring, u32 size, u16 stride) |
353 | { | 292 | { |
@@ -457,9 +396,6 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv) | |||
457 | ring_ind--; | 396 | ring_ind--; |
458 | goto err_allocator; | 397 | goto err_allocator; |
459 | } | 398 | } |
460 | |||
461 | /* Fill Rx buffers */ | ||
462 | ring->full = 0; | ||
463 | } | 399 | } |
464 | err = mlx4_en_fill_rx_buffers(priv); | 400 | err = mlx4_en_fill_rx_buffers(priv); |
465 | if (err) | 401 | if (err) |
@@ -647,33 +583,6 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv, | |||
647 | return skb; | 583 | return skb; |
648 | } | 584 | } |
649 | 585 | ||
650 | static void mlx4_en_copy_desc(struct mlx4_en_priv *priv, | ||
651 | struct mlx4_en_rx_ring *ring, | ||
652 | int from, int to, int num) | ||
653 | { | ||
654 | struct skb_frag_struct *skb_frags_from; | ||
655 | struct skb_frag_struct *skb_frags_to; | ||
656 | struct mlx4_en_rx_desc *rx_desc_from; | ||
657 | struct mlx4_en_rx_desc *rx_desc_to; | ||
658 | int from_index, to_index; | ||
659 | int nr, i; | ||
660 | |||
661 | for (i = 0; i < num; i++) { | ||
662 | from_index = (from + i) & ring->size_mask; | ||
663 | to_index = (to + i) & ring->size_mask; | ||
664 | skb_frags_from = ring->rx_info + (from_index << priv->log_rx_info); | ||
665 | skb_frags_to = ring->rx_info + (to_index << priv->log_rx_info); | ||
666 | rx_desc_from = ring->buf + (from_index << ring->log_stride); | ||
667 | rx_desc_to = ring->buf + (to_index << ring->log_stride); | ||
668 | |||
669 | for (nr = 0; nr < priv->num_frags; nr++) { | ||
670 | skb_frags_to[nr].page = skb_frags_from[nr].page; | ||
671 | skb_frags_to[nr].page_offset = skb_frags_from[nr].page_offset; | ||
672 | rx_desc_to->data[nr].addr = rx_desc_from->data[nr].addr; | ||
673 | } | ||
674 | } | ||
675 | } | ||
676 | |||
677 | 586 | ||
678 | int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget) | 587 | int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget) |
679 | { | 588 | { |
@@ -821,11 +730,6 @@ out: | |||
821 | wmb(); /* ensure HW sees CQ consumer before we post new buffers */ | 730 | wmb(); /* ensure HW sees CQ consumer before we post new buffers */ |
822 | ring->cons = cq->mcq.cons_index; | 731 | ring->cons = cq->mcq.cons_index; |
823 | ring->prod += polled; /* Polled descriptors were realocated in place */ | 732 | ring->prod += polled; /* Polled descriptors were realocated in place */ |
824 | if (unlikely(!ring->full)) { | ||
825 | mlx4_en_copy_desc(priv, ring, ring->cons - polled, | ||
826 | ring->prod - polled, polled); | ||
827 | mlx4_en_fill_rx_buf(dev, ring); | ||
828 | } | ||
829 | mlx4_en_update_rx_prod_db(ring); | 733 | mlx4_en_update_rx_prod_db(ring); |
830 | return polled; | 734 | return polled; |
831 | } | 735 | } |
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c index 5dc7466ad035..08c43f2ae72b 100644 --- a/drivers/net/mlx4/en_tx.c +++ b/drivers/net/mlx4/en_tx.c | |||
@@ -515,16 +515,9 @@ static int get_real_size(struct sk_buff *skb, struct net_device *dev, | |||
515 | else { | 515 | else { |
516 | if (netif_msg_tx_err(priv)) | 516 | if (netif_msg_tx_err(priv)) |
517 | en_warn(priv, "Non-linear headers\n"); | 517 | en_warn(priv, "Non-linear headers\n"); |
518 | dev_kfree_skb_any(skb); | ||
519 | return 0; | 518 | return 0; |
520 | } | 519 | } |
521 | } | 520 | } |
522 | if (unlikely(*lso_header_size > MAX_LSO_HDR_SIZE)) { | ||
523 | if (netif_msg_tx_err(priv)) | ||
524 | en_warn(priv, "LSO header size too big\n"); | ||
525 | dev_kfree_skb_any(skb); | ||
526 | return 0; | ||
527 | } | ||
528 | } else { | 521 | } else { |
529 | *lso_header_size = 0; | 522 | *lso_header_size = 0; |
530 | if (!is_inline(skb, NULL)) | 523 | if (!is_inline(skb, NULL)) |
@@ -616,13 +609,9 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) | |||
616 | int lso_header_size; | 609 | int lso_header_size; |
617 | void *fragptr; | 610 | void *fragptr; |
618 | 611 | ||
619 | if (unlikely(!skb->len)) { | ||
620 | dev_kfree_skb_any(skb); | ||
621 | return NETDEV_TX_OK; | ||
622 | } | ||
623 | real_size = get_real_size(skb, dev, &lso_header_size); | 612 | real_size = get_real_size(skb, dev, &lso_header_size); |
624 | if (unlikely(!real_size)) | 613 | if (unlikely(!real_size)) |
625 | return NETDEV_TX_OK; | 614 | goto tx_drop; |
626 | 615 | ||
627 | /* Allign descriptor to TXBB size */ | 616 | /* Allign descriptor to TXBB size */ |
628 | desc_size = ALIGN(real_size, TXBB_SIZE); | 617 | desc_size = ALIGN(real_size, TXBB_SIZE); |
@@ -630,8 +619,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) | |||
630 | if (unlikely(nr_txbb > MAX_DESC_TXBBS)) { | 619 | if (unlikely(nr_txbb > MAX_DESC_TXBBS)) { |
631 | if (netif_msg_tx_err(priv)) | 620 | if (netif_msg_tx_err(priv)) |
632 | en_warn(priv, "Oversized header or SG list\n"); | 621 | en_warn(priv, "Oversized header or SG list\n"); |
633 | dev_kfree_skb_any(skb); | 622 | goto tx_drop; |
634 | return NETDEV_TX_OK; | ||
635 | } | 623 | } |
636 | 624 | ||
637 | tx_ind = skb->queue_mapping; | 625 | tx_ind = skb->queue_mapping; |
@@ -653,14 +641,6 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) | |||
653 | return NETDEV_TX_BUSY; | 641 | return NETDEV_TX_BUSY; |
654 | } | 642 | } |
655 | 643 | ||
656 | /* Now that we know what Tx ring to use */ | ||
657 | if (unlikely(!priv->port_up)) { | ||
658 | if (netif_msg_tx_err(priv)) | ||
659 | en_warn(priv, "xmit: port down!\n"); | ||
660 | dev_kfree_skb_any(skb); | ||
661 | return NETDEV_TX_OK; | ||
662 | } | ||
663 | |||
664 | /* Track current inflight packets for performance analysis */ | 644 | /* Track current inflight packets for performance analysis */ |
665 | AVG_PERF_COUNTER(priv->pstats.inflight_avg, | 645 | AVG_PERF_COUNTER(priv->pstats.inflight_avg, |
666 | (u32) (ring->prod - ring->cons - 1)); | 646 | (u32) (ring->prod - ring->cons - 1)); |
@@ -785,5 +765,10 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) | |||
785 | mlx4_en_xmit_poll(priv, tx_ind); | 765 | mlx4_en_xmit_poll(priv, tx_ind); |
786 | 766 | ||
787 | return 0; | 767 | return 0; |
768 | |||
769 | tx_drop: | ||
770 | dev_kfree_skb_any(skb); | ||
771 | priv->stats.tx_dropped++; | ||
772 | return NETDEV_TX_OK; | ||
788 | } | 773 | } |
789 | 774 | ||
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h index d43a9e4c2aea..c7c5e86804ff 100644 --- a/drivers/net/mlx4/mlx4_en.h +++ b/drivers/net/mlx4/mlx4_en.h | |||
@@ -99,7 +99,6 @@ | |||
99 | #define RSS_FACTOR 2 | 99 | #define RSS_FACTOR 2 |
100 | #define TXBB_SIZE 64 | 100 | #define TXBB_SIZE 64 |
101 | #define HEADROOM (2048 / TXBB_SIZE + 1) | 101 | #define HEADROOM (2048 / TXBB_SIZE + 1) |
102 | #define MAX_LSO_HDR_SIZE 92 | ||
103 | #define STAMP_STRIDE 64 | 102 | #define STAMP_STRIDE 64 |
104 | #define STAMP_DWORDS (STAMP_STRIDE / 4) | 103 | #define STAMP_DWORDS (STAMP_STRIDE / 4) |
105 | #define STAMP_SHIFT 31 | 104 | #define STAMP_SHIFT 31 |
@@ -296,8 +295,6 @@ struct mlx4_en_rx_ring { | |||
296 | u32 prod; | 295 | u32 prod; |
297 | u32 cons; | 296 | u32 cons; |
298 | u32 buf_size; | 297 | u32 buf_size; |
299 | int need_refill; | ||
300 | int full; | ||
301 | void *buf; | 298 | void *buf; |
302 | void *rx_info; | 299 | void *rx_info; |
303 | unsigned long bytes; | 300 | unsigned long bytes; |
@@ -495,7 +492,6 @@ struct mlx4_en_priv { | |||
495 | struct mlx4_en_cq rx_cq[MAX_RX_RINGS]; | 492 | struct mlx4_en_cq rx_cq[MAX_RX_RINGS]; |
496 | struct work_struct mcast_task; | 493 | struct work_struct mcast_task; |
497 | struct work_struct mac_task; | 494 | struct work_struct mac_task; |
498 | struct delayed_work refill_task; | ||
499 | struct work_struct watchdog_task; | 495 | struct work_struct watchdog_task; |
500 | struct work_struct linkstate_task; | 496 | struct work_struct linkstate_task; |
501 | struct delayed_work stats_task; | 497 | struct delayed_work stats_task; |
@@ -565,7 +561,6 @@ void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv, | |||
565 | int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv); | 561 | int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv); |
566 | void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv); | 562 | void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv); |
567 | int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring); | 563 | int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring); |
568 | void mlx4_en_rx_refill(struct work_struct *work); | ||
569 | void mlx4_en_rx_irq(struct mlx4_cq *mcq); | 564 | void mlx4_en_rx_irq(struct mlx4_cq *mcq); |
570 | 565 | ||
571 | int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode); | 566 | int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode); |
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c index 5887e4764d22..f96948be0a44 100644 --- a/drivers/net/mlx4/mr.c +++ b/drivers/net/mlx4/mr.c | |||
@@ -399,11 +399,14 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt, | |||
399 | if (!mtts) | 399 | if (!mtts) |
400 | return -ENOMEM; | 400 | return -ENOMEM; |
401 | 401 | ||
402 | dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle, | ||
403 | npages * sizeof (u64), DMA_TO_DEVICE); | ||
404 | |||
402 | for (i = 0; i < npages; ++i) | 405 | for (i = 0; i < npages; ++i) |
403 | mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT); | 406 | mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT); |
404 | 407 | ||
405 | dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle, | 408 | dma_sync_single_for_device(&dev->pdev->dev, dma_handle, |
406 | npages * sizeof (u64), DMA_TO_DEVICE); | 409 | npages * sizeof (u64), DMA_TO_DEVICE); |
407 | 410 | ||
408 | return 0; | 411 | return 0; |
409 | } | 412 | } |
@@ -547,11 +550,14 @@ int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list | |||
547 | /* Make sure MPT status is visible before writing MTT entries */ | 550 | /* Make sure MPT status is visible before writing MTT entries */ |
548 | wmb(); | 551 | wmb(); |
549 | 552 | ||
553 | dma_sync_single_for_cpu(&dev->pdev->dev, fmr->dma_handle, | ||
554 | npages * sizeof(u64), DMA_TO_DEVICE); | ||
555 | |||
550 | for (i = 0; i < npages; ++i) | 556 | for (i = 0; i < npages; ++i) |
551 | fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT); | 557 | fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT); |
552 | 558 | ||
553 | dma_sync_single_for_cpu(&dev->pdev->dev, fmr->dma_handle, | 559 | dma_sync_single_for_device(&dev->pdev->dev, fmr->dma_handle, |
554 | npages * sizeof(u64), DMA_TO_DEVICE); | 560 | npages * sizeof(u64), DMA_TO_DEVICE); |
555 | 561 | ||
556 | fmr->mpt->key = cpu_to_be32(key); | 562 | fmr->mpt->key = cpu_to_be32(key); |
557 | fmr->mpt->lkey = cpu_to_be32(key); | 563 | fmr->mpt->lkey = cpu_to_be32(key); |
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c index 745ae8b4a2e8..0f32db3e92ad 100644 --- a/drivers/net/mv643xx_eth.c +++ b/drivers/net/mv643xx_eth.c | |||
@@ -1750,12 +1750,12 @@ static void mv643xx_eth_program_unicast_filter(struct net_device *dev) | |||
1750 | 1750 | ||
1751 | uc_addr_set(mp, dev->dev_addr); | 1751 | uc_addr_set(mp, dev->dev_addr); |
1752 | 1752 | ||
1753 | port_config = rdlp(mp, PORT_CONFIG); | 1753 | port_config = rdlp(mp, PORT_CONFIG) & ~UNICAST_PROMISCUOUS_MODE; |
1754 | |||
1754 | nibbles = uc_addr_filter_mask(dev); | 1755 | nibbles = uc_addr_filter_mask(dev); |
1755 | if (!nibbles) { | 1756 | if (!nibbles) { |
1756 | port_config |= UNICAST_PROMISCUOUS_MODE; | 1757 | port_config |= UNICAST_PROMISCUOUS_MODE; |
1757 | wrlp(mp, PORT_CONFIG, port_config); | 1758 | nibbles = 0xffff; |
1758 | return; | ||
1759 | } | 1759 | } |
1760 | 1760 | ||
1761 | for (i = 0; i < 16; i += 4) { | 1761 | for (i = 0; i < 16; i += 4) { |
@@ -1776,7 +1776,6 @@ static void mv643xx_eth_program_unicast_filter(struct net_device *dev) | |||
1776 | wrl(mp, off, v); | 1776 | wrl(mp, off, v); |
1777 | } | 1777 | } |
1778 | 1778 | ||
1779 | port_config &= ~UNICAST_PROMISCUOUS_MODE; | ||
1780 | wrlp(mp, PORT_CONFIG, port_config); | 1779 | wrlp(mp, PORT_CONFIG, port_config); |
1781 | } | 1780 | } |
1782 | 1781 | ||
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c index bdb143d2b5c7..055bb61d6e77 100644 --- a/drivers/net/netxen/netxen_nic_init.c +++ b/drivers/net/netxen/netxen_nic_init.c | |||
@@ -944,28 +944,31 @@ int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val) | |||
944 | u32 val = 0; | 944 | u32 val = 0; |
945 | int retries = 60; | 945 | int retries = 60; |
946 | 946 | ||
947 | if (!pegtune_val) { | 947 | if (pegtune_val) |
948 | do { | 948 | return 0; |
949 | val = NXRD32(adapter, CRB_CMDPEG_STATE); | ||
950 | 949 | ||
951 | if (val == PHAN_INITIALIZE_COMPLETE || | 950 | do { |
952 | val == PHAN_INITIALIZE_ACK) | 951 | val = NXRD32(adapter, CRB_CMDPEG_STATE); |
953 | return 0; | ||
954 | 952 | ||
955 | msleep(500); | 953 | switch (val) { |
954 | case PHAN_INITIALIZE_COMPLETE: | ||
955 | case PHAN_INITIALIZE_ACK: | ||
956 | return 0; | ||
957 | case PHAN_INITIALIZE_FAILED: | ||
958 | goto out_err; | ||
959 | default: | ||
960 | break; | ||
961 | } | ||
956 | 962 | ||
957 | } while (--retries); | 963 | msleep(500); |
958 | 964 | ||
959 | if (!retries) { | 965 | } while (--retries); |
960 | pegtune_val = NXRD32(adapter, | ||
961 | NETXEN_ROMUSB_GLB_PEGTUNE_DONE); | ||
962 | printk(KERN_WARNING "netxen_phantom_init: init failed, " | ||
963 | "pegtune_val=%x\n", pegtune_val); | ||
964 | return -1; | ||
965 | } | ||
966 | } | ||
967 | 966 | ||
968 | return 0; | 967 | NXWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED); |
968 | |||
969 | out_err: | ||
970 | dev_warn(&adapter->pdev->dev, "firmware init failed\n"); | ||
971 | return -EIO; | ||
969 | } | 972 | } |
970 | 973 | ||
971 | static int | 974 | static int |
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c index 71daa3d5f114..2919a2d12bf4 100644 --- a/drivers/net/netxen/netxen_nic_main.c +++ b/drivers/net/netxen/netxen_nic_main.c | |||
@@ -705,7 +705,7 @@ netxen_start_firmware(struct netxen_adapter *adapter, int request_fw) | |||
705 | first_driver = (adapter->ahw.pci_func == 0); | 705 | first_driver = (adapter->ahw.pci_func == 0); |
706 | 706 | ||
707 | if (!first_driver) | 707 | if (!first_driver) |
708 | return 0; | 708 | goto wait_init; |
709 | 709 | ||
710 | first_boot = NXRD32(adapter, NETXEN_CAM_RAM(0x1fc)); | 710 | first_boot = NXRD32(adapter, NETXEN_CAM_RAM(0x1fc)); |
711 | 711 | ||
@@ -752,6 +752,7 @@ netxen_start_firmware(struct netxen_adapter *adapter, int request_fw) | |||
752 | | (_NETXEN_NIC_LINUX_SUBVERSION); | 752 | | (_NETXEN_NIC_LINUX_SUBVERSION); |
753 | NXWR32(adapter, CRB_DRIVER_VERSION, val); | 753 | NXWR32(adapter, CRB_DRIVER_VERSION, val); |
754 | 754 | ||
755 | wait_init: | ||
755 | /* Handshake with the card before we register the devices. */ | 756 | /* Handshake with the card before we register the devices. */ |
756 | err = netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE); | 757 | err = netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE); |
757 | if (err) { | 758 | if (err) { |
@@ -1178,6 +1179,7 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev) | |||
1178 | free_netdev(netdev); | 1179 | free_netdev(netdev); |
1179 | } | 1180 | } |
1180 | 1181 | ||
1182 | #ifdef CONFIG_PM | ||
1181 | static int | 1183 | static int |
1182 | netxen_nic_suspend(struct pci_dev *pdev, pm_message_t state) | 1184 | netxen_nic_suspend(struct pci_dev *pdev, pm_message_t state) |
1183 | { | 1185 | { |
@@ -1242,6 +1244,7 @@ netxen_nic_resume(struct pci_dev *pdev) | |||
1242 | 1244 | ||
1243 | return 0; | 1245 | return 0; |
1244 | } | 1246 | } |
1247 | #endif | ||
1245 | 1248 | ||
1246 | static int netxen_nic_open(struct net_device *netdev) | 1249 | static int netxen_nic_open(struct net_device *netdev) |
1247 | { | 1250 | { |
@@ -1771,8 +1774,10 @@ static struct pci_driver netxen_driver = { | |||
1771 | .id_table = netxen_pci_tbl, | 1774 | .id_table = netxen_pci_tbl, |
1772 | .probe = netxen_nic_probe, | 1775 | .probe = netxen_nic_probe, |
1773 | .remove = __devexit_p(netxen_nic_remove), | 1776 | .remove = __devexit_p(netxen_nic_remove), |
1777 | #ifdef CONFIG_PM | ||
1774 | .suspend = netxen_nic_suspend, | 1778 | .suspend = netxen_nic_suspend, |
1775 | .resume = netxen_nic_resume | 1779 | .resume = netxen_nic_resume |
1780 | #endif | ||
1776 | }; | 1781 | }; |
1777 | 1782 | ||
1778 | /* Driver Registration on NetXen card */ | 1783 | /* Driver Registration on NetXen card */ |
diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c index 6de8399d6dd9..17c116bb332c 100644 --- a/drivers/net/ppp_async.c +++ b/drivers/net/ppp_async.c | |||
@@ -356,7 +356,6 @@ ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf, | |||
356 | if (!skb_queue_empty(&ap->rqueue)) | 356 | if (!skb_queue_empty(&ap->rqueue)) |
357 | tasklet_schedule(&ap->tsk); | 357 | tasklet_schedule(&ap->tsk); |
358 | ap_put(ap); | 358 | ap_put(ap); |
359 | tty_unthrottle(tty); | ||
360 | } | 359 | } |
361 | 360 | ||
362 | static void | 361 | static void |
diff --git a/drivers/net/ppp_synctty.c b/drivers/net/ppp_synctty.c index d2fa2db13586..aa3d39f38e22 100644 --- a/drivers/net/ppp_synctty.c +++ b/drivers/net/ppp_synctty.c | |||
@@ -397,7 +397,6 @@ ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf, | |||
397 | if (!skb_queue_empty(&ap->rqueue)) | 397 | if (!skb_queue_empty(&ap->rqueue)) |
398 | tasklet_schedule(&ap->tsk); | 398 | tasklet_schedule(&ap->tsk); |
399 | sp_put(ap); | 399 | sp_put(ap); |
400 | tty_unthrottle(tty); | ||
401 | } | 400 | } |
402 | 401 | ||
403 | static void | 402 | static void |
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c index 8a823ecc99a9..3e4b67aaa6ea 100644 --- a/drivers/net/qla3xxx.c +++ b/drivers/net/qla3xxx.c | |||
@@ -3142,6 +3142,7 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev) | |||
3142 | (void __iomem *)port_regs; | 3142 | (void __iomem *)port_regs; |
3143 | u32 delay = 10; | 3143 | u32 delay = 10; |
3144 | int status = 0; | 3144 | int status = 0; |
3145 | unsigned long hw_flags = 0; | ||
3145 | 3146 | ||
3146 | if(ql_mii_setup(qdev)) | 3147 | if(ql_mii_setup(qdev)) |
3147 | return -1; | 3148 | return -1; |
@@ -3150,7 +3151,8 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev) | |||
3150 | ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, | 3151 | ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, |
3151 | (ISP_SERIAL_PORT_IF_WE | | 3152 | (ISP_SERIAL_PORT_IF_WE | |
3152 | (ISP_SERIAL_PORT_IF_WE << 16))); | 3153 | (ISP_SERIAL_PORT_IF_WE << 16))); |
3153 | 3154 | /* Give the PHY time to come out of reset. */ | |
3155 | mdelay(100); | ||
3154 | qdev->port_link_state = LS_DOWN; | 3156 | qdev->port_link_state = LS_DOWN; |
3155 | netif_carrier_off(qdev->ndev); | 3157 | netif_carrier_off(qdev->ndev); |
3156 | 3158 | ||
@@ -3350,7 +3352,9 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev) | |||
3350 | value = ql_read_page0_reg(qdev, &port_regs->portStatus); | 3352 | value = ql_read_page0_reg(qdev, &port_regs->portStatus); |
3351 | if (value & PORT_STATUS_IC) | 3353 | if (value & PORT_STATUS_IC) |
3352 | break; | 3354 | break; |
3355 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); | ||
3353 | msleep(500); | 3356 | msleep(500); |
3357 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); | ||
3354 | } while (--delay); | 3358 | } while (--delay); |
3355 | 3359 | ||
3356 | if (delay == 0) { | 3360 | if (delay == 0) { |
@@ -3837,7 +3841,9 @@ static void ql_reset_work(struct work_struct *work) | |||
3837 | 16) | ISP_CONTROL_RI)); | 3841 | 16) | ISP_CONTROL_RI)); |
3838 | } | 3842 | } |
3839 | 3843 | ||
3844 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); | ||
3840 | ssleep(1); | 3845 | ssleep(1); |
3846 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); | ||
3841 | } while (--max_wait_time); | 3847 | } while (--max_wait_time); |
3842 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); | 3848 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); |
3843 | 3849 | ||
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 4e22462684c9..4b53b58d75fc 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c | |||
@@ -51,9 +51,6 @@ | |||
51 | #define TX_BUFFS_AVAIL(tp) \ | 51 | #define TX_BUFFS_AVAIL(tp) \ |
52 | (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1) | 52 | (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1) |
53 | 53 | ||
54 | /* Maximum events (Rx packets, etc.) to handle at each interrupt. */ | ||
55 | static const int max_interrupt_work = 20; | ||
56 | |||
57 | /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). | 54 | /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). |
58 | The RTL chips use a 64 element hash table based on the Ethernet CRC. */ | 55 | The RTL chips use a 64 element hash table based on the Ethernet CRC. */ |
59 | static const int multicast_filter_limit = 32; | 56 | static const int multicast_filter_limit = 32; |
diff --git a/drivers/net/s6gmac.c b/drivers/net/s6gmac.c new file mode 100644 index 000000000000..5345e47b35ac --- /dev/null +++ b/drivers/net/s6gmac.c | |||
@@ -0,0 +1,1073 @@ | |||
1 | /* | ||
2 | * Ethernet driver for S6105 on chip network device | ||
3 | * (c)2008 emlix GmbH http://www.emlix.com | ||
4 | * Authors: Oskar Schirmer <os@emlix.com> | ||
5 | * Daniel Gloeckner <dg@emlix.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * as published by the Free Software Foundation; either version | ||
10 | * 2 of the License, or (at your option) any later version. | ||
11 | */ | ||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/interrupt.h> | ||
15 | #include <linux/types.h> | ||
16 | #include <linux/delay.h> | ||
17 | #include <linux/init.h> | ||
18 | #include <linux/spinlock.h> | ||
19 | #include <linux/netdevice.h> | ||
20 | #include <linux/etherdevice.h> | ||
21 | #include <linux/if.h> | ||
22 | #include <linux/stddef.h> | ||
23 | #include <linux/mii.h> | ||
24 | #include <linux/phy.h> | ||
25 | #include <linux/platform_device.h> | ||
26 | #include <variant/hardware.h> | ||
27 | #include <variant/dmac.h> | ||
28 | |||
29 | #define DRV_NAME "s6gmac" | ||
30 | #define DRV_PRMT DRV_NAME ": " | ||
31 | |||
32 | |||
33 | /* register declarations */ | ||
34 | |||
35 | #define S6_GMAC_MACCONF1 0x000 | ||
36 | #define S6_GMAC_MACCONF1_TXENA 0 | ||
37 | #define S6_GMAC_MACCONF1_SYNCTX 1 | ||
38 | #define S6_GMAC_MACCONF1_RXENA 2 | ||
39 | #define S6_GMAC_MACCONF1_SYNCRX 3 | ||
40 | #define S6_GMAC_MACCONF1_TXFLOWCTRL 4 | ||
41 | #define S6_GMAC_MACCONF1_RXFLOWCTRL 5 | ||
42 | #define S6_GMAC_MACCONF1_LOOPBACK 8 | ||
43 | #define S6_GMAC_MACCONF1_RESTXFUNC 16 | ||
44 | #define S6_GMAC_MACCONF1_RESRXFUNC 17 | ||
45 | #define S6_GMAC_MACCONF1_RESTXMACCTRL 18 | ||
46 | #define S6_GMAC_MACCONF1_RESRXMACCTRL 19 | ||
47 | #define S6_GMAC_MACCONF1_SIMULRES 30 | ||
48 | #define S6_GMAC_MACCONF1_SOFTRES 31 | ||
49 | #define S6_GMAC_MACCONF2 0x004 | ||
50 | #define S6_GMAC_MACCONF2_FULL 0 | ||
51 | #define S6_GMAC_MACCONF2_CRCENA 1 | ||
52 | #define S6_GMAC_MACCONF2_PADCRCENA 2 | ||
53 | #define S6_GMAC_MACCONF2_LENGTHFCHK 4 | ||
54 | #define S6_GMAC_MACCONF2_HUGEFRAMENA 5 | ||
55 | #define S6_GMAC_MACCONF2_IFMODE 8 | ||
56 | #define S6_GMAC_MACCONF2_IFMODE_NIBBLE 1 | ||
57 | #define S6_GMAC_MACCONF2_IFMODE_BYTE 2 | ||
58 | #define S6_GMAC_MACCONF2_IFMODE_MASK 3 | ||
59 | #define S6_GMAC_MACCONF2_PREAMBLELEN 12 | ||
60 | #define S6_GMAC_MACCONF2_PREAMBLELEN_MASK 0x0F | ||
61 | #define S6_GMAC_MACIPGIFG 0x008 | ||
62 | #define S6_GMAC_MACIPGIFG_B2BINTERPGAP 0 | ||
63 | #define S6_GMAC_MACIPGIFG_B2BINTERPGAP_MASK 0x7F | ||
64 | #define S6_GMAC_MACIPGIFG_MINIFGENFORCE 8 | ||
65 | #define S6_GMAC_MACIPGIFG_B2BINTERPGAP2 16 | ||
66 | #define S6_GMAC_MACIPGIFG_B2BINTERPGAP1 24 | ||
67 | #define S6_GMAC_MACHALFDUPLEX 0x00C | ||
68 | #define S6_GMAC_MACHALFDUPLEX_COLLISWIN 0 | ||
69 | #define S6_GMAC_MACHALFDUPLEX_COLLISWIN_MASK 0x3F | ||
70 | #define S6_GMAC_MACHALFDUPLEX_RETXMAX 12 | ||
71 | #define S6_GMAC_MACHALFDUPLEX_RETXMAX_MASK 0x0F | ||
72 | #define S6_GMAC_MACHALFDUPLEX_EXCESSDEF 16 | ||
73 | #define S6_GMAC_MACHALFDUPLEX_NOBACKOFF 17 | ||
74 | #define S6_GMAC_MACHALFDUPLEX_BPNOBCKOF 18 | ||
75 | #define S6_GMAC_MACHALFDUPLEX_ALTBEBENA 19 | ||
76 | #define S6_GMAC_MACHALFDUPLEX_ALTBEBTRN 20 | ||
77 | #define S6_GMAC_MACHALFDUPLEX_ALTBEBTR_MASK 0x0F | ||
78 | #define S6_GMAC_MACMAXFRAMELEN 0x010 | ||
79 | #define S6_GMAC_MACMIICONF 0x020 | ||
80 | #define S6_GMAC_MACMIICONF_CSEL 0 | ||
81 | #define S6_GMAC_MACMIICONF_CSEL_DIV10 0 | ||
82 | #define S6_GMAC_MACMIICONF_CSEL_DIV12 1 | ||
83 | #define S6_GMAC_MACMIICONF_CSEL_DIV14 2 | ||
84 | #define S6_GMAC_MACMIICONF_CSEL_DIV18 3 | ||
85 | #define S6_GMAC_MACMIICONF_CSEL_DIV24 4 | ||
86 | #define S6_GMAC_MACMIICONF_CSEL_DIV34 5 | ||
87 | #define S6_GMAC_MACMIICONF_CSEL_DIV68 6 | ||
88 | #define S6_GMAC_MACMIICONF_CSEL_DIV168 7 | ||
89 | #define S6_GMAC_MACMIICONF_CSEL_MASK 7 | ||
90 | #define S6_GMAC_MACMIICONF_PREAMBLESUPR 4 | ||
91 | #define S6_GMAC_MACMIICONF_SCANAUTOINCR 5 | ||
92 | #define S6_GMAC_MACMIICMD 0x024 | ||
93 | #define S6_GMAC_MACMIICMD_READ 0 | ||
94 | #define S6_GMAC_MACMIICMD_SCAN 1 | ||
95 | #define S6_GMAC_MACMIIADDR 0x028 | ||
96 | #define S6_GMAC_MACMIIADDR_REG 0 | ||
97 | #define S6_GMAC_MACMIIADDR_REG_MASK 0x1F | ||
98 | #define S6_GMAC_MACMIIADDR_PHY 8 | ||
99 | #define S6_GMAC_MACMIIADDR_PHY_MASK 0x1F | ||
100 | #define S6_GMAC_MACMIICTRL 0x02C | ||
101 | #define S6_GMAC_MACMIISTAT 0x030 | ||
102 | #define S6_GMAC_MACMIIINDI 0x034 | ||
103 | #define S6_GMAC_MACMIIINDI_BUSY 0 | ||
104 | #define S6_GMAC_MACMIIINDI_SCAN 1 | ||
105 | #define S6_GMAC_MACMIIINDI_INVAL 2 | ||
106 | #define S6_GMAC_MACINTERFSTAT 0x03C | ||
107 | #define S6_GMAC_MACINTERFSTAT_LINKFAIL 3 | ||
108 | #define S6_GMAC_MACINTERFSTAT_EXCESSDEF 9 | ||
109 | #define S6_GMAC_MACSTATADDR1 0x040 | ||
110 | #define S6_GMAC_MACSTATADDR2 0x044 | ||
111 | |||
112 | #define S6_GMAC_FIFOCONF0 0x048 | ||
113 | #define S6_GMAC_FIFOCONF0_HSTRSTWT 0 | ||
114 | #define S6_GMAC_FIFOCONF0_HSTRSTSR 1 | ||
115 | #define S6_GMAC_FIFOCONF0_HSTRSTFR 2 | ||
116 | #define S6_GMAC_FIFOCONF0_HSTRSTST 3 | ||
117 | #define S6_GMAC_FIFOCONF0_HSTRSTFT 4 | ||
118 | #define S6_GMAC_FIFOCONF0_WTMENREQ 8 | ||
119 | #define S6_GMAC_FIFOCONF0_SRFENREQ 9 | ||
120 | #define S6_GMAC_FIFOCONF0_FRFENREQ 10 | ||
121 | #define S6_GMAC_FIFOCONF0_STFENREQ 11 | ||
122 | #define S6_GMAC_FIFOCONF0_FTFENREQ 12 | ||
123 | #define S6_GMAC_FIFOCONF0_WTMENRPLY 16 | ||
124 | #define S6_GMAC_FIFOCONF0_SRFENRPLY 17 | ||
125 | #define S6_GMAC_FIFOCONF0_FRFENRPLY 18 | ||
126 | #define S6_GMAC_FIFOCONF0_STFENRPLY 19 | ||
127 | #define S6_GMAC_FIFOCONF0_FTFENRPLY 20 | ||
128 | #define S6_GMAC_FIFOCONF1 0x04C | ||
129 | #define S6_GMAC_FIFOCONF2 0x050 | ||
130 | #define S6_GMAC_FIFOCONF2_CFGLWM 0 | ||
131 | #define S6_GMAC_FIFOCONF2_CFGHWM 16 | ||
132 | #define S6_GMAC_FIFOCONF3 0x054 | ||
133 | #define S6_GMAC_FIFOCONF3_CFGFTTH 0 | ||
134 | #define S6_GMAC_FIFOCONF3_CFGHWMFT 16 | ||
135 | #define S6_GMAC_FIFOCONF4 0x058 | ||
136 | #define S6_GMAC_FIFOCONF_RSV_PREVDROP 0 | ||
137 | #define S6_GMAC_FIFOCONF_RSV_RUNT 1 | ||
138 | #define S6_GMAC_FIFOCONF_RSV_FALSECAR 2 | ||
139 | #define S6_GMAC_FIFOCONF_RSV_CODEERR 3 | ||
140 | #define S6_GMAC_FIFOCONF_RSV_CRCERR 4 | ||
141 | #define S6_GMAC_FIFOCONF_RSV_LENGTHERR 5 | ||
142 | #define S6_GMAC_FIFOCONF_RSV_LENRANGE 6 | ||
143 | #define S6_GMAC_FIFOCONF_RSV_OK 7 | ||
144 | #define S6_GMAC_FIFOCONF_RSV_MULTICAST 8 | ||
145 | #define S6_GMAC_FIFOCONF_RSV_BROADCAST 9 | ||
146 | #define S6_GMAC_FIFOCONF_RSV_DRIBBLE 10 | ||
147 | #define S6_GMAC_FIFOCONF_RSV_CTRLFRAME 11 | ||
148 | #define S6_GMAC_FIFOCONF_RSV_PAUSECTRL 12 | ||
149 | #define S6_GMAC_FIFOCONF_RSV_UNOPCODE 13 | ||
150 | #define S6_GMAC_FIFOCONF_RSV_VLANTAG 14 | ||
151 | #define S6_GMAC_FIFOCONF_RSV_LONGEVENT 15 | ||
152 | #define S6_GMAC_FIFOCONF_RSV_TRUNCATED 16 | ||
153 | #define S6_GMAC_FIFOCONF_RSV_MASK 0x3FFFF | ||
154 | #define S6_GMAC_FIFOCONF5 0x05C | ||
155 | #define S6_GMAC_FIFOCONF5_DROPLT64 18 | ||
156 | #define S6_GMAC_FIFOCONF5_CFGBYTM 19 | ||
157 | #define S6_GMAC_FIFOCONF5_RXDROPSIZE 20 | ||
158 | #define S6_GMAC_FIFOCONF5_RXDROPSIZE_MASK 0xF | ||
159 | |||
160 | #define S6_GMAC_STAT_REGS 0x080 | ||
161 | #define S6_GMAC_STAT_SIZE_MIN 12 | ||
162 | #define S6_GMAC_STATTR64 0x080 | ||
163 | #define S6_GMAC_STATTR64_SIZE 18 | ||
164 | #define S6_GMAC_STATTR127 0x084 | ||
165 | #define S6_GMAC_STATTR127_SIZE 18 | ||
166 | #define S6_GMAC_STATTR255 0x088 | ||
167 | #define S6_GMAC_STATTR255_SIZE 18 | ||
168 | #define S6_GMAC_STATTR511 0x08C | ||
169 | #define S6_GMAC_STATTR511_SIZE 18 | ||
170 | #define S6_GMAC_STATTR1K 0x090 | ||
171 | #define S6_GMAC_STATTR1K_SIZE 18 | ||
172 | #define S6_GMAC_STATTRMAX 0x094 | ||
173 | #define S6_GMAC_STATTRMAX_SIZE 18 | ||
174 | #define S6_GMAC_STATTRMGV 0x098 | ||
175 | #define S6_GMAC_STATTRMGV_SIZE 18 | ||
176 | #define S6_GMAC_STATRBYT 0x09C | ||
177 | #define S6_GMAC_STATRBYT_SIZE 24 | ||
178 | #define S6_GMAC_STATRPKT 0x0A0 | ||
179 | #define S6_GMAC_STATRPKT_SIZE 18 | ||
180 | #define S6_GMAC_STATRFCS 0x0A4 | ||
181 | #define S6_GMAC_STATRFCS_SIZE 12 | ||
182 | #define S6_GMAC_STATRMCA 0x0A8 | ||
183 | #define S6_GMAC_STATRMCA_SIZE 18 | ||
184 | #define S6_GMAC_STATRBCA 0x0AC | ||
185 | #define S6_GMAC_STATRBCA_SIZE 22 | ||
186 | #define S6_GMAC_STATRXCF 0x0B0 | ||
187 | #define S6_GMAC_STATRXCF_SIZE 18 | ||
188 | #define S6_GMAC_STATRXPF 0x0B4 | ||
189 | #define S6_GMAC_STATRXPF_SIZE 12 | ||
190 | #define S6_GMAC_STATRXUO 0x0B8 | ||
191 | #define S6_GMAC_STATRXUO_SIZE 12 | ||
192 | #define S6_GMAC_STATRALN 0x0BC | ||
193 | #define S6_GMAC_STATRALN_SIZE 12 | ||
194 | #define S6_GMAC_STATRFLR 0x0C0 | ||
195 | #define S6_GMAC_STATRFLR_SIZE 16 | ||
196 | #define S6_GMAC_STATRCDE 0x0C4 | ||
197 | #define S6_GMAC_STATRCDE_SIZE 12 | ||
198 | #define S6_GMAC_STATRCSE 0x0C8 | ||
199 | #define S6_GMAC_STATRCSE_SIZE 12 | ||
200 | #define S6_GMAC_STATRUND 0x0CC | ||
201 | #define S6_GMAC_STATRUND_SIZE 12 | ||
202 | #define S6_GMAC_STATROVR 0x0D0 | ||
203 | #define S6_GMAC_STATROVR_SIZE 12 | ||
204 | #define S6_GMAC_STATRFRG 0x0D4 | ||
205 | #define S6_GMAC_STATRFRG_SIZE 12 | ||
206 | #define S6_GMAC_STATRJBR 0x0D8 | ||
207 | #define S6_GMAC_STATRJBR_SIZE 12 | ||
208 | #define S6_GMAC_STATRDRP 0x0DC | ||
209 | #define S6_GMAC_STATRDRP_SIZE 12 | ||
210 | #define S6_GMAC_STATTBYT 0x0E0 | ||
211 | #define S6_GMAC_STATTBYT_SIZE 24 | ||
212 | #define S6_GMAC_STATTPKT 0x0E4 | ||
213 | #define S6_GMAC_STATTPKT_SIZE 18 | ||
214 | #define S6_GMAC_STATTMCA 0x0E8 | ||
215 | #define S6_GMAC_STATTMCA_SIZE 18 | ||
216 | #define S6_GMAC_STATTBCA 0x0EC | ||
217 | #define S6_GMAC_STATTBCA_SIZE 18 | ||
218 | #define S6_GMAC_STATTXPF 0x0F0 | ||
219 | #define S6_GMAC_STATTXPF_SIZE 12 | ||
220 | #define S6_GMAC_STATTDFR 0x0F4 | ||
221 | #define S6_GMAC_STATTDFR_SIZE 12 | ||
222 | #define S6_GMAC_STATTEDF 0x0F8 | ||
223 | #define S6_GMAC_STATTEDF_SIZE 12 | ||
224 | #define S6_GMAC_STATTSCL 0x0FC | ||
225 | #define S6_GMAC_STATTSCL_SIZE 12 | ||
226 | #define S6_GMAC_STATTMCL 0x100 | ||
227 | #define S6_GMAC_STATTMCL_SIZE 12 | ||
228 | #define S6_GMAC_STATTLCL 0x104 | ||
229 | #define S6_GMAC_STATTLCL_SIZE 12 | ||
230 | #define S6_GMAC_STATTXCL 0x108 | ||
231 | #define S6_GMAC_STATTXCL_SIZE 12 | ||
232 | #define S6_GMAC_STATTNCL 0x10C | ||
233 | #define S6_GMAC_STATTNCL_SIZE 13 | ||
234 | #define S6_GMAC_STATTPFH 0x110 | ||
235 | #define S6_GMAC_STATTPFH_SIZE 12 | ||
236 | #define S6_GMAC_STATTDRP 0x114 | ||
237 | #define S6_GMAC_STATTDRP_SIZE 12 | ||
238 | #define S6_GMAC_STATTJBR 0x118 | ||
239 | #define S6_GMAC_STATTJBR_SIZE 12 | ||
240 | #define S6_GMAC_STATTFCS 0x11C | ||
241 | #define S6_GMAC_STATTFCS_SIZE 12 | ||
242 | #define S6_GMAC_STATTXCF 0x120 | ||
243 | #define S6_GMAC_STATTXCF_SIZE 12 | ||
244 | #define S6_GMAC_STATTOVR 0x124 | ||
245 | #define S6_GMAC_STATTOVR_SIZE 12 | ||
246 | #define S6_GMAC_STATTUND 0x128 | ||
247 | #define S6_GMAC_STATTUND_SIZE 12 | ||
248 | #define S6_GMAC_STATTFRG 0x12C | ||
249 | #define S6_GMAC_STATTFRG_SIZE 12 | ||
250 | #define S6_GMAC_STATCARRY(n) (0x130 + 4*(n)) | ||
251 | #define S6_GMAC_STATCARRYMSK(n) (0x138 + 4*(n)) | ||
252 | #define S6_GMAC_STATCARRY1_RDRP 0 | ||
253 | #define S6_GMAC_STATCARRY1_RJBR 1 | ||
254 | #define S6_GMAC_STATCARRY1_RFRG 2 | ||
255 | #define S6_GMAC_STATCARRY1_ROVR 3 | ||
256 | #define S6_GMAC_STATCARRY1_RUND 4 | ||
257 | #define S6_GMAC_STATCARRY1_RCSE 5 | ||
258 | #define S6_GMAC_STATCARRY1_RCDE 6 | ||
259 | #define S6_GMAC_STATCARRY1_RFLR 7 | ||
260 | #define S6_GMAC_STATCARRY1_RALN 8 | ||
261 | #define S6_GMAC_STATCARRY1_RXUO 9 | ||
262 | #define S6_GMAC_STATCARRY1_RXPF 10 | ||
263 | #define S6_GMAC_STATCARRY1_RXCF 11 | ||
264 | #define S6_GMAC_STATCARRY1_RBCA 12 | ||
265 | #define S6_GMAC_STATCARRY1_RMCA 13 | ||
266 | #define S6_GMAC_STATCARRY1_RFCS 14 | ||
267 | #define S6_GMAC_STATCARRY1_RPKT 15 | ||
268 | #define S6_GMAC_STATCARRY1_RBYT 16 | ||
269 | #define S6_GMAC_STATCARRY1_TRMGV 25 | ||
270 | #define S6_GMAC_STATCARRY1_TRMAX 26 | ||
271 | #define S6_GMAC_STATCARRY1_TR1K 27 | ||
272 | #define S6_GMAC_STATCARRY1_TR511 28 | ||
273 | #define S6_GMAC_STATCARRY1_TR255 29 | ||
274 | #define S6_GMAC_STATCARRY1_TR127 30 | ||
275 | #define S6_GMAC_STATCARRY1_TR64 31 | ||
276 | #define S6_GMAC_STATCARRY2_TDRP 0 | ||
277 | #define S6_GMAC_STATCARRY2_TPFH 1 | ||
278 | #define S6_GMAC_STATCARRY2_TNCL 2 | ||
279 | #define S6_GMAC_STATCARRY2_TXCL 3 | ||
280 | #define S6_GMAC_STATCARRY2_TLCL 4 | ||
281 | #define S6_GMAC_STATCARRY2_TMCL 5 | ||
282 | #define S6_GMAC_STATCARRY2_TSCL 6 | ||
283 | #define S6_GMAC_STATCARRY2_TEDF 7 | ||
284 | #define S6_GMAC_STATCARRY2_TDFR 8 | ||
285 | #define S6_GMAC_STATCARRY2_TXPF 9 | ||
286 | #define S6_GMAC_STATCARRY2_TBCA 10 | ||
287 | #define S6_GMAC_STATCARRY2_TMCA 11 | ||
288 | #define S6_GMAC_STATCARRY2_TPKT 12 | ||
289 | #define S6_GMAC_STATCARRY2_TBYT 13 | ||
290 | #define S6_GMAC_STATCARRY2_TFRG 14 | ||
291 | #define S6_GMAC_STATCARRY2_TUND 15 | ||
292 | #define S6_GMAC_STATCARRY2_TOVR 16 | ||
293 | #define S6_GMAC_STATCARRY2_TXCF 17 | ||
294 | #define S6_GMAC_STATCARRY2_TFCS 18 | ||
295 | #define S6_GMAC_STATCARRY2_TJBR 19 | ||
296 | |||
297 | #define S6_GMAC_HOST_PBLKCTRL 0x140 | ||
298 | #define S6_GMAC_HOST_PBLKCTRL_TXENA 0 | ||
299 | #define S6_GMAC_HOST_PBLKCTRL_RXENA 1 | ||
300 | #define S6_GMAC_HOST_PBLKCTRL_TXSRES 2 | ||
301 | #define S6_GMAC_HOST_PBLKCTRL_RXSRES 3 | ||
302 | #define S6_GMAC_HOST_PBLKCTRL_TXBSIZ 8 | ||
303 | #define S6_GMAC_HOST_PBLKCTRL_RXBSIZ 12 | ||
304 | #define S6_GMAC_HOST_PBLKCTRL_SIZ_16 4 | ||
305 | #define S6_GMAC_HOST_PBLKCTRL_SIZ_32 5 | ||
306 | #define S6_GMAC_HOST_PBLKCTRL_SIZ_64 6 | ||
307 | #define S6_GMAC_HOST_PBLKCTRL_SIZ_128 7 | ||
308 | #define S6_GMAC_HOST_PBLKCTRL_SIZ_MASK 0xF | ||
309 | #define S6_GMAC_HOST_PBLKCTRL_STATENA 16 | ||
310 | #define S6_GMAC_HOST_PBLKCTRL_STATAUTOZ 17 | ||
311 | #define S6_GMAC_HOST_PBLKCTRL_STATCLEAR 18 | ||
312 | #define S6_GMAC_HOST_PBLKCTRL_RGMII 19 | ||
313 | #define S6_GMAC_HOST_INTMASK 0x144 | ||
314 | #define S6_GMAC_HOST_INTSTAT 0x148 | ||
315 | #define S6_GMAC_HOST_INT_TXBURSTOVER 3 | ||
316 | #define S6_GMAC_HOST_INT_TXPREWOVER 4 | ||
317 | #define S6_GMAC_HOST_INT_RXBURSTUNDER 5 | ||
318 | #define S6_GMAC_HOST_INT_RXPOSTRFULL 6 | ||
319 | #define S6_GMAC_HOST_INT_RXPOSTRUNDER 7 | ||
320 | #define S6_GMAC_HOST_RXFIFOHWM 0x14C | ||
321 | #define S6_GMAC_HOST_CTRLFRAMXP 0x150 | ||
322 | #define S6_GMAC_HOST_DSTADDRLO(n) (0x160 + 8*(n)) | ||
323 | #define S6_GMAC_HOST_DSTADDRHI(n) (0x164 + 8*(n)) | ||
324 | #define S6_GMAC_HOST_DSTMASKLO(n) (0x180 + 8*(n)) | ||
325 | #define S6_GMAC_HOST_DSTMASKHI(n) (0x184 + 8*(n)) | ||
326 | |||
327 | #define S6_GMAC_BURST_PREWR 0x1B0 | ||
328 | #define S6_GMAC_BURST_PREWR_LEN 0 | ||
329 | #define S6_GMAC_BURST_PREWR_LEN_MASK ((1 << 20) - 1) | ||
330 | #define S6_GMAC_BURST_PREWR_CFE 20 | ||
331 | #define S6_GMAC_BURST_PREWR_PPE 21 | ||
332 | #define S6_GMAC_BURST_PREWR_FCS 22 | ||
333 | #define S6_GMAC_BURST_PREWR_PAD 23 | ||
334 | #define S6_GMAC_BURST_POSTRD 0x1D0 | ||
335 | #define S6_GMAC_BURST_POSTRD_LEN 0 | ||
336 | #define S6_GMAC_BURST_POSTRD_LEN_MASK ((1 << 20) - 1) | ||
337 | #define S6_GMAC_BURST_POSTRD_DROP 20 | ||
338 | |||
339 | |||
340 | /* data handling */ | ||
341 | |||
342 | #define S6_NUM_TX_SKB 8 /* must be larger than TX fifo size */ | ||
343 | #define S6_NUM_RX_SKB 16 | ||
344 | #define S6_MAX_FRLEN 1536 | ||
345 | |||
346 | struct s6gmac { | ||
347 | u32 reg; | ||
348 | u32 tx_dma; | ||
349 | u32 rx_dma; | ||
350 | u32 io; | ||
351 | u8 tx_chan; | ||
352 | u8 rx_chan; | ||
353 | spinlock_t lock; | ||
354 | u8 tx_skb_i, tx_skb_o; | ||
355 | u8 rx_skb_i, rx_skb_o; | ||
356 | struct sk_buff *tx_skb[S6_NUM_TX_SKB]; | ||
357 | struct sk_buff *rx_skb[S6_NUM_RX_SKB]; | ||
358 | unsigned long carry[sizeof(struct net_device_stats) / sizeof(long)]; | ||
359 | unsigned long stats[sizeof(struct net_device_stats) / sizeof(long)]; | ||
360 | struct phy_device *phydev; | ||
361 | struct { | ||
362 | struct mii_bus *bus; | ||
363 | int irq[PHY_MAX_ADDR]; | ||
364 | } mii; | ||
365 | struct { | ||
366 | unsigned int mbit; | ||
367 | u8 giga; | ||
368 | u8 isup; | ||
369 | u8 full; | ||
370 | } link; | ||
371 | }; | ||
372 | |||
373 | static void s6gmac_rx_fillfifo(struct s6gmac *pd) | ||
374 | { | ||
375 | struct sk_buff *skb; | ||
376 | while ((((u8)(pd->rx_skb_i - pd->rx_skb_o)) < S6_NUM_RX_SKB) | ||
377 | && (!s6dmac_fifo_full(pd->rx_dma, pd->rx_chan)) | ||
378 | && (skb = dev_alloc_skb(S6_MAX_FRLEN + 2))) { | ||
379 | pd->rx_skb[(pd->rx_skb_i++) % S6_NUM_RX_SKB] = skb; | ||
380 | s6dmac_put_fifo_cache(pd->rx_dma, pd->rx_chan, | ||
381 | pd->io, (u32)skb->data, S6_MAX_FRLEN); | ||
382 | } | ||
383 | } | ||
384 | |||
385 | static void s6gmac_rx_interrupt(struct net_device *dev) | ||
386 | { | ||
387 | struct s6gmac *pd = netdev_priv(dev); | ||
388 | u32 pfx; | ||
389 | struct sk_buff *skb; | ||
390 | while (((u8)(pd->rx_skb_i - pd->rx_skb_o)) > | ||
391 | s6dmac_pending_count(pd->rx_dma, pd->rx_chan)) { | ||
392 | skb = pd->rx_skb[(pd->rx_skb_o++) % S6_NUM_RX_SKB]; | ||
393 | pfx = readl(pd->reg + S6_GMAC_BURST_POSTRD); | ||
394 | if (pfx & (1 << S6_GMAC_BURST_POSTRD_DROP)) { | ||
395 | dev_kfree_skb_irq(skb); | ||
396 | } else { | ||
397 | skb_put(skb, (pfx >> S6_GMAC_BURST_POSTRD_LEN) | ||
398 | & S6_GMAC_BURST_POSTRD_LEN_MASK); | ||
399 | skb->dev = dev; | ||
400 | skb->protocol = eth_type_trans(skb, dev); | ||
401 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
402 | netif_rx(skb); | ||
403 | } | ||
404 | } | ||
405 | } | ||
406 | |||
407 | static void s6gmac_tx_interrupt(struct net_device *dev) | ||
408 | { | ||
409 | struct s6gmac *pd = netdev_priv(dev); | ||
410 | while (((u8)(pd->tx_skb_i - pd->tx_skb_o)) > | ||
411 | s6dmac_pending_count(pd->tx_dma, pd->tx_chan)) { | ||
412 | dev_kfree_skb_irq(pd->tx_skb[(pd->tx_skb_o++) % S6_NUM_TX_SKB]); | ||
413 | } | ||
414 | if (!s6dmac_fifo_full(pd->tx_dma, pd->tx_chan)) | ||
415 | netif_wake_queue(dev); | ||
416 | } | ||
417 | |||
418 | struct s6gmac_statinf { | ||
419 | unsigned reg_size : 4; /* 0: unused */ | ||
420 | unsigned reg_off : 6; | ||
421 | unsigned net_index : 6; | ||
422 | }; | ||
423 | |||
424 | #define S6_STATS_B (8 * sizeof(u32)) | ||
425 | #define S6_STATS_C(b, r, f) [b] = { \ | ||
426 | BUILD_BUG_ON_ZERO(r##_SIZE < S6_GMAC_STAT_SIZE_MIN) + \ | ||
427 | BUILD_BUG_ON_ZERO((r##_SIZE - (S6_GMAC_STAT_SIZE_MIN - 1)) \ | ||
428 | >= (1<<4)) + \ | ||
429 | r##_SIZE - (S6_GMAC_STAT_SIZE_MIN - 1), \ | ||
430 | BUILD_BUG_ON_ZERO(((unsigned)((r - S6_GMAC_STAT_REGS) / sizeof(u32))) \ | ||
431 | >= ((1<<6)-1)) + \ | ||
432 | (r - S6_GMAC_STAT_REGS) / sizeof(u32), \ | ||
433 | BUILD_BUG_ON_ZERO((offsetof(struct net_device_stats, f)) \ | ||
434 | % sizeof(unsigned long)) + \ | ||
435 | BUILD_BUG_ON_ZERO((((unsigned)(offsetof(struct net_device_stats, f)) \ | ||
436 | / sizeof(unsigned long)) >= (1<<6))) + \ | ||
437 | BUILD_BUG_ON_ZERO((sizeof(((struct net_device_stats *)0)->f) \ | ||
438 | != sizeof(unsigned long))) + \ | ||
439 | (offsetof(struct net_device_stats, f)) / sizeof(unsigned long)}, | ||
440 | |||
441 | static const struct s6gmac_statinf statinf[2][S6_STATS_B] = { { | ||
442 | S6_STATS_C(S6_GMAC_STATCARRY1_RBYT, S6_GMAC_STATRBYT, rx_bytes) | ||
443 | S6_STATS_C(S6_GMAC_STATCARRY1_RPKT, S6_GMAC_STATRPKT, rx_packets) | ||
444 | S6_STATS_C(S6_GMAC_STATCARRY1_RFCS, S6_GMAC_STATRFCS, rx_crc_errors) | ||
445 | S6_STATS_C(S6_GMAC_STATCARRY1_RMCA, S6_GMAC_STATRMCA, multicast) | ||
446 | S6_STATS_C(S6_GMAC_STATCARRY1_RALN, S6_GMAC_STATRALN, rx_frame_errors) | ||
447 | S6_STATS_C(S6_GMAC_STATCARRY1_RFLR, S6_GMAC_STATRFLR, rx_length_errors) | ||
448 | S6_STATS_C(S6_GMAC_STATCARRY1_RCDE, S6_GMAC_STATRCDE, rx_missed_errors) | ||
449 | S6_STATS_C(S6_GMAC_STATCARRY1_RUND, S6_GMAC_STATRUND, rx_length_errors) | ||
450 | S6_STATS_C(S6_GMAC_STATCARRY1_ROVR, S6_GMAC_STATROVR, rx_length_errors) | ||
451 | S6_STATS_C(S6_GMAC_STATCARRY1_RFRG, S6_GMAC_STATRFRG, rx_crc_errors) | ||
452 | S6_STATS_C(S6_GMAC_STATCARRY1_RJBR, S6_GMAC_STATRJBR, rx_crc_errors) | ||
453 | S6_STATS_C(S6_GMAC_STATCARRY1_RDRP, S6_GMAC_STATRDRP, rx_dropped) | ||
454 | }, { | ||
455 | S6_STATS_C(S6_GMAC_STATCARRY2_TBYT, S6_GMAC_STATTBYT, tx_bytes) | ||
456 | S6_STATS_C(S6_GMAC_STATCARRY2_TPKT, S6_GMAC_STATTPKT, tx_packets) | ||
457 | S6_STATS_C(S6_GMAC_STATCARRY2_TEDF, S6_GMAC_STATTEDF, tx_aborted_errors) | ||
458 | S6_STATS_C(S6_GMAC_STATCARRY2_TXCL, S6_GMAC_STATTXCL, tx_aborted_errors) | ||
459 | S6_STATS_C(S6_GMAC_STATCARRY2_TNCL, S6_GMAC_STATTNCL, collisions) | ||
460 | S6_STATS_C(S6_GMAC_STATCARRY2_TDRP, S6_GMAC_STATTDRP, tx_dropped) | ||
461 | S6_STATS_C(S6_GMAC_STATCARRY2_TJBR, S6_GMAC_STATTJBR, tx_errors) | ||
462 | S6_STATS_C(S6_GMAC_STATCARRY2_TFCS, S6_GMAC_STATTFCS, tx_errors) | ||
463 | S6_STATS_C(S6_GMAC_STATCARRY2_TOVR, S6_GMAC_STATTOVR, tx_errors) | ||
464 | S6_STATS_C(S6_GMAC_STATCARRY2_TUND, S6_GMAC_STATTUND, tx_errors) | ||
465 | S6_STATS_C(S6_GMAC_STATCARRY2_TFRG, S6_GMAC_STATTFRG, tx_errors) | ||
466 | } }; | ||
467 | |||
468 | static void s6gmac_stats_collect(struct s6gmac *pd, | ||
469 | const struct s6gmac_statinf *inf) | ||
470 | { | ||
471 | int b; | ||
472 | for (b = 0; b < S6_STATS_B; b++) { | ||
473 | if (inf[b].reg_size) { | ||
474 | pd->stats[inf[b].net_index] += | ||
475 | readl(pd->reg + S6_GMAC_STAT_REGS | ||
476 | + sizeof(u32) * inf[b].reg_off); | ||
477 | } | ||
478 | } | ||
479 | } | ||
480 | |||
481 | static void s6gmac_stats_carry(struct s6gmac *pd, | ||
482 | const struct s6gmac_statinf *inf, u32 mask) | ||
483 | { | ||
484 | int b; | ||
485 | while (mask) { | ||
486 | b = fls(mask) - 1; | ||
487 | mask &= ~(1 << b); | ||
488 | pd->carry[inf[b].net_index] += (1 << inf[b].reg_size); | ||
489 | } | ||
490 | } | ||
491 | |||
492 | static inline u32 s6gmac_stats_pending(struct s6gmac *pd, int carry) | ||
493 | { | ||
494 | int r = readl(pd->reg + S6_GMAC_STATCARRY(carry)) & | ||
495 | ~readl(pd->reg + S6_GMAC_STATCARRYMSK(carry)); | ||
496 | return r; | ||
497 | } | ||
498 | |||
499 | static inline void s6gmac_stats_interrupt(struct s6gmac *pd, int carry) | ||
500 | { | ||
501 | u32 mask; | ||
502 | mask = s6gmac_stats_pending(pd, carry); | ||
503 | if (mask) { | ||
504 | writel(mask, pd->reg + S6_GMAC_STATCARRY(carry)); | ||
505 | s6gmac_stats_carry(pd, &statinf[carry][0], mask); | ||
506 | } | ||
507 | } | ||
508 | |||
509 | static irqreturn_t s6gmac_interrupt(int irq, void *dev_id) | ||
510 | { | ||
511 | struct net_device *dev = (struct net_device *)dev_id; | ||
512 | struct s6gmac *pd = netdev_priv(dev); | ||
513 | if (!dev) | ||
514 | return IRQ_NONE; | ||
515 | spin_lock(&pd->lock); | ||
516 | if (s6dmac_termcnt_irq(pd->rx_dma, pd->rx_chan)) | ||
517 | s6gmac_rx_interrupt(dev); | ||
518 | s6gmac_rx_fillfifo(pd); | ||
519 | if (s6dmac_termcnt_irq(pd->tx_dma, pd->tx_chan)) | ||
520 | s6gmac_tx_interrupt(dev); | ||
521 | s6gmac_stats_interrupt(pd, 0); | ||
522 | s6gmac_stats_interrupt(pd, 1); | ||
523 | spin_unlock(&pd->lock); | ||
524 | return IRQ_HANDLED; | ||
525 | } | ||
526 | |||
527 | static inline void s6gmac_set_dstaddr(struct s6gmac *pd, int n, | ||
528 | u32 addrlo, u32 addrhi, u32 masklo, u32 maskhi) | ||
529 | { | ||
530 | writel(addrlo, pd->reg + S6_GMAC_HOST_DSTADDRLO(n)); | ||
531 | writel(addrhi, pd->reg + S6_GMAC_HOST_DSTADDRHI(n)); | ||
532 | writel(masklo, pd->reg + S6_GMAC_HOST_DSTMASKLO(n)); | ||
533 | writel(maskhi, pd->reg + S6_GMAC_HOST_DSTMASKHI(n)); | ||
534 | } | ||
535 | |||
536 | static inline void s6gmac_stop_device(struct net_device *dev) | ||
537 | { | ||
538 | struct s6gmac *pd = netdev_priv(dev); | ||
539 | writel(0, pd->reg + S6_GMAC_MACCONF1); | ||
540 | } | ||
541 | |||
542 | static inline void s6gmac_init_device(struct net_device *dev) | ||
543 | { | ||
544 | struct s6gmac *pd = netdev_priv(dev); | ||
545 | int is_rgmii = !!(pd->phydev->supported | ||
546 | & (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Half)); | ||
547 | #if 0 | ||
548 | writel(1 << S6_GMAC_MACCONF1_SYNCTX | | ||
549 | 1 << S6_GMAC_MACCONF1_SYNCRX | | ||
550 | 1 << S6_GMAC_MACCONF1_TXFLOWCTRL | | ||
551 | 1 << S6_GMAC_MACCONF1_RXFLOWCTRL | | ||
552 | 1 << S6_GMAC_MACCONF1_RESTXFUNC | | ||
553 | 1 << S6_GMAC_MACCONF1_RESRXFUNC | | ||
554 | 1 << S6_GMAC_MACCONF1_RESTXMACCTRL | | ||
555 | 1 << S6_GMAC_MACCONF1_RESRXMACCTRL, | ||
556 | pd->reg + S6_GMAC_MACCONF1); | ||
557 | #endif | ||
558 | writel(1 << S6_GMAC_MACCONF1_SOFTRES, pd->reg + S6_GMAC_MACCONF1); | ||
559 | udelay(1000); | ||
560 | writel(1 << S6_GMAC_MACCONF1_TXENA | 1 << S6_GMAC_MACCONF1_RXENA, | ||
561 | pd->reg + S6_GMAC_MACCONF1); | ||
562 | writel(1 << S6_GMAC_HOST_PBLKCTRL_TXSRES | | ||
563 | 1 << S6_GMAC_HOST_PBLKCTRL_RXSRES, | ||
564 | pd->reg + S6_GMAC_HOST_PBLKCTRL); | ||
565 | writel(S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_TXBSIZ | | ||
566 | S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_RXBSIZ | | ||
567 | 1 << S6_GMAC_HOST_PBLKCTRL_STATENA | | ||
568 | 1 << S6_GMAC_HOST_PBLKCTRL_STATCLEAR | | ||
569 | is_rgmii << S6_GMAC_HOST_PBLKCTRL_RGMII, | ||
570 | pd->reg + S6_GMAC_HOST_PBLKCTRL); | ||
571 | writel(1 << S6_GMAC_MACCONF1_TXENA | | ||
572 | 1 << S6_GMAC_MACCONF1_RXENA | | ||
573 | (dev->flags & IFF_LOOPBACK ? 1 : 0) | ||
574 | << S6_GMAC_MACCONF1_LOOPBACK, | ||
575 | pd->reg + S6_GMAC_MACCONF1); | ||
576 | writel(dev->mtu && (dev->mtu < (S6_MAX_FRLEN - ETH_HLEN-ETH_FCS_LEN)) ? | ||
577 | dev->mtu+ETH_HLEN+ETH_FCS_LEN : S6_MAX_FRLEN, | ||
578 | pd->reg + S6_GMAC_MACMAXFRAMELEN); | ||
579 | writel((pd->link.full ? 1 : 0) << S6_GMAC_MACCONF2_FULL | | ||
580 | 1 << S6_GMAC_MACCONF2_PADCRCENA | | ||
581 | 1 << S6_GMAC_MACCONF2_LENGTHFCHK | | ||
582 | (pd->link.giga ? | ||
583 | S6_GMAC_MACCONF2_IFMODE_BYTE : | ||
584 | S6_GMAC_MACCONF2_IFMODE_NIBBLE) | ||
585 | << S6_GMAC_MACCONF2_IFMODE | | ||
586 | 7 << S6_GMAC_MACCONF2_PREAMBLELEN, | ||
587 | pd->reg + S6_GMAC_MACCONF2); | ||
588 | writel(0, pd->reg + S6_GMAC_MACSTATADDR1); | ||
589 | writel(0, pd->reg + S6_GMAC_MACSTATADDR2); | ||
590 | writel(1 << S6_GMAC_FIFOCONF0_WTMENREQ | | ||
591 | 1 << S6_GMAC_FIFOCONF0_SRFENREQ | | ||
592 | 1 << S6_GMAC_FIFOCONF0_FRFENREQ | | ||
593 | 1 << S6_GMAC_FIFOCONF0_STFENREQ | | ||
594 | 1 << S6_GMAC_FIFOCONF0_FTFENREQ, | ||
595 | pd->reg + S6_GMAC_FIFOCONF0); | ||
596 | writel(128 << S6_GMAC_FIFOCONF3_CFGFTTH | | ||
597 | 128 << S6_GMAC_FIFOCONF3_CFGHWMFT, | ||
598 | pd->reg + S6_GMAC_FIFOCONF3); | ||
599 | writel((S6_GMAC_FIFOCONF_RSV_MASK & ~( | ||
600 | 1 << S6_GMAC_FIFOCONF_RSV_RUNT | | ||
601 | 1 << S6_GMAC_FIFOCONF_RSV_CRCERR | | ||
602 | 1 << S6_GMAC_FIFOCONF_RSV_OK | | ||
603 | 1 << S6_GMAC_FIFOCONF_RSV_DRIBBLE | | ||
604 | 1 << S6_GMAC_FIFOCONF_RSV_CTRLFRAME | | ||
605 | 1 << S6_GMAC_FIFOCONF_RSV_PAUSECTRL | | ||
606 | 1 << S6_GMAC_FIFOCONF_RSV_UNOPCODE | | ||
607 | 1 << S6_GMAC_FIFOCONF_RSV_TRUNCATED)) | | ||
608 | 1 << S6_GMAC_FIFOCONF5_DROPLT64 | | ||
609 | pd->link.giga << S6_GMAC_FIFOCONF5_CFGBYTM | | ||
610 | 1 << S6_GMAC_FIFOCONF5_RXDROPSIZE, | ||
611 | pd->reg + S6_GMAC_FIFOCONF5); | ||
612 | writel(1 << S6_GMAC_FIFOCONF_RSV_RUNT | | ||
613 | 1 << S6_GMAC_FIFOCONF_RSV_CRCERR | | ||
614 | 1 << S6_GMAC_FIFOCONF_RSV_DRIBBLE | | ||
615 | 1 << S6_GMAC_FIFOCONF_RSV_CTRLFRAME | | ||
616 | 1 << S6_GMAC_FIFOCONF_RSV_PAUSECTRL | | ||
617 | 1 << S6_GMAC_FIFOCONF_RSV_UNOPCODE | | ||
618 | 1 << S6_GMAC_FIFOCONF_RSV_TRUNCATED, | ||
619 | pd->reg + S6_GMAC_FIFOCONF4); | ||
620 | s6gmac_set_dstaddr(pd, 0, | ||
621 | 0xFFFFFFFF, 0x0000FFFF, 0xFFFFFFFF, 0x0000FFFF); | ||
622 | s6gmac_set_dstaddr(pd, 1, | ||
623 | dev->dev_addr[5] | | ||
624 | dev->dev_addr[4] << 8 | | ||
625 | dev->dev_addr[3] << 16 | | ||
626 | dev->dev_addr[2] << 24, | ||
627 | dev->dev_addr[1] | | ||
628 | dev->dev_addr[0] << 8, | ||
629 | 0xFFFFFFFF, 0x0000FFFF); | ||
630 | s6gmac_set_dstaddr(pd, 2, | ||
631 | 0x00000000, 0x00000100, 0x00000000, 0x00000100); | ||
632 | s6gmac_set_dstaddr(pd, 3, | ||
633 | 0x00000000, 0x00000000, 0x00000000, 0x00000000); | ||
634 | writel(1 << S6_GMAC_HOST_PBLKCTRL_TXENA | | ||
635 | 1 << S6_GMAC_HOST_PBLKCTRL_RXENA | | ||
636 | S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_TXBSIZ | | ||
637 | S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_RXBSIZ | | ||
638 | 1 << S6_GMAC_HOST_PBLKCTRL_STATENA | | ||
639 | 1 << S6_GMAC_HOST_PBLKCTRL_STATCLEAR | | ||
640 | is_rgmii << S6_GMAC_HOST_PBLKCTRL_RGMII, | ||
641 | pd->reg + S6_GMAC_HOST_PBLKCTRL); | ||
642 | } | ||
643 | |||
644 | static void s6mii_enable(struct s6gmac *pd) | ||
645 | { | ||
646 | writel(readl(pd->reg + S6_GMAC_MACCONF1) & | ||
647 | ~(1 << S6_GMAC_MACCONF1_SOFTRES), | ||
648 | pd->reg + S6_GMAC_MACCONF1); | ||
649 | writel((readl(pd->reg + S6_GMAC_MACMIICONF) | ||
650 | & ~(S6_GMAC_MACMIICONF_CSEL_MASK << S6_GMAC_MACMIICONF_CSEL)) | ||
651 | | (S6_GMAC_MACMIICONF_CSEL_DIV168 << S6_GMAC_MACMIICONF_CSEL), | ||
652 | pd->reg + S6_GMAC_MACMIICONF); | ||
653 | } | ||
654 | |||
655 | static int s6mii_busy(struct s6gmac *pd, int tmo) | ||
656 | { | ||
657 | while (readl(pd->reg + S6_GMAC_MACMIIINDI)) { | ||
658 | if (--tmo == 0) | ||
659 | return -ETIME; | ||
660 | udelay(64); | ||
661 | } | ||
662 | return 0; | ||
663 | } | ||
664 | |||
/*
 * mii_bus read hook: read register @regnum of the PHY at @phy_addr.
 * Returns the 16-bit register value, or -ETIME if the MII block stays
 * busy too long before or after the transaction.
 */
static int s6mii_read(struct mii_bus *bus, int phy_addr, int regnum)
{
	struct s6gmac *pd = bus->priv;
	s6mii_enable(pd);
	if (s6mii_busy(pd, 256))
		return -ETIME;
	/* select PHY and register, then pulse the read command bit */
	writel(phy_addr << S6_GMAC_MACMIIADDR_PHY |
		regnum << S6_GMAC_MACMIIADDR_REG,
		pd->reg + S6_GMAC_MACMIIADDR);
	writel(1 << S6_GMAC_MACMIICMD_READ, pd->reg + S6_GMAC_MACMIICMD);
	writel(0, pd->reg + S6_GMAC_MACMIICMD);
	if (s6mii_busy(pd, 256))
		return -ETIME;
	/* the low 16 bits of the status register carry the value read */
	return (u16)readl(pd->reg + S6_GMAC_MACMIISTAT);
}
680 | |||
/*
 * mii_bus write hook: write @value to register @regnum of the PHY at
 * @phy_addr.  Returns 0 on success, -ETIME if the MII block stays busy
 * too long before or after the transaction.
 */
static int s6mii_write(struct mii_bus *bus, int phy_addr, int regnum, u16 value)
{
	struct s6gmac *pd = bus->priv;
	s6mii_enable(pd);
	if (s6mii_busy(pd, 256))
		return -ETIME;
	/* select PHY and register; writing MACMIICTRL presumably starts
	 * the write cycle (confirm against S6 hardware docs) */
	writel(phy_addr << S6_GMAC_MACMIIADDR_PHY |
		regnum << S6_GMAC_MACMIIADDR_REG,
		pd->reg + S6_GMAC_MACMIIADDR);
	writel(value, pd->reg + S6_GMAC_MACMIICTRL);
	if (s6mii_busy(pd, 256))
		return -ETIME;
	return 0;
}
695 | |||
696 | static int s6mii_reset(struct mii_bus *bus) | ||
697 | { | ||
698 | struct s6gmac *pd = bus->priv; | ||
699 | s6mii_enable(pd); | ||
700 | if (s6mii_busy(pd, PHY_INIT_TIMEOUT)) | ||
701 | return -ETIME; | ||
702 | return 0; | ||
703 | } | ||
704 | |||
705 | static void s6gmac_set_rgmii_txclock(struct s6gmac *pd) | ||
706 | { | ||
707 | u32 pllsel = readl(S6_REG_GREG1 + S6_GREG1_PLLSEL); | ||
708 | pllsel &= ~(S6_GREG1_PLLSEL_GMAC_MASK << S6_GREG1_PLLSEL_GMAC); | ||
709 | switch (pd->link.mbit) { | ||
710 | case 10: | ||
711 | pllsel |= S6_GREG1_PLLSEL_GMAC_2500KHZ << S6_GREG1_PLLSEL_GMAC; | ||
712 | break; | ||
713 | case 100: | ||
714 | pllsel |= S6_GREG1_PLLSEL_GMAC_25MHZ << S6_GREG1_PLLSEL_GMAC; | ||
715 | break; | ||
716 | case 1000: | ||
717 | pllsel |= S6_GREG1_PLLSEL_GMAC_125MHZ << S6_GREG1_PLLSEL_GMAC; | ||
718 | break; | ||
719 | default: | ||
720 | return; | ||
721 | } | ||
722 | writel(pllsel, S6_REG_GREG1 + S6_GREG1_PLLSEL); | ||
723 | } | ||
724 | |||
/*
 * Record the PHY-reported link parameters in pd->link, retune the
 * RGMII tx clock when the speed changed, and update carrier state.
 * Caller holds pd->lock (see s6gmac_open/s6gmac_adjust_link).
 */
static inline void s6gmac_linkisup(struct net_device *dev, int isup)
{
	struct s6gmac *pd = netdev_priv(dev);
	struct phy_device *phydev = pd->phydev;

	pd->link.full = phydev->duplex;
	pd->link.giga = (phydev->speed == 1000);
	if (pd->link.mbit != phydev->speed) {
		pd->link.mbit = phydev->speed;
		s6gmac_set_rgmii_txclock(pd);
	}
	pd->link.isup = isup;
	if (isup)
		netif_carrier_on(dev);
	phy_print_status(phydev);
}
741 | |||
/*
 * phylib adjust_link callback.  Takes the link down (and disables tx)
 * when the PHY lost link or reports speed/duplex differing from what
 * the MAC is programmed for; when the PHY has link while we are down,
 * reprograms MAC duplex and interface mode as needed and brings the
 * link back up via s6gmac_linkisup().
 */
static void s6gmac_adjust_link(struct net_device *dev)
{
	struct s6gmac *pd = netdev_priv(dev);
	struct phy_device *phydev = pd->phydev;
	if (pd->link.isup &&
			(!phydev->link ||
			(pd->link.mbit != phydev->speed) ||
			(pd->link.full != phydev->duplex))) {
		/* link parameters changed or link lost: go down first */
		pd->link.isup = 0;
		netif_tx_disable(dev);
		if (!phydev->link) {
			netif_carrier_off(dev);
			phy_print_status(phydev);
		}
	}
	if (!pd->link.isup && phydev->link) {
		/* duplex changed: update the MACCONF2 FULL bit */
		if (pd->link.full != phydev->duplex) {
			u32 maccfg = readl(pd->reg + S6_GMAC_MACCONF2);
			if (phydev->duplex)
				maccfg |= 1 << S6_GMAC_MACCONF2_FULL;
			else
				maccfg &= ~(1 << S6_GMAC_MACCONF2_FULL);
			writel(maccfg, pd->reg + S6_GMAC_MACCONF2);
		}

		/* gigabit state changed: switch byte/nibble interface mode */
		if (pd->link.giga != (phydev->speed == 1000)) {
			u32 fifocfg = readl(pd->reg + S6_GMAC_FIFOCONF5);
			u32 maccfg = readl(pd->reg + S6_GMAC_MACCONF2);
			maccfg &= ~(S6_GMAC_MACCONF2_IFMODE_MASK
					<< S6_GMAC_MACCONF2_IFMODE);
			if (phydev->speed == 1000) {
				fifocfg |= 1 << S6_GMAC_FIFOCONF5_CFGBYTM;
				maccfg |= S6_GMAC_MACCONF2_IFMODE_BYTE
					<< S6_GMAC_MACCONF2_IFMODE;
			} else {
				fifocfg &= ~(1 << S6_GMAC_FIFOCONF5_CFGBYTM);
				maccfg |= S6_GMAC_MACCONF2_IFMODE_NIBBLE
					<< S6_GMAC_MACCONF2_IFMODE;
			}
			writel(fifocfg, pd->reg + S6_GMAC_FIFOCONF5);
			writel(maccfg, pd->reg + S6_GMAC_MACCONF2);
		}

		/* restart tx unless the DMA fifo is still full */
		if (!s6dmac_fifo_full(pd->tx_dma, pd->tx_chan))
			netif_wake_queue(dev);
		s6gmac_linkisup(dev, 1);
	}
}
790 | |||
791 | static inline int s6gmac_phy_start(struct net_device *dev) | ||
792 | { | ||
793 | struct s6gmac *pd = netdev_priv(dev); | ||
794 | int i = 0; | ||
795 | struct phy_device *p = NULL; | ||
796 | while ((!(p = pd->mii.bus->phy_map[i])) && (i < PHY_MAX_ADDR)) | ||
797 | i++; | ||
798 | p = phy_connect(dev, dev_name(&p->dev), &s6gmac_adjust_link, 0, | ||
799 | PHY_INTERFACE_MODE_RGMII); | ||
800 | if (IS_ERR(p)) { | ||
801 | printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); | ||
802 | return PTR_ERR(p); | ||
803 | } | ||
804 | p->supported &= PHY_GBIT_FEATURES; | ||
805 | p->advertising = p->supported; | ||
806 | pd->phydev = p; | ||
807 | return 0; | ||
808 | } | ||
809 | |||
/*
 * Arm the hardware statistics "carry" machinery for the rx (bank 0)
 * and tx (bank 1) counters we track: clear any pending carry bits,
 * then write the complement to the mask register (presumably 0 =
 * unmasked — confirm against S6 hardware docs).
 */
static inline void s6gmac_init_stats(struct net_device *dev)
{
	struct s6gmac *pd = netdev_priv(dev);
	u32 mask;
	/* bank 0: receive-side counters */
	mask = 1 << S6_GMAC_STATCARRY1_RDRP |
		1 << S6_GMAC_STATCARRY1_RJBR |
		1 << S6_GMAC_STATCARRY1_RFRG |
		1 << S6_GMAC_STATCARRY1_ROVR |
		1 << S6_GMAC_STATCARRY1_RUND |
		1 << S6_GMAC_STATCARRY1_RCDE |
		1 << S6_GMAC_STATCARRY1_RFLR |
		1 << S6_GMAC_STATCARRY1_RALN |
		1 << S6_GMAC_STATCARRY1_RMCA |
		1 << S6_GMAC_STATCARRY1_RFCS |
		1 << S6_GMAC_STATCARRY1_RPKT |
		1 << S6_GMAC_STATCARRY1_RBYT;
	writel(mask, pd->reg + S6_GMAC_STATCARRY(0));
	writel(~mask, pd->reg + S6_GMAC_STATCARRYMSK(0));
	/* bank 1: transmit-side counters */
	mask = 1 << S6_GMAC_STATCARRY2_TDRP |
		1 << S6_GMAC_STATCARRY2_TNCL |
		1 << S6_GMAC_STATCARRY2_TXCL |
		1 << S6_GMAC_STATCARRY2_TEDF |
		1 << S6_GMAC_STATCARRY2_TPKT |
		1 << S6_GMAC_STATCARRY2_TBYT |
		1 << S6_GMAC_STATCARRY2_TFRG |
		1 << S6_GMAC_STATCARRY2_TUND |
		1 << S6_GMAC_STATCARRY2_TOVR |
		1 << S6_GMAC_STATCARRY2_TFCS |
		1 << S6_GMAC_STATCARRY2_TJBR;
	writel(mask, pd->reg + S6_GMAC_STATCARRY(1));
	writel(~mask, pd->reg + S6_GMAC_STATCARRYMSK(1));
}
842 | |||
843 | static inline void s6gmac_init_dmac(struct net_device *dev) | ||
844 | { | ||
845 | struct s6gmac *pd = netdev_priv(dev); | ||
846 | s6dmac_disable_chan(pd->tx_dma, pd->tx_chan); | ||
847 | s6dmac_disable_chan(pd->rx_dma, pd->rx_chan); | ||
848 | s6dmac_disable_error_irqs(pd->tx_dma, 1 << S6_HIFDMA_GMACTX); | ||
849 | s6dmac_disable_error_irqs(pd->rx_dma, 1 << S6_HIFDMA_GMACRX); | ||
850 | } | ||
851 | |||
/*
 * hard_start_xmit hook: hand one skb to the tx DMA channel.  The skb
 * is remembered in the tx_skb ring until the tx interrupt handler
 * reclaims it; overflowing that ring indicates a driver bug (BUG()).
 * Runs with pd->lock held and interrupts off.
 */
static int s6gmac_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct s6gmac *pd = netdev_priv(dev);
	unsigned long flags;
	spin_lock_irqsave(&pd->lock, flags);
	dev->trans_start = jiffies;
	/* per-burst header: frame length, FCS generation, and padding
	 * for frames shorter than the ethernet minimum */
	writel(skb->len << S6_GMAC_BURST_PREWR_LEN |
		0 << S6_GMAC_BURST_PREWR_CFE |
		1 << S6_GMAC_BURST_PREWR_PPE |
		1 << S6_GMAC_BURST_PREWR_FCS |
		((skb->len < ETH_ZLEN) ? 1 : 0) << S6_GMAC_BURST_PREWR_PAD,
		pd->reg + S6_GMAC_BURST_PREWR);
	s6dmac_put_fifo_cache(pd->tx_dma, pd->tx_chan,
		(u32)skb->data, pd->io, skb->len);
	/* stop the queue while the DMA fifo has no room for another frame */
	if (s6dmac_fifo_full(pd->tx_dma, pd->tx_chan))
		netif_stop_queue(dev);
	if (((u8)(pd->tx_skb_i - pd->tx_skb_o)) >= S6_NUM_TX_SKB) {
		printk(KERN_ERR "GMAC BUG: skb tx ring overflow [%x, %x]\n",
			pd->tx_skb_o, pd->tx_skb_i);
		BUG();
	}
	pd->tx_skb[(pd->tx_skb_i++) % S6_NUM_TX_SKB] = skb;
	spin_unlock_irqrestore(&pd->lock, flags);
	return 0;
}
877 | |||
878 | static void s6gmac_tx_timeout(struct net_device *dev) | ||
879 | { | ||
880 | struct s6gmac *pd = netdev_priv(dev); | ||
881 | unsigned long flags; | ||
882 | spin_lock_irqsave(&pd->lock, flags); | ||
883 | s6gmac_tx_interrupt(dev); | ||
884 | spin_unlock_irqrestore(&pd->lock, flags); | ||
885 | } | ||
886 | |||
/*
 * ndo_open: sync link state from the PHY, (re)initialize the MAC,
 * statistics and DMA blocks, prefill the rx fifo, enable both DMA
 * channels, unmask the host interrupts and start the tx queue.
 */
static int s6gmac_open(struct net_device *dev)
{
	struct s6gmac *pd = netdev_priv(dev);
	unsigned long flags;
	phy_read_status(pd->phydev);
	spin_lock_irqsave(&pd->lock, flags);
	/* zero forces s6gmac_linkisup() to reprogram the tx clock */
	pd->link.mbit = 0;
	s6gmac_linkisup(dev, pd->phydev->link);
	s6gmac_init_device(dev);
	s6gmac_init_stats(dev);
	s6gmac_init_dmac(dev);
	s6gmac_rx_fillfifo(pd);
	s6dmac_enable_chan(pd->rx_dma, pd->rx_chan,
		2, 1, 0, 1, 0, 0, 0, 7, -1, 2, 0, 1);
	s6dmac_enable_chan(pd->tx_dma, pd->tx_chan,
		2, 0, 1, 0, 0, 0, 0, 7, -1, 2, 0, 1);
	/* zeros into INTMASK — presumably 0 = unmasked, enabling all
	 * five host interrupt sources (confirm against S6 docs) */
	writel(0 << S6_GMAC_HOST_INT_TXBURSTOVER |
		0 << S6_GMAC_HOST_INT_TXPREWOVER |
		0 << S6_GMAC_HOST_INT_RXBURSTUNDER |
		0 << S6_GMAC_HOST_INT_RXPOSTRFULL |
		0 << S6_GMAC_HOST_INT_RXPOSTRUNDER,
		pd->reg + S6_GMAC_HOST_INTMASK);
	spin_unlock_irqrestore(&pd->lock, flags);
	phy_start(pd->phydev);
	netif_start_queue(dev);
	return 0;
}
914 | |||
/*
 * ndo_stop: stop the queue and the PHY state machine, quiesce DMA and
 * the MAC, then free every skb still held in the tx and rx rings.
 */
static int s6gmac_stop(struct net_device *dev)
{
	struct s6gmac *pd = netdev_priv(dev);
	unsigned long flags;
	netif_stop_queue(dev);
	phy_stop(pd->phydev);
	spin_lock_irqsave(&pd->lock, flags);
	s6gmac_init_dmac(dev);
	s6gmac_stop_device(dev);
	/* drain the in-flight skb rings now that the hardware is quiet */
	while (pd->tx_skb_i != pd->tx_skb_o)
		dev_kfree_skb(pd->tx_skb[(pd->tx_skb_o++) % S6_NUM_TX_SKB]);
	while (pd->rx_skb_i != pd->rx_skb_o)
		dev_kfree_skb(pd->rx_skb[(pd->rx_skb_o++) % S6_NUM_RX_SKB]);
	spin_unlock_irqrestore(&pd->lock, flags);
	return 0;
}
931 | |||
/*
 * get_stats hook: fold the hardware counters into pd->stats.  The
 * do/while re-runs collection until no carry bits remain pending, so
 * counters wrapping during collection are not lost; derived error
 * totals are computed at the end.
 */
static struct net_device_stats *s6gmac_stats(struct net_device *dev)
{
	struct s6gmac *pd = netdev_priv(dev);
	struct net_device_stats *st = (struct net_device_stats *)&pd->stats;
	int i;
	do {
		unsigned long flags;
		spin_lock_irqsave(&pd->lock, flags);
		/* seed each counter from its saved carry count */
		for (i = 0; i < sizeof(pd->stats) / sizeof(unsigned long); i++)
			pd->stats[i] =
				pd->carry[i] << (S6_GMAC_STAT_SIZE_MIN - 1);
		s6gmac_stats_collect(pd, &statinf[0][0]);
		s6gmac_stats_collect(pd, &statinf[1][0]);
		i = s6gmac_stats_pending(pd, 0) |
			s6gmac_stats_pending(pd, 1);
		spin_unlock_irqrestore(&pd->lock, flags);
	} while (i);
	st->rx_errors = st->rx_crc_errors +
			st->rx_frame_errors +
			st->rx_length_errors +
			st->rx_missed_errors;
	st->tx_errors += st->tx_aborted_errors;
	return st;
}
956 | |||
957 | static int __devinit s6gmac_probe(struct platform_device *pdev) | ||
958 | { | ||
959 | struct net_device *dev; | ||
960 | struct s6gmac *pd; | ||
961 | int res; | ||
962 | unsigned long i; | ||
963 | struct mii_bus *mb; | ||
964 | dev = alloc_etherdev(sizeof(*pd)); | ||
965 | if (!dev) { | ||
966 | printk(KERN_ERR DRV_PRMT "etherdev alloc failed, aborting.\n"); | ||
967 | return -ENOMEM; | ||
968 | } | ||
969 | dev->open = s6gmac_open; | ||
970 | dev->stop = s6gmac_stop; | ||
971 | dev->hard_start_xmit = s6gmac_tx; | ||
972 | dev->tx_timeout = s6gmac_tx_timeout; | ||
973 | dev->watchdog_timeo = HZ; | ||
974 | dev->get_stats = s6gmac_stats; | ||
975 | dev->irq = platform_get_irq(pdev, 0); | ||
976 | pd = netdev_priv(dev); | ||
977 | memset(pd, 0, sizeof(*pd)); | ||
978 | spin_lock_init(&pd->lock); | ||
979 | pd->reg = platform_get_resource(pdev, IORESOURCE_MEM, 0)->start; | ||
980 | i = platform_get_resource(pdev, IORESOURCE_DMA, 0)->start; | ||
981 | pd->tx_dma = DMA_MASK_DMAC(i); | ||
982 | pd->tx_chan = DMA_INDEX_CHNL(i); | ||
983 | i = platform_get_resource(pdev, IORESOURCE_DMA, 1)->start; | ||
984 | pd->rx_dma = DMA_MASK_DMAC(i); | ||
985 | pd->rx_chan = DMA_INDEX_CHNL(i); | ||
986 | pd->io = platform_get_resource(pdev, IORESOURCE_IO, 0)->start; | ||
987 | res = request_irq(dev->irq, &s6gmac_interrupt, 0, dev->name, dev); | ||
988 | if (res) { | ||
989 | printk(KERN_ERR DRV_PRMT "irq request failed: %d\n", dev->irq); | ||
990 | goto errirq; | ||
991 | } | ||
992 | res = register_netdev(dev); | ||
993 | if (res) { | ||
994 | printk(KERN_ERR DRV_PRMT "error registering device %s\n", | ||
995 | dev->name); | ||
996 | goto errdev; | ||
997 | } | ||
998 | mb = mdiobus_alloc(); | ||
999 | if (!mb) { | ||
1000 | printk(KERN_ERR DRV_PRMT "error allocating mii bus\n"); | ||
1001 | goto errmii; | ||
1002 | } | ||
1003 | mb->name = "s6gmac_mii"; | ||
1004 | mb->read = s6mii_read; | ||
1005 | mb->write = s6mii_write; | ||
1006 | mb->reset = s6mii_reset; | ||
1007 | mb->priv = pd; | ||
1008 | snprintf(mb->id, MII_BUS_ID_SIZE, "0"); | ||
1009 | mb->phy_mask = ~(1 << 0); | ||
1010 | mb->irq = &pd->mii.irq[0]; | ||
1011 | for (i = 0; i < PHY_MAX_ADDR; i++) { | ||
1012 | int n = platform_get_irq(pdev, i + 1); | ||
1013 | if (n < 0) | ||
1014 | n = PHY_POLL; | ||
1015 | pd->mii.irq[i] = n; | ||
1016 | } | ||
1017 | mdiobus_register(mb); | ||
1018 | pd->mii.bus = mb; | ||
1019 | res = s6gmac_phy_start(dev); | ||
1020 | if (res) | ||
1021 | return res; | ||
1022 | platform_set_drvdata(pdev, dev); | ||
1023 | return 0; | ||
1024 | errmii: | ||
1025 | unregister_netdev(dev); | ||
1026 | errdev: | ||
1027 | free_irq(dev->irq, dev); | ||
1028 | errirq: | ||
1029 | free_netdev(dev); | ||
1030 | return res; | ||
1031 | } | ||
1032 | |||
1033 | static int __devexit s6gmac_remove(struct platform_device *pdev) | ||
1034 | { | ||
1035 | struct net_device *dev = platform_get_drvdata(pdev); | ||
1036 | if (dev) { | ||
1037 | struct s6gmac *pd = netdev_priv(dev); | ||
1038 | mdiobus_unregister(pd->mii.bus); | ||
1039 | unregister_netdev(dev); | ||
1040 | free_irq(dev->irq, dev); | ||
1041 | free_netdev(dev); | ||
1042 | platform_set_drvdata(pdev, NULL); | ||
1043 | } | ||
1044 | return 0; | ||
1045 | } | ||
1046 | |||
/* Platform driver glue; binds to platform devices named "s6gmac". */
static struct platform_driver s6gmac_driver = {
	.probe = s6gmac_probe,
	.remove = __devexit_p(s6gmac_remove),
	.driver = {
		.name = "s6gmac",
		.owner = THIS_MODULE,
	},
};
1055 | |||
/* Module load: announce the driver and register it on the platform bus. */
static int __init s6gmac_init(void)
{
	printk(KERN_INFO DRV_PRMT "S6 GMAC ethernet driver\n");
	return platform_driver_register(&s6gmac_driver);
}
1061 | |||
1062 | |||
/* Module unload: unregister the platform driver. */
static void __exit s6gmac_exit(void)
{
	platform_driver_unregister(&s6gmac_driver);
}
1067 | |||
/* Module entry/exit hookup and metadata. */
module_init(s6gmac_init);
module_exit(s6gmac_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("S6105 on chip Ethernet driver");
MODULE_AUTHOR("Oskar Schirmer <os@emlix.com>");
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c index 341882f959f3..a2d82ddb3b4d 100644 --- a/drivers/net/sh_eth.c +++ b/drivers/net/sh_eth.c | |||
@@ -865,8 +865,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev) | |||
865 | struct sh_eth_private *mdp = netdev_priv(ndev); | 865 | struct sh_eth_private *mdp = netdev_priv(ndev); |
866 | struct sh_eth_cpu_data *cd = mdp->cd; | 866 | struct sh_eth_cpu_data *cd = mdp->cd; |
867 | irqreturn_t ret = IRQ_NONE; | 867 | irqreturn_t ret = IRQ_NONE; |
868 | u32 ioaddr, boguscnt = RX_RING_SIZE; | 868 | u32 ioaddr, intr_status = 0; |
869 | u32 intr_status = 0; | ||
870 | 869 | ||
871 | ioaddr = ndev->base_addr; | 870 | ioaddr = ndev->base_addr; |
872 | spin_lock(&mdp->lock); | 871 | spin_lock(&mdp->lock); |
@@ -901,12 +900,6 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev) | |||
901 | if (intr_status & cd->eesr_err_check) | 900 | if (intr_status & cd->eesr_err_check) |
902 | sh_eth_error(ndev, intr_status); | 901 | sh_eth_error(ndev, intr_status); |
903 | 902 | ||
904 | if (--boguscnt < 0) { | ||
905 | printk(KERN_WARNING | ||
906 | "%s: Too much work at interrupt, status=0x%4.4x.\n", | ||
907 | ndev->name, intr_status); | ||
908 | } | ||
909 | |||
910 | other_irq: | 903 | other_irq: |
911 | spin_unlock(&mdp->lock); | 904 | spin_unlock(&mdp->lock); |
912 | 905 | ||
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index 7681d28c53d7..daf961ab68bc 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c | |||
@@ -2495,7 +2495,7 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx) | |||
2495 | if (likely(status >> 16 == (status & 0xffff))) { | 2495 | if (likely(status >> 16 == (status & 0xffff))) { |
2496 | skb = sky2->rx_ring[sky2->rx_next].skb; | 2496 | skb = sky2->rx_ring[sky2->rx_next].skb; |
2497 | skb->ip_summed = CHECKSUM_COMPLETE; | 2497 | skb->ip_summed = CHECKSUM_COMPLETE; |
2498 | skb->csum = status & 0xffff; | 2498 | skb->csum = le16_to_cpu(status); |
2499 | } else { | 2499 | } else { |
2500 | printk(KERN_NOTICE PFX "%s: hardware receive " | 2500 | printk(KERN_NOTICE PFX "%s: hardware receive " |
2501 | "checksum problem (status = %#x)\n", | 2501 | "checksum problem (status = %#x)\n", |
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig index 3717569828bf..a906d3998131 100644 --- a/drivers/net/usb/Kconfig +++ b/drivers/net/usb/Kconfig | |||
@@ -169,10 +169,12 @@ config USB_NET_CDCETHER | |||
169 | The Linux-USB CDC Ethernet Gadget driver is an open implementation. | 169 | The Linux-USB CDC Ethernet Gadget driver is an open implementation. |
170 | This driver should work with at least the following devices: | 170 | This driver should work with at least the following devices: |
171 | 171 | ||
172 | * Dell Wireless 5530 HSPA | ||
172 | * Ericsson PipeRider (all variants) | 173 | * Ericsson PipeRider (all variants) |
174 | * Ericsson Mobile Broadband Module (all variants) | ||
173 | * Motorola (DM100 and SB4100) | 175 | * Motorola (DM100 and SB4100) |
174 | * Broadcom Cable Modem (reference design) | 176 | * Broadcom Cable Modem (reference design) |
175 | * Toshiba PCX1100U | 177 | * Toshiba (PCX1100U and F3507g) |
176 | * ... | 178 | * ... |
177 | 179 | ||
178 | This driver creates an interface named "ethX", where X depends on | 180 | This driver creates an interface named "ethX", where X depends on |
diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c index 80e01778dd3b..cd35d50e46d4 100644 --- a/drivers/net/usb/cdc_eem.c +++ b/drivers/net/usb/cdc_eem.c | |||
@@ -319,7 +319,7 @@ static int eem_rx_fixup(struct usbnet *dev, struct sk_buff *skb) | |||
319 | return crc == crc2; | 319 | return crc == crc2; |
320 | 320 | ||
321 | if (unlikely(crc != crc2)) { | 321 | if (unlikely(crc != crc2)) { |
322 | dev->stats.rx_errors++; | 322 | dev->net->stats.rx_errors++; |
323 | dev_kfree_skb_any(skb2); | 323 | dev_kfree_skb_any(skb2); |
324 | } else | 324 | } else |
325 | usbnet_skb_return(dev, skb2); | 325 | usbnet_skb_return(dev, skb2); |
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 01fd528306ec..4a6aff579403 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c | |||
@@ -533,6 +533,31 @@ static const struct usb_device_id products [] = { | |||
533 | USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1900, USB_CLASS_COMM, | 533 | USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1900, USB_CLASS_COMM, |
534 | USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), | 534 | USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), |
535 | .driver_info = (unsigned long) &cdc_info, | 535 | .driver_info = (unsigned long) &cdc_info, |
536 | }, { | ||
537 | /* Ericsson F3507g ver. 2 */ | ||
538 | USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1902, USB_CLASS_COMM, | ||
539 | USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), | ||
540 | .driver_info = (unsigned long) &cdc_info, | ||
541 | }, { | ||
542 | /* Ericsson F3607gw */ | ||
543 | USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1904, USB_CLASS_COMM, | ||
544 | USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), | ||
545 | .driver_info = (unsigned long) &cdc_info, | ||
546 | }, { | ||
547 | /* Ericsson F3307 */ | ||
548 | USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1906, USB_CLASS_COMM, | ||
549 | USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), | ||
550 | .driver_info = (unsigned long) &cdc_info, | ||
551 | }, { | ||
552 | /* Toshiba F3507g */ | ||
553 | USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130b, USB_CLASS_COMM, | ||
554 | USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), | ||
555 | .driver_info = (unsigned long) &cdc_info, | ||
556 | }, { | ||
557 | /* Dell F3507g */ | ||
558 | USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8147, USB_CLASS_COMM, | ||
559 | USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), | ||
560 | .driver_info = (unsigned long) &cdc_info, | ||
536 | }, | 561 | }, |
537 | { }, // END | 562 | { }, // END |
538 | }; | 563 | }; |
diff --git a/drivers/net/usb/cdc_subset.c b/drivers/net/usb/cdc_subset.c index c66b9c324f54..ca39ace0b0eb 100644 --- a/drivers/net/usb/cdc_subset.c +++ b/drivers/net/usb/cdc_subset.c | |||
@@ -307,9 +307,10 @@ static const struct usb_device_id products [] = { | |||
307 | USB_DEVICE (0x1286, 0x8001), // "blob" bootloader | 307 | USB_DEVICE (0x1286, 0x8001), // "blob" bootloader |
308 | .driver_info = (unsigned long) &blob_info, | 308 | .driver_info = (unsigned long) &blob_info, |
309 | }, { | 309 | }, { |
310 | // Linux Ethernet/RNDIS gadget on pxa210/25x/26x, second config | 310 | // Linux Ethernet/RNDIS gadget, mostly on PXA, second config |
311 | // e.g. Gumstix, current OpenZaurus, ... | 311 | // e.g. Gumstix, current OpenZaurus, ... or anything else |
312 | USB_DEVICE_VER (0x0525, 0xa4a2, 0x0203, 0x0203), | 312 | // that just enables this gadget option. |
313 | USB_DEVICE (0x0525, 0xa4a2), | ||
313 | .driver_info = (unsigned long) &linuxdev_info, | 314 | .driver_info = (unsigned long) &linuxdev_info, |
314 | }, | 315 | }, |
315 | #endif | 316 | #endif |
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c index 7ae82446b93a..1d3730d6690f 100644 --- a/drivers/net/usb/dm9601.c +++ b/drivers/net/usb/dm9601.c | |||
@@ -513,11 +513,11 @@ static int dm9601_rx_fixup(struct usbnet *dev, struct sk_buff *skb) | |||
513 | len = (skb->data[1] | (skb->data[2] << 8)) - 4; | 513 | len = (skb->data[1] | (skb->data[2] << 8)) - 4; |
514 | 514 | ||
515 | if (unlikely(status & 0xbf)) { | 515 | if (unlikely(status & 0xbf)) { |
516 | if (status & 0x01) dev->stats.rx_fifo_errors++; | 516 | if (status & 0x01) dev->net->stats.rx_fifo_errors++; |
517 | if (status & 0x02) dev->stats.rx_crc_errors++; | 517 | if (status & 0x02) dev->net->stats.rx_crc_errors++; |
518 | if (status & 0x04) dev->stats.rx_frame_errors++; | 518 | if (status & 0x04) dev->net->stats.rx_frame_errors++; |
519 | if (status & 0x20) dev->stats.rx_missed_errors++; | 519 | if (status & 0x20) dev->net->stats.rx_missed_errors++; |
520 | if (status & 0x90) dev->stats.rx_length_errors++; | 520 | if (status & 0x90) dev->net->stats.rx_length_errors++; |
521 | return 0; | 521 | return 0; |
522 | } | 522 | } |
523 | 523 | ||
diff --git a/drivers/net/usb/net1080.c b/drivers/net/usb/net1080.c index 034e8a73ca6b..aeb1ab03a9ee 100644 --- a/drivers/net/usb/net1080.c +++ b/drivers/net/usb/net1080.c | |||
@@ -433,7 +433,7 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb) | |||
433 | dbg("rx framesize %d range %d..%d mtu %d", skb->len, | 433 | dbg("rx framesize %d range %d..%d mtu %d", skb->len, |
434 | net->hard_header_len, dev->hard_mtu, net->mtu); | 434 | net->hard_header_len, dev->hard_mtu, net->mtu); |
435 | #endif | 435 | #endif |
436 | dev->stats.rx_frame_errors++; | 436 | dev->net->stats.rx_frame_errors++; |
437 | nc_ensure_sync(dev); | 437 | nc_ensure_sync(dev); |
438 | return 0; | 438 | return 0; |
439 | } | 439 | } |
@@ -442,12 +442,12 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb) | |||
442 | hdr_len = le16_to_cpup(&header->hdr_len); | 442 | hdr_len = le16_to_cpup(&header->hdr_len); |
443 | packet_len = le16_to_cpup(&header->packet_len); | 443 | packet_len = le16_to_cpup(&header->packet_len); |
444 | if (FRAMED_SIZE(packet_len) > NC_MAX_PACKET) { | 444 | if (FRAMED_SIZE(packet_len) > NC_MAX_PACKET) { |
445 | dev->stats.rx_frame_errors++; | 445 | dev->net->stats.rx_frame_errors++; |
446 | dbg("packet too big, %d", packet_len); | 446 | dbg("packet too big, %d", packet_len); |
447 | nc_ensure_sync(dev); | 447 | nc_ensure_sync(dev); |
448 | return 0; | 448 | return 0; |
449 | } else if (hdr_len < MIN_HEADER) { | 449 | } else if (hdr_len < MIN_HEADER) { |
450 | dev->stats.rx_frame_errors++; | 450 | dev->net->stats.rx_frame_errors++; |
451 | dbg("header too short, %d", hdr_len); | 451 | dbg("header too short, %d", hdr_len); |
452 | nc_ensure_sync(dev); | 452 | nc_ensure_sync(dev); |
453 | return 0; | 453 | return 0; |
@@ -465,21 +465,21 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb) | |||
465 | 465 | ||
466 | if ((packet_len & 0x01) == 0) { | 466 | if ((packet_len & 0x01) == 0) { |
467 | if (skb->data [packet_len] != PAD_BYTE) { | 467 | if (skb->data [packet_len] != PAD_BYTE) { |
468 | dev->stats.rx_frame_errors++; | 468 | dev->net->stats.rx_frame_errors++; |
469 | dbg("bad pad"); | 469 | dbg("bad pad"); |
470 | return 0; | 470 | return 0; |
471 | } | 471 | } |
472 | skb_trim(skb, skb->len - 1); | 472 | skb_trim(skb, skb->len - 1); |
473 | } | 473 | } |
474 | if (skb->len != packet_len) { | 474 | if (skb->len != packet_len) { |
475 | dev->stats.rx_frame_errors++; | 475 | dev->net->stats.rx_frame_errors++; |
476 | dbg("bad packet len %d (expected %d)", | 476 | dbg("bad packet len %d (expected %d)", |
477 | skb->len, packet_len); | 477 | skb->len, packet_len); |
478 | nc_ensure_sync(dev); | 478 | nc_ensure_sync(dev); |
479 | return 0; | 479 | return 0; |
480 | } | 480 | } |
481 | if (header->packet_id != get_unaligned(&trailer->packet_id)) { | 481 | if (header->packet_id != get_unaligned(&trailer->packet_id)) { |
482 | dev->stats.rx_fifo_errors++; | 482 | dev->net->stats.rx_fifo_errors++; |
483 | dbg("(2+ dropped) rx packet_id mismatch 0x%x 0x%x", | 483 | dbg("(2+ dropped) rx packet_id mismatch 0x%x 0x%x", |
484 | le16_to_cpu(header->packet_id), | 484 | le16_to_cpu(header->packet_id), |
485 | le16_to_cpu(trailer->packet_id)); | 485 | le16_to_cpu(trailer->packet_id)); |
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c index 2138535f2339..73acbd244aa1 100644 --- a/drivers/net/usb/pegasus.c +++ b/drivers/net/usb/pegasus.c | |||
@@ -297,7 +297,7 @@ static int update_eth_regs_async(pegasus_t * pegasus) | |||
297 | 297 | ||
298 | pegasus->dr.bRequestType = PEGASUS_REQT_WRITE; | 298 | pegasus->dr.bRequestType = PEGASUS_REQT_WRITE; |
299 | pegasus->dr.bRequest = PEGASUS_REQ_SET_REGS; | 299 | pegasus->dr.bRequest = PEGASUS_REQ_SET_REGS; |
300 | pegasus->dr.wValue = 0; | 300 | pegasus->dr.wValue = cpu_to_le16(0); |
301 | pegasus->dr.wIndex = cpu_to_le16(EthCtrl0); | 301 | pegasus->dr.wIndex = cpu_to_le16(EthCtrl0); |
302 | pegasus->dr.wLength = cpu_to_le16(3); | 302 | pegasus->dr.wLength = cpu_to_le16(3); |
303 | pegasus->ctrl_urb->transfer_buffer_length = 3; | 303 | pegasus->ctrl_urb->transfer_buffer_length = 3; |
@@ -446,11 +446,12 @@ static int write_eprom_word(pegasus_t * pegasus, __u8 index, __u16 data) | |||
446 | int i; | 446 | int i; |
447 | __u8 tmp, d[4] = { 0x3f, 0, 0, EPROM_WRITE }; | 447 | __u8 tmp, d[4] = { 0x3f, 0, 0, EPROM_WRITE }; |
448 | int ret; | 448 | int ret; |
449 | __le16 le_data = cpu_to_le16(data); | ||
449 | 450 | ||
450 | set_registers(pegasus, EpromOffset, 4, d); | 451 | set_registers(pegasus, EpromOffset, 4, d); |
451 | enable_eprom_write(pegasus); | 452 | enable_eprom_write(pegasus); |
452 | set_register(pegasus, EpromOffset, index); | 453 | set_register(pegasus, EpromOffset, index); |
453 | set_registers(pegasus, EpromData, 2, &data); | 454 | set_registers(pegasus, EpromData, 2, &le_data); |
454 | set_register(pegasus, EpromCtrl, EPROM_WRITE); | 455 | set_register(pegasus, EpromCtrl, EPROM_WRITE); |
455 | 456 | ||
456 | for (i = 0; i < REG_TIMEOUT; i++) { | 457 | for (i = 0; i < REG_TIMEOUT; i++) { |
@@ -923,29 +924,32 @@ static struct net_device_stats *pegasus_netdev_stats(struct net_device *dev) | |||
923 | 924 | ||
924 | static inline void disable_net_traffic(pegasus_t * pegasus) | 925 | static inline void disable_net_traffic(pegasus_t * pegasus) |
925 | { | 926 | { |
926 | int tmp = 0; | 927 | __le16 tmp = cpu_to_le16(0); |
927 | 928 | ||
928 | set_registers(pegasus, EthCtrl0, 2, &tmp); | 929 | set_registers(pegasus, EthCtrl0, sizeof(tmp), &tmp); |
929 | } | 930 | } |
930 | 931 | ||
931 | static inline void get_interrupt_interval(pegasus_t * pegasus) | 932 | static inline void get_interrupt_interval(pegasus_t * pegasus) |
932 | { | 933 | { |
933 | __u8 data[2]; | 934 | u16 data; |
935 | u8 interval; | ||
934 | 936 | ||
935 | read_eprom_word(pegasus, 4, (__u16 *) data); | 937 | read_eprom_word(pegasus, 4, &data); |
938 | interval = data >> 8; | ||
936 | if (pegasus->usb->speed != USB_SPEED_HIGH) { | 939 | if (pegasus->usb->speed != USB_SPEED_HIGH) { |
937 | if (data[1] < 0x80) { | 940 | if (interval < 0x80) { |
938 | if (netif_msg_timer(pegasus)) | 941 | if (netif_msg_timer(pegasus)) |
939 | dev_info(&pegasus->intf->dev, "intr interval " | 942 | dev_info(&pegasus->intf->dev, "intr interval " |
940 | "changed from %ums to %ums\n", | 943 | "changed from %ums to %ums\n", |
941 | data[1], 0x80); | 944 | interval, 0x80); |
942 | data[1] = 0x80; | 945 | interval = 0x80; |
946 | data = (data & 0x00FF) | ((u16)interval << 8); | ||
943 | #ifdef PEGASUS_WRITE_EEPROM | 947 | #ifdef PEGASUS_WRITE_EEPROM |
944 | write_eprom_word(pegasus, 4, *(__u16 *) data); | 948 | write_eprom_word(pegasus, 4, data); |
945 | #endif | 949 | #endif |
946 | } | 950 | } |
947 | } | 951 | } |
948 | pegasus->intr_interval = data[1]; | 952 | pegasus->intr_interval = interval; |
949 | } | 953 | } |
950 | 954 | ||
951 | static void set_carrier(struct net_device *net) | 955 | static void set_carrier(struct net_device *net) |
@@ -1299,7 +1303,8 @@ static int pegasus_blacklisted(struct usb_device *udev) | |||
1299 | /* Special quirk to keep the driver from handling the Belkin Bluetooth | 1303 | /* Special quirk to keep the driver from handling the Belkin Bluetooth |
1300 | * dongle which happens to have the same ID. | 1304 | * dongle which happens to have the same ID. |
1301 | */ | 1305 | */ |
1302 | if ((udd->idVendor == VENDOR_BELKIN && udd->idProduct == 0x0121) && | 1306 | if ((udd->idVendor == cpu_to_le16(VENDOR_BELKIN)) && |
1307 | (udd->idProduct == cpu_to_le16(0x0121)) && | ||
1303 | (udd->bDeviceClass == USB_CLASS_WIRELESS_CONTROLLER) && | 1308 | (udd->bDeviceClass == USB_CLASS_WIRELESS_CONTROLLER) && |
1304 | (udd->bDeviceProtocol == 1)) | 1309 | (udd->bDeviceProtocol == 1)) |
1305 | return 1; | 1310 | return 1; |
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c index 1bf243ef950e..2232232b7989 100644 --- a/drivers/net/usb/rndis_host.c +++ b/drivers/net/usb/rndis_host.c | |||
@@ -487,7 +487,7 @@ int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb) | |||
487 | if (unlikely(hdr->msg_type != RNDIS_MSG_PACKET | 487 | if (unlikely(hdr->msg_type != RNDIS_MSG_PACKET |
488 | || skb->len < msg_len | 488 | || skb->len < msg_len |
489 | || (data_offset + data_len + 8) > msg_len)) { | 489 | || (data_offset + data_len + 8) > msg_len)) { |
490 | dev->stats.rx_frame_errors++; | 490 | dev->net->stats.rx_frame_errors++; |
491 | devdbg(dev, "bad rndis message %d/%d/%d/%d, len %d", | 491 | devdbg(dev, "bad rndis message %d/%d/%d/%d, len %d", |
492 | le32_to_cpu(hdr->msg_type), | 492 | le32_to_cpu(hdr->msg_type), |
493 | msg_len, data_offset, data_len, skb->len); | 493 | msg_len, data_offset, data_len, skb->len); |
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 89a91f8c22de..fe045896406b 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c | |||
@@ -1108,18 +1108,18 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb) | |||
1108 | if (unlikely(header & RX_STS_ES_)) { | 1108 | if (unlikely(header & RX_STS_ES_)) { |
1109 | if (netif_msg_rx_err(dev)) | 1109 | if (netif_msg_rx_err(dev)) |
1110 | devdbg(dev, "Error header=0x%08x", header); | 1110 | devdbg(dev, "Error header=0x%08x", header); |
1111 | dev->stats.rx_errors++; | 1111 | dev->net->stats.rx_errors++; |
1112 | dev->stats.rx_dropped++; | 1112 | dev->net->stats.rx_dropped++; |
1113 | 1113 | ||
1114 | if (header & RX_STS_CRC_) { | 1114 | if (header & RX_STS_CRC_) { |
1115 | dev->stats.rx_crc_errors++; | 1115 | dev->net->stats.rx_crc_errors++; |
1116 | } else { | 1116 | } else { |
1117 | if (header & (RX_STS_TL_ | RX_STS_RF_)) | 1117 | if (header & (RX_STS_TL_ | RX_STS_RF_)) |
1118 | dev->stats.rx_frame_errors++; | 1118 | dev->net->stats.rx_frame_errors++; |
1119 | 1119 | ||
1120 | if ((header & RX_STS_LE_) && | 1120 | if ((header & RX_STS_LE_) && |
1121 | (!(header & RX_STS_FT_))) | 1121 | (!(header & RX_STS_FT_))) |
1122 | dev->stats.rx_length_errors++; | 1122 | dev->net->stats.rx_length_errors++; |
1123 | } | 1123 | } |
1124 | } else { | 1124 | } else { |
1125 | /* ETH_FRAME_LEN + 4(CRC) + 2(COE) + 4(Vlan) */ | 1125 | /* ETH_FRAME_LEN + 4(CRC) + 2(COE) + 4(Vlan) */ |
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 22c0585a0319..edfd9e10ceba 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c | |||
@@ -234,8 +234,8 @@ void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb) | |||
234 | int status; | 234 | int status; |
235 | 235 | ||
236 | skb->protocol = eth_type_trans (skb, dev->net); | 236 | skb->protocol = eth_type_trans (skb, dev->net); |
237 | dev->stats.rx_packets++; | 237 | dev->net->stats.rx_packets++; |
238 | dev->stats.rx_bytes += skb->len; | 238 | dev->net->stats.rx_bytes += skb->len; |
239 | 239 | ||
240 | if (netif_msg_rx_status (dev)) | 240 | if (netif_msg_rx_status (dev)) |
241 | devdbg (dev, "< rx, len %zu, type 0x%x", | 241 | devdbg (dev, "< rx, len %zu, type 0x%x", |
@@ -397,7 +397,7 @@ static inline void rx_process (struct usbnet *dev, struct sk_buff *skb) | |||
397 | if (netif_msg_rx_err (dev)) | 397 | if (netif_msg_rx_err (dev)) |
398 | devdbg (dev, "drop"); | 398 | devdbg (dev, "drop"); |
399 | error: | 399 | error: |
400 | dev->stats.rx_errors++; | 400 | dev->net->stats.rx_errors++; |
401 | skb_queue_tail (&dev->done, skb); | 401 | skb_queue_tail (&dev->done, skb); |
402 | } | 402 | } |
403 | } | 403 | } |
@@ -420,8 +420,8 @@ static void rx_complete (struct urb *urb) | |||
420 | case 0: | 420 | case 0: |
421 | if (skb->len < dev->net->hard_header_len) { | 421 | if (skb->len < dev->net->hard_header_len) { |
422 | entry->state = rx_cleanup; | 422 | entry->state = rx_cleanup; |
423 | dev->stats.rx_errors++; | 423 | dev->net->stats.rx_errors++; |
424 | dev->stats.rx_length_errors++; | 424 | dev->net->stats.rx_length_errors++; |
425 | if (netif_msg_rx_err (dev)) | 425 | if (netif_msg_rx_err (dev)) |
426 | devdbg (dev, "rx length %d", skb->len); | 426 | devdbg (dev, "rx length %d", skb->len); |
427 | } | 427 | } |
@@ -433,7 +433,7 @@ static void rx_complete (struct urb *urb) | |||
433 | * storm, recovering as needed. | 433 | * storm, recovering as needed. |
434 | */ | 434 | */ |
435 | case -EPIPE: | 435 | case -EPIPE: |
436 | dev->stats.rx_errors++; | 436 | dev->net->stats.rx_errors++; |
437 | usbnet_defer_kevent (dev, EVENT_RX_HALT); | 437 | usbnet_defer_kevent (dev, EVENT_RX_HALT); |
438 | // FALLTHROUGH | 438 | // FALLTHROUGH |
439 | 439 | ||
@@ -451,7 +451,7 @@ static void rx_complete (struct urb *urb) | |||
451 | case -EPROTO: | 451 | case -EPROTO: |
452 | case -ETIME: | 452 | case -ETIME: |
453 | case -EILSEQ: | 453 | case -EILSEQ: |
454 | dev->stats.rx_errors++; | 454 | dev->net->stats.rx_errors++; |
455 | if (!timer_pending (&dev->delay)) { | 455 | if (!timer_pending (&dev->delay)) { |
456 | mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES); | 456 | mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES); |
457 | if (netif_msg_link (dev)) | 457 | if (netif_msg_link (dev)) |
@@ -465,12 +465,12 @@ block: | |||
465 | 465 | ||
466 | /* data overrun ... flush fifo? */ | 466 | /* data overrun ... flush fifo? */ |
467 | case -EOVERFLOW: | 467 | case -EOVERFLOW: |
468 | dev->stats.rx_over_errors++; | 468 | dev->net->stats.rx_over_errors++; |
469 | // FALLTHROUGH | 469 | // FALLTHROUGH |
470 | 470 | ||
471 | default: | 471 | default: |
472 | entry->state = rx_cleanup; | 472 | entry->state = rx_cleanup; |
473 | dev->stats.rx_errors++; | 473 | dev->net->stats.rx_errors++; |
474 | if (netif_msg_rx_err (dev)) | 474 | if (netif_msg_rx_err (dev)) |
475 | devdbg (dev, "rx status %d", urb_status); | 475 | devdbg (dev, "rx status %d", urb_status); |
476 | break; | 476 | break; |
@@ -583,8 +583,8 @@ int usbnet_stop (struct net_device *net) | |||
583 | 583 | ||
584 | if (netif_msg_ifdown (dev)) | 584 | if (netif_msg_ifdown (dev)) |
585 | devinfo (dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld", | 585 | devinfo (dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld", |
586 | dev->stats.rx_packets, dev->stats.tx_packets, | 586 | net->stats.rx_packets, net->stats.tx_packets, |
587 | dev->stats.rx_errors, dev->stats.tx_errors | 587 | net->stats.rx_errors, net->stats.tx_errors |
588 | ); | 588 | ); |
589 | 589 | ||
590 | // ensure there are no more active urbs | 590 | // ensure there are no more active urbs |
@@ -891,10 +891,10 @@ static void tx_complete (struct urb *urb) | |||
891 | struct usbnet *dev = entry->dev; | 891 | struct usbnet *dev = entry->dev; |
892 | 892 | ||
893 | if (urb->status == 0) { | 893 | if (urb->status == 0) { |
894 | dev->stats.tx_packets++; | 894 | dev->net->stats.tx_packets++; |
895 | dev->stats.tx_bytes += entry->length; | 895 | dev->net->stats.tx_bytes += entry->length; |
896 | } else { | 896 | } else { |
897 | dev->stats.tx_errors++; | 897 | dev->net->stats.tx_errors++; |
898 | 898 | ||
899 | switch (urb->status) { | 899 | switch (urb->status) { |
900 | case -EPIPE: | 900 | case -EPIPE: |
@@ -1020,7 +1020,7 @@ int usbnet_start_xmit (struct sk_buff *skb, struct net_device *net) | |||
1020 | devdbg (dev, "drop, code %d", retval); | 1020 | devdbg (dev, "drop, code %d", retval); |
1021 | drop: | 1021 | drop: |
1022 | retval = NET_XMIT_SUCCESS; | 1022 | retval = NET_XMIT_SUCCESS; |
1023 | dev->stats.tx_dropped++; | 1023 | dev->net->stats.tx_dropped++; |
1024 | if (skb) | 1024 | if (skb) |
1025 | dev_kfree_skb_any (skb); | 1025 | dev_kfree_skb_any (skb); |
1026 | usb_free_urb (urb); | 1026 | usb_free_urb (urb); |
diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 87197dd9c788..1097c72e44d5 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c | |||
@@ -208,11 +208,14 @@ rx_drop: | |||
208 | 208 | ||
209 | static struct net_device_stats *veth_get_stats(struct net_device *dev) | 209 | static struct net_device_stats *veth_get_stats(struct net_device *dev) |
210 | { | 210 | { |
211 | struct veth_priv *priv = netdev_priv(dev); | 211 | struct veth_priv *priv; |
212 | struct net_device_stats *dev_stats = &dev->stats; | 212 | struct net_device_stats *dev_stats; |
213 | unsigned int cpu; | 213 | int cpu; |
214 | struct veth_net_stats *stats; | 214 | struct veth_net_stats *stats; |
215 | 215 | ||
216 | priv = netdev_priv(dev); | ||
217 | dev_stats = &dev->stats; | ||
218 | |||
216 | dev_stats->rx_packets = 0; | 219 | dev_stats->rx_packets = 0; |
217 | dev_stats->tx_packets = 0; | 220 | dev_stats->tx_packets = 0; |
218 | dev_stats->rx_bytes = 0; | 221 | dev_stats->rx_bytes = 0; |
@@ -220,17 +223,16 @@ static struct net_device_stats *veth_get_stats(struct net_device *dev) | |||
220 | dev_stats->tx_dropped = 0; | 223 | dev_stats->tx_dropped = 0; |
221 | dev_stats->rx_dropped = 0; | 224 | dev_stats->rx_dropped = 0; |
222 | 225 | ||
223 | if (priv->stats) | 226 | for_each_online_cpu(cpu) { |
224 | for_each_online_cpu(cpu) { | 227 | stats = per_cpu_ptr(priv->stats, cpu); |
225 | stats = per_cpu_ptr(priv->stats, cpu); | ||
226 | 228 | ||
227 | dev_stats->rx_packets += stats->rx_packets; | 229 | dev_stats->rx_packets += stats->rx_packets; |
228 | dev_stats->tx_packets += stats->tx_packets; | 230 | dev_stats->tx_packets += stats->tx_packets; |
229 | dev_stats->rx_bytes += stats->rx_bytes; | 231 | dev_stats->rx_bytes += stats->rx_bytes; |
230 | dev_stats->tx_bytes += stats->tx_bytes; | 232 | dev_stats->tx_bytes += stats->tx_bytes; |
231 | dev_stats->tx_dropped += stats->tx_dropped; | 233 | dev_stats->tx_dropped += stats->tx_dropped; |
232 | dev_stats->rx_dropped += stats->rx_dropped; | 234 | dev_stats->rx_dropped += stats->rx_dropped; |
233 | } | 235 | } |
234 | 236 | ||
235 | return dev_stats; | 237 | return dev_stats; |
236 | } | 238 | } |
@@ -257,8 +259,6 @@ static int veth_close(struct net_device *dev) | |||
257 | netif_carrier_off(dev); | 259 | netif_carrier_off(dev); |
258 | netif_carrier_off(priv->peer); | 260 | netif_carrier_off(priv->peer); |
259 | 261 | ||
260 | free_percpu(priv->stats); | ||
261 | priv->stats = NULL; | ||
262 | return 0; | 262 | return 0; |
263 | } | 263 | } |
264 | 264 | ||
@@ -289,6 +289,15 @@ static int veth_dev_init(struct net_device *dev) | |||
289 | return 0; | 289 | return 0; |
290 | } | 290 | } |
291 | 291 | ||
292 | static void veth_dev_free(struct net_device *dev) | ||
293 | { | ||
294 | struct veth_priv *priv; | ||
295 | |||
296 | priv = netdev_priv(dev); | ||
297 | free_percpu(priv->stats); | ||
298 | free_netdev(dev); | ||
299 | } | ||
300 | |||
292 | static const struct net_device_ops veth_netdev_ops = { | 301 | static const struct net_device_ops veth_netdev_ops = { |
293 | .ndo_init = veth_dev_init, | 302 | .ndo_init = veth_dev_init, |
294 | .ndo_open = veth_open, | 303 | .ndo_open = veth_open, |
@@ -306,7 +315,7 @@ static void veth_setup(struct net_device *dev) | |||
306 | dev->netdev_ops = &veth_netdev_ops; | 315 | dev->netdev_ops = &veth_netdev_ops; |
307 | dev->ethtool_ops = &veth_ethtool_ops; | 316 | dev->ethtool_ops = &veth_ethtool_ops; |
308 | dev->features |= NETIF_F_LLTX; | 317 | dev->features |= NETIF_F_LLTX; |
309 | dev->destructor = free_netdev; | 318 | dev->destructor = veth_dev_free; |
310 | } | 319 | } |
311 | 320 | ||
312 | /* | 321 | /* |
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c index b02f7adff5dc..3ba35956327a 100644 --- a/drivers/net/via-velocity.c +++ b/drivers/net/via-velocity.c | |||
@@ -1847,7 +1847,7 @@ static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_ | |||
1847 | */ | 1847 | */ |
1848 | if (tdinfo->skb_dma) { | 1848 | if (tdinfo->skb_dma) { |
1849 | 1849 | ||
1850 | pktlen = (skb->len > ETH_ZLEN ? : ETH_ZLEN); | 1850 | pktlen = max_t(unsigned int, skb->len, ETH_ZLEN); |
1851 | for (i = 0; i < tdinfo->nskb_dma; i++) { | 1851 | for (i = 0; i < tdinfo->nskb_dma; i++) { |
1852 | #ifdef VELOCITY_ZERO_COPY_SUPPORT | 1852 | #ifdef VELOCITY_ZERO_COPY_SUPPORT |
1853 | pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], le16_to_cpu(td->tdesc1.len), PCI_DMA_TODEVICE); | 1853 | pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], le16_to_cpu(td->tdesc1.len), PCI_DMA_TODEVICE); |
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index 55f7de09d134..ea045151f953 100644 --- a/drivers/net/wireless/ath/ath5k/base.c +++ b/drivers/net/wireless/ath/ath5k/base.c | |||
@@ -538,6 +538,7 @@ ath5k_pci_probe(struct pci_dev *pdev, | |||
538 | sc->iobase = mem; /* So we can unmap it on detach */ | 538 | sc->iobase = mem; /* So we can unmap it on detach */ |
539 | sc->cachelsz = csz * sizeof(u32); /* convert to bytes */ | 539 | sc->cachelsz = csz * sizeof(u32); /* convert to bytes */ |
540 | sc->opmode = NL80211_IFTYPE_STATION; | 540 | sc->opmode = NL80211_IFTYPE_STATION; |
541 | sc->bintval = 1000; | ||
541 | mutex_init(&sc->lock); | 542 | mutex_init(&sc->lock); |
542 | spin_lock_init(&sc->rxbuflock); | 543 | spin_lock_init(&sc->rxbuflock); |
543 | spin_lock_init(&sc->txbuflock); | 544 | spin_lock_init(&sc->txbuflock); |
@@ -686,6 +687,13 @@ ath5k_pci_resume(struct pci_dev *pdev) | |||
686 | if (err) | 687 | if (err) |
687 | return err; | 688 | return err; |
688 | 689 | ||
690 | /* | ||
691 | * Suspend/Resume resets the PCI configuration space, so we have to | ||
692 | * re-disable the RETRY_TIMEOUT register (0x41) to keep | ||
693 | * PCI Tx retries from interfering with C3 CPU state | ||
694 | */ | ||
695 | pci_write_config_byte(pdev, 0x41, 0); | ||
696 | |||
689 | err = request_irq(pdev->irq, ath5k_intr, IRQF_SHARED, "ath", sc); | 697 | err = request_irq(pdev->irq, ath5k_intr, IRQF_SHARED, "ath", sc); |
690 | if (err) { | 698 | if (err) { |
691 | ATH5K_ERR(sc, "request_irq failed\n"); | 699 | ATH5K_ERR(sc, "request_irq failed\n"); |
@@ -2748,9 +2756,6 @@ static int ath5k_add_interface(struct ieee80211_hw *hw, | |||
2748 | goto end; | 2756 | goto end; |
2749 | } | 2757 | } |
2750 | 2758 | ||
2751 | /* Set to a reasonable value. Note that this will | ||
2752 | * be set to mac80211's value at ath5k_config(). */ | ||
2753 | sc->bintval = 1000; | ||
2754 | ath5k_hw_set_lladdr(sc->ah, conf->mac_addr); | 2759 | ath5k_hw_set_lladdr(sc->ah, conf->mac_addr); |
2755 | 2760 | ||
2756 | ret = 0; | 2761 | ret = 0; |
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 9f49a3251d4d..66a6c1f5022a 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c | |||
@@ -1196,8 +1196,8 @@ void ath_radio_disable(struct ath_softc *sc) | |||
1196 | 1196 | ||
1197 | ath9k_hw_phy_disable(ah); | 1197 | ath9k_hw_phy_disable(ah); |
1198 | ath9k_hw_configpcipowersave(ah, 1); | 1198 | ath9k_hw_configpcipowersave(ah, 1); |
1199 | ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP); | ||
1200 | ath9k_ps_restore(sc); | 1199 | ath9k_ps_restore(sc); |
1200 | ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP); | ||
1201 | } | 1201 | } |
1202 | 1202 | ||
1203 | /*******************/ | 1203 | /*******************/ |
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c index ccdf20a2e9be..170c5b32e49b 100644 --- a/drivers/net/wireless/ath/ath9k/pci.c +++ b/drivers/net/wireless/ath/ath9k/pci.c | |||
@@ -87,6 +87,7 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
87 | struct ath_softc *sc; | 87 | struct ath_softc *sc; |
88 | struct ieee80211_hw *hw; | 88 | struct ieee80211_hw *hw; |
89 | u8 csz; | 89 | u8 csz; |
90 | u32 val; | ||
90 | int ret = 0; | 91 | int ret = 0; |
91 | struct ath_hw *ah; | 92 | struct ath_hw *ah; |
92 | 93 | ||
@@ -133,6 +134,14 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
133 | 134 | ||
134 | pci_set_master(pdev); | 135 | pci_set_master(pdev); |
135 | 136 | ||
137 | /* | ||
138 | * Disable the RETRY_TIMEOUT register (0x41) to keep | ||
139 | * PCI Tx retries from interfering with C3 CPU state. | ||
140 | */ | ||
141 | pci_read_config_dword(pdev, 0x40, &val); | ||
142 | if ((val & 0x0000ff00) != 0) | ||
143 | pci_write_config_dword(pdev, 0x40, val & 0xffff00ff); | ||
144 | |||
136 | ret = pci_request_region(pdev, 0, "ath9k"); | 145 | ret = pci_request_region(pdev, 0, "ath9k"); |
137 | if (ret) { | 146 | if (ret) { |
138 | dev_err(&pdev->dev, "PCI memory region reserve error\n"); | 147 | dev_err(&pdev->dev, "PCI memory region reserve error\n"); |
@@ -239,12 +248,21 @@ static int ath_pci_resume(struct pci_dev *pdev) | |||
239 | struct ieee80211_hw *hw = pci_get_drvdata(pdev); | 248 | struct ieee80211_hw *hw = pci_get_drvdata(pdev); |
240 | struct ath_wiphy *aphy = hw->priv; | 249 | struct ath_wiphy *aphy = hw->priv; |
241 | struct ath_softc *sc = aphy->sc; | 250 | struct ath_softc *sc = aphy->sc; |
251 | u32 val; | ||
242 | int err; | 252 | int err; |
243 | 253 | ||
244 | err = pci_enable_device(pdev); | 254 | err = pci_enable_device(pdev); |
245 | if (err) | 255 | if (err) |
246 | return err; | 256 | return err; |
247 | pci_restore_state(pdev); | 257 | pci_restore_state(pdev); |
258 | /* | ||
259 | * Suspend/Resume resets the PCI configuration space, so we have to | ||
260 | * re-disable the RETRY_TIMEOUT register (0x41) to keep | ||
261 | * PCI Tx retries from interfering with C3 CPU state | ||
262 | */ | ||
263 | pci_read_config_dword(pdev, 0x40, &val); | ||
264 | if ((val & 0x0000ff00) != 0) | ||
265 | pci_write_config_dword(pdev, 0x40, val & 0xffff00ff); | ||
248 | 266 | ||
249 | /* Enable LED */ | 267 | /* Enable LED */ |
250 | ath9k_hw_cfg_output(sc->sc_ah, ATH_LED_PIN, | 268 | ath9k_hw_cfg_output(sc->sc_ah, ATH_LED_PIN, |
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index f99f3a76df3f..cece1c4c6bda 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c | |||
@@ -539,11 +539,14 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb) | |||
539 | if (ath_beacon_dtim_pending_cab(skb)) { | 539 | if (ath_beacon_dtim_pending_cab(skb)) { |
540 | /* | 540 | /* |
541 | * Remain awake waiting for buffered broadcast/multicast | 541 | * Remain awake waiting for buffered broadcast/multicast |
542 | * frames. | 542 | * frames. If the last broadcast/multicast frame is not |
543 | * received properly, the next beacon frame will work as | ||
544 | * a backup trigger for returning into NETWORK SLEEP state, | ||
545 | * so we are waiting for it as well. | ||
543 | */ | 546 | */ |
544 | DPRINTF(sc, ATH_DBG_PS, "Received DTIM beacon indicating " | 547 | DPRINTF(sc, ATH_DBG_PS, "Received DTIM beacon indicating " |
545 | "buffered broadcast/multicast frame(s)\n"); | 548 | "buffered broadcast/multicast frame(s)\n"); |
546 | sc->sc_flags |= SC_OP_WAIT_FOR_CAB; | 549 | sc->sc_flags |= SC_OP_WAIT_FOR_CAB | SC_OP_WAIT_FOR_BEACON; |
547 | return; | 550 | return; |
548 | } | 551 | } |
549 | 552 | ||
diff --git a/drivers/net/wireless/iwmc3200wifi/iwm.h b/drivers/net/wireless/iwmc3200wifi/iwm.h index 635c16ee6186..77c339f8516c 100644 --- a/drivers/net/wireless/iwmc3200wifi/iwm.h +++ b/drivers/net/wireless/iwmc3200wifi/iwm.h | |||
@@ -288,6 +288,7 @@ struct iwm_priv { | |||
288 | u8 *eeprom; | 288 | u8 *eeprom; |
289 | struct timer_list watchdog; | 289 | struct timer_list watchdog; |
290 | struct work_struct reset_worker; | 290 | struct work_struct reset_worker; |
291 | struct mutex mutex; | ||
291 | struct rfkill *rfkill; | 292 | struct rfkill *rfkill; |
292 | 293 | ||
293 | char private[0] __attribute__((__aligned__(NETDEV_ALIGN))); | 294 | char private[0] __attribute__((__aligned__(NETDEV_ALIGN))); |
@@ -315,8 +316,11 @@ extern const struct iw_handler_def iwm_iw_handler_def; | |||
315 | void *iwm_if_alloc(int sizeof_bus, struct device *dev, | 316 | void *iwm_if_alloc(int sizeof_bus, struct device *dev, |
316 | struct iwm_if_ops *if_ops); | 317 | struct iwm_if_ops *if_ops); |
317 | void iwm_if_free(struct iwm_priv *iwm); | 318 | void iwm_if_free(struct iwm_priv *iwm); |
319 | int iwm_if_add(struct iwm_priv *iwm); | ||
320 | void iwm_if_remove(struct iwm_priv *iwm); | ||
318 | int iwm_mode_to_nl80211_iftype(int mode); | 321 | int iwm_mode_to_nl80211_iftype(int mode); |
319 | int iwm_priv_init(struct iwm_priv *iwm); | 322 | int iwm_priv_init(struct iwm_priv *iwm); |
323 | void iwm_priv_deinit(struct iwm_priv *iwm); | ||
320 | void iwm_reset(struct iwm_priv *iwm); | 324 | void iwm_reset(struct iwm_priv *iwm); |
321 | void iwm_tx_credit_init_pools(struct iwm_priv *iwm, | 325 | void iwm_tx_credit_init_pools(struct iwm_priv *iwm, |
322 | struct iwm_umac_notif_alive *alive); | 326 | struct iwm_umac_notif_alive *alive); |
diff --git a/drivers/net/wireless/iwmc3200wifi/main.c b/drivers/net/wireless/iwmc3200wifi/main.c index 6a2640f16b6d..8be206d58222 100644 --- a/drivers/net/wireless/iwmc3200wifi/main.c +++ b/drivers/net/wireless/iwmc3200wifi/main.c | |||
@@ -112,6 +112,9 @@ static void iwm_statistics_request(struct work_struct *work) | |||
112 | iwm_send_umac_stats_req(iwm, 0); | 112 | iwm_send_umac_stats_req(iwm, 0); |
113 | } | 113 | } |
114 | 114 | ||
115 | int __iwm_up(struct iwm_priv *iwm); | ||
116 | int __iwm_down(struct iwm_priv *iwm); | ||
117 | |||
115 | static void iwm_reset_worker(struct work_struct *work) | 118 | static void iwm_reset_worker(struct work_struct *work) |
116 | { | 119 | { |
117 | struct iwm_priv *iwm; | 120 | struct iwm_priv *iwm; |
@@ -120,6 +123,19 @@ static void iwm_reset_worker(struct work_struct *work) | |||
120 | 123 | ||
121 | iwm = container_of(work, struct iwm_priv, reset_worker); | 124 | iwm = container_of(work, struct iwm_priv, reset_worker); |
122 | 125 | ||
126 | /* | ||
127 | * XXX: The iwm->mutex is introduced purely for this reset work, | ||
128 | * because the other users for iwm_up and iwm_down are only netdev | ||
129 | * ndo_open and ndo_stop which are already protected by rtnl. | ||
130 | * Please remove iwm->mutex together if iwm_reset_worker() is not | ||
131 | * required in the future. | ||
132 | */ | ||
133 | if (!mutex_trylock(&iwm->mutex)) { | ||
134 | IWM_WARN(iwm, "We are in the middle of interface bringing " | ||
135 | "UP/DOWN. Skip driver resetting.\n"); | ||
136 | return; | ||
137 | } | ||
138 | |||
123 | if (iwm->umac_profile_active) { | 139 | if (iwm->umac_profile_active) { |
124 | profile = kmalloc(sizeof(struct iwm_umac_profile), GFP_KERNEL); | 140 | profile = kmalloc(sizeof(struct iwm_umac_profile), GFP_KERNEL); |
125 | if (profile) | 141 | if (profile) |
@@ -128,10 +144,10 @@ static void iwm_reset_worker(struct work_struct *work) | |||
128 | IWM_ERR(iwm, "Couldn't alloc memory for profile\n"); | 144 | IWM_ERR(iwm, "Couldn't alloc memory for profile\n"); |
129 | } | 145 | } |
130 | 146 | ||
131 | iwm_down(iwm); | 147 | __iwm_down(iwm); |
132 | 148 | ||
133 | while (retry++ < 3) { | 149 | while (retry++ < 3) { |
134 | ret = iwm_up(iwm); | 150 | ret = __iwm_up(iwm); |
135 | if (!ret) | 151 | if (!ret) |
136 | break; | 152 | break; |
137 | 153 | ||
@@ -142,7 +158,7 @@ static void iwm_reset_worker(struct work_struct *work) | |||
142 | IWM_WARN(iwm, "iwm_up() failed: %d\n", ret); | 158 | IWM_WARN(iwm, "iwm_up() failed: %d\n", ret); |
143 | 159 | ||
144 | kfree(profile); | 160 | kfree(profile); |
145 | return; | 161 | goto out; |
146 | } | 162 | } |
147 | 163 | ||
148 | if (profile) { | 164 | if (profile) { |
@@ -151,6 +167,9 @@ static void iwm_reset_worker(struct work_struct *work) | |||
151 | iwm_send_mlme_profile(iwm); | 167 | iwm_send_mlme_profile(iwm); |
152 | kfree(profile); | 168 | kfree(profile); |
153 | } | 169 | } |
170 | |||
171 | out: | ||
172 | mutex_unlock(&iwm->mutex); | ||
154 | } | 173 | } |
155 | 174 | ||
156 | static void iwm_watchdog(unsigned long data) | 175 | static void iwm_watchdog(unsigned long data) |
@@ -215,10 +234,21 @@ int iwm_priv_init(struct iwm_priv *iwm) | |||
215 | init_timer(&iwm->watchdog); | 234 | init_timer(&iwm->watchdog); |
216 | iwm->watchdog.function = iwm_watchdog; | 235 | iwm->watchdog.function = iwm_watchdog; |
217 | iwm->watchdog.data = (unsigned long)iwm; | 236 | iwm->watchdog.data = (unsigned long)iwm; |
237 | mutex_init(&iwm->mutex); | ||
218 | 238 | ||
219 | return 0; | 239 | return 0; |
220 | } | 240 | } |
221 | 241 | ||
242 | void iwm_priv_deinit(struct iwm_priv *iwm) | ||
243 | { | ||
244 | int i; | ||
245 | |||
246 | for (i = 0; i < IWM_TX_QUEUES; i++) | ||
247 | destroy_workqueue(iwm->txq[i].wq); | ||
248 | |||
249 | destroy_workqueue(iwm->rx_wq); | ||
250 | } | ||
251 | |||
222 | /* | 252 | /* |
223 | * We reset all the structures, and we reset the UMAC. | 253 | * We reset all the structures, and we reset the UMAC. |
224 | * After calling this routine, you're expected to reload | 254 | * After calling this routine, you're expected to reload |
@@ -466,7 +496,7 @@ void iwm_link_off(struct iwm_priv *iwm) | |||
466 | 496 | ||
467 | iwm_rx_free(iwm); | 497 | iwm_rx_free(iwm); |
468 | 498 | ||
469 | cancel_delayed_work(&iwm->stats_request); | 499 | cancel_delayed_work_sync(&iwm->stats_request); |
470 | memset(wstats, 0, sizeof(struct iw_statistics)); | 500 | memset(wstats, 0, sizeof(struct iw_statistics)); |
471 | wstats->qual.updated = IW_QUAL_ALL_INVALID; | 501 | wstats->qual.updated = IW_QUAL_ALL_INVALID; |
472 | 502 | ||
@@ -511,7 +541,7 @@ static int iwm_channels_init(struct iwm_priv *iwm) | |||
511 | return 0; | 541 | return 0; |
512 | } | 542 | } |
513 | 543 | ||
514 | int iwm_up(struct iwm_priv *iwm) | 544 | int __iwm_up(struct iwm_priv *iwm) |
515 | { | 545 | { |
516 | int ret; | 546 | int ret; |
517 | struct iwm_notif *notif_reboot, *notif_ack = NULL; | 547 | struct iwm_notif *notif_reboot, *notif_ack = NULL; |
@@ -647,7 +677,18 @@ int iwm_up(struct iwm_priv *iwm) | |||
647 | return -EIO; | 677 | return -EIO; |
648 | } | 678 | } |
649 | 679 | ||
650 | int iwm_down(struct iwm_priv *iwm) | 680 | int iwm_up(struct iwm_priv *iwm) |
681 | { | ||
682 | int ret; | ||
683 | |||
684 | mutex_lock(&iwm->mutex); | ||
685 | ret = __iwm_up(iwm); | ||
686 | mutex_unlock(&iwm->mutex); | ||
687 | |||
688 | return ret; | ||
689 | } | ||
690 | |||
691 | int __iwm_down(struct iwm_priv *iwm) | ||
651 | { | 692 | { |
652 | int ret; | 693 | int ret; |
653 | 694 | ||
@@ -678,3 +719,14 @@ int iwm_down(struct iwm_priv *iwm) | |||
678 | 719 | ||
679 | return 0; | 720 | return 0; |
680 | } | 721 | } |
722 | |||
723 | int iwm_down(struct iwm_priv *iwm) | ||
724 | { | ||
725 | int ret; | ||
726 | |||
727 | mutex_lock(&iwm->mutex); | ||
728 | ret = __iwm_down(iwm); | ||
729 | mutex_unlock(&iwm->mutex); | ||
730 | |||
731 | return ret; | ||
732 | } | ||
diff --git a/drivers/net/wireless/iwmc3200wifi/netdev.c b/drivers/net/wireless/iwmc3200wifi/netdev.c index 68e2c3b6c7a1..aaa20c6885c8 100644 --- a/drivers/net/wireless/iwmc3200wifi/netdev.c +++ b/drivers/net/wireless/iwmc3200wifi/netdev.c | |||
@@ -114,32 +114,31 @@ void *iwm_if_alloc(int sizeof_bus, struct device *dev, | |||
114 | iwm = wdev_to_iwm(wdev); | 114 | iwm = wdev_to_iwm(wdev); |
115 | iwm->bus_ops = if_ops; | 115 | iwm->bus_ops = if_ops; |
116 | iwm->wdev = wdev; | 116 | iwm->wdev = wdev; |
117 | iwm_priv_init(iwm); | 117 | |
118 | ret = iwm_priv_init(iwm); | ||
119 | if (ret) { | ||
120 | dev_err(dev, "failed to init iwm_priv\n"); | ||
121 | goto out_wdev; | ||
122 | } | ||
123 | |||
118 | wdev->iftype = iwm_mode_to_nl80211_iftype(iwm->conf.mode); | 124 | wdev->iftype = iwm_mode_to_nl80211_iftype(iwm->conf.mode); |
119 | 125 | ||
120 | ndev = alloc_netdev_mq(0, "wlan%d", ether_setup, | 126 | ndev = alloc_netdev_mq(0, "wlan%d", ether_setup, IWM_TX_QUEUES); |
121 | IWM_TX_QUEUES); | ||
122 | if (!ndev) { | 127 | if (!ndev) { |
123 | dev_err(dev, "no memory for network device instance\n"); | 128 | dev_err(dev, "no memory for network device instance\n"); |
124 | goto out_wdev; | 129 | goto out_priv; |
125 | } | 130 | } |
126 | 131 | ||
127 | ndev->netdev_ops = &iwm_netdev_ops; | 132 | ndev->netdev_ops = &iwm_netdev_ops; |
128 | ndev->wireless_handlers = &iwm_iw_handler_def; | 133 | ndev->wireless_handlers = &iwm_iw_handler_def; |
129 | ndev->ieee80211_ptr = wdev; | 134 | ndev->ieee80211_ptr = wdev; |
130 | SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy)); | 135 | SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy)); |
131 | ret = register_netdev(ndev); | ||
132 | if (ret < 0) { | ||
133 | dev_err(dev, "Failed to register netdev: %d\n", ret); | ||
134 | goto out_ndev; | ||
135 | } | ||
136 | |||
137 | wdev->netdev = ndev; | 136 | wdev->netdev = ndev; |
138 | 137 | ||
139 | return iwm; | 138 | return iwm; |
140 | 139 | ||
141 | out_ndev: | 140 | out_priv: |
142 | free_netdev(ndev); | 141 | iwm_priv_deinit(iwm); |
143 | 142 | ||
144 | out_wdev: | 143 | out_wdev: |
145 | iwm_wdev_free(iwm); | 144 | iwm_wdev_free(iwm); |
@@ -148,15 +147,29 @@ void *iwm_if_alloc(int sizeof_bus, struct device *dev, | |||
148 | 147 | ||
149 | void iwm_if_free(struct iwm_priv *iwm) | 148 | void iwm_if_free(struct iwm_priv *iwm) |
150 | { | 149 | { |
151 | int i; | ||
152 | |||
153 | if (!iwm_to_ndev(iwm)) | 150 | if (!iwm_to_ndev(iwm)) |
154 | return; | 151 | return; |
155 | 152 | ||
156 | unregister_netdev(iwm_to_ndev(iwm)); | ||
157 | free_netdev(iwm_to_ndev(iwm)); | 153 | free_netdev(iwm_to_ndev(iwm)); |
158 | iwm_wdev_free(iwm); | 154 | iwm_wdev_free(iwm); |
159 | destroy_workqueue(iwm->rx_wq); | 155 | iwm_priv_deinit(iwm); |
160 | for (i = 0; i < IWM_TX_QUEUES; i++) | 156 | } |
161 | destroy_workqueue(iwm->txq[i].wq); | 157 | |
158 | int iwm_if_add(struct iwm_priv *iwm) | ||
159 | { | ||
160 | struct net_device *ndev = iwm_to_ndev(iwm); | ||
161 | int ret; | ||
162 | |||
163 | ret = register_netdev(ndev); | ||
164 | if (ret < 0) { | ||
165 | dev_err(&ndev->dev, "Failed to register netdev: %d\n", ret); | ||
166 | return ret; | ||
167 | } | ||
168 | |||
169 | return 0; | ||
170 | } | ||
171 | |||
172 | void iwm_if_remove(struct iwm_priv *iwm) | ||
173 | { | ||
174 | unregister_netdev(iwm_to_ndev(iwm)); | ||
162 | } | 175 | } |
diff --git a/drivers/net/wireless/iwmc3200wifi/sdio.c b/drivers/net/wireless/iwmc3200wifi/sdio.c index b54da677b371..916681837fd2 100644 --- a/drivers/net/wireless/iwmc3200wifi/sdio.c +++ b/drivers/net/wireless/iwmc3200wifi/sdio.c | |||
@@ -454,10 +454,18 @@ static int iwm_sdio_probe(struct sdio_func *func, | |||
454 | 454 | ||
455 | INIT_WORK(&hw->isr_worker, iwm_sdio_isr_worker); | 455 | INIT_WORK(&hw->isr_worker, iwm_sdio_isr_worker); |
456 | 456 | ||
457 | ret = iwm_if_add(iwm); | ||
458 | if (ret) { | ||
459 | dev_err(dev, "add SDIO interface failed\n"); | ||
460 | goto destroy_wq; | ||
461 | } | ||
462 | |||
457 | dev_info(dev, "IWM SDIO probe\n"); | 463 | dev_info(dev, "IWM SDIO probe\n"); |
458 | 464 | ||
459 | return 0; | 465 | return 0; |
460 | 466 | ||
467 | destroy_wq: | ||
468 | destroy_workqueue(hw->isr_wq); | ||
461 | debugfs_exit: | 469 | debugfs_exit: |
462 | iwm_debugfs_exit(iwm); | 470 | iwm_debugfs_exit(iwm); |
463 | if_free: | 471 | if_free: |
@@ -471,9 +479,10 @@ static void iwm_sdio_remove(struct sdio_func *func) | |||
471 | struct iwm_priv *iwm = hw_to_iwm(hw); | 479 | struct iwm_priv *iwm = hw_to_iwm(hw); |
472 | struct device *dev = &func->dev; | 480 | struct device *dev = &func->dev; |
473 | 481 | ||
482 | iwm_if_remove(iwm); | ||
483 | destroy_workqueue(hw->isr_wq); | ||
474 | iwm_debugfs_exit(iwm); | 484 | iwm_debugfs_exit(iwm); |
475 | iwm_if_free(iwm); | 485 | iwm_if_free(iwm); |
476 | destroy_workqueue(hw->isr_wq); | ||
477 | 486 | ||
478 | sdio_set_drvdata(func, NULL); | 487 | sdio_set_drvdata(func, NULL); |
479 | 488 | ||
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c index f0e5e943f6e3..14a19baff214 100644 --- a/drivers/net/wireless/zd1211rw/zd_usb.c +++ b/drivers/net/wireless/zd1211rw/zd_usb.c | |||
@@ -67,6 +67,7 @@ static struct usb_device_id usb_ids[] = { | |||
67 | { USB_DEVICE(0x079b, 0x0062), .driver_info = DEVICE_ZD1211B }, | 67 | { USB_DEVICE(0x079b, 0x0062), .driver_info = DEVICE_ZD1211B }, |
68 | { USB_DEVICE(0x1582, 0x6003), .driver_info = DEVICE_ZD1211B }, | 68 | { USB_DEVICE(0x1582, 0x6003), .driver_info = DEVICE_ZD1211B }, |
69 | { USB_DEVICE(0x050d, 0x705c), .driver_info = DEVICE_ZD1211B }, | 69 | { USB_DEVICE(0x050d, 0x705c), .driver_info = DEVICE_ZD1211B }, |
70 | { USB_DEVICE(0x083a, 0xe503), .driver_info = DEVICE_ZD1211B }, | ||
70 | { USB_DEVICE(0x083a, 0xe506), .driver_info = DEVICE_ZD1211B }, | 71 | { USB_DEVICE(0x083a, 0xe506), .driver_info = DEVICE_ZD1211B }, |
71 | { USB_DEVICE(0x083a, 0x4505), .driver_info = DEVICE_ZD1211B }, | 72 | { USB_DEVICE(0x083a, 0x4505), .driver_info = DEVICE_ZD1211B }, |
72 | { USB_DEVICE(0x0471, 0x1236), .driver_info = DEVICE_ZD1211B }, | 73 | { USB_DEVICE(0x0471, 0x1236), .driver_info = DEVICE_ZD1211B }, |