path: root/drivers/net
author		Linus Torvalds <torvalds@linux-foundation.org>	2012-01-09 15:18:17 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-01-09 15:18:17 -0500
commit		12e55508921865aefdd80fd17afe70c191afbd1b (patch)
tree		476b3226fd9602b36bed0934409959bdd24a7eb5 /drivers/net
parent		55b81e6f2795484ea8edf5805c95c007cacfa736 (diff)
parent		4d447c9a6ebc0142d320f075c5bac6d202a79fd4 (diff)
Merge branch 'staging-next' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging
* 'staging-next' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging: (466 commits)
  net/hyperv: Add support for jumbo frame up to 64KB
  net/hyperv: Add NETVSP protocol version negotiation
  net/hyperv: Remove unnecessary kmap_atomic in netvsc driver
  staging/rtl8192e: Register against lib80211
  staging/rtl8192e: Convert to lib80211_crypt_info
  staging/rtl8192e: Convert to lib80211_crypt_data and lib80211_crypt_ops
  staging/rtl8192e: Add lib80211.h to rtllib.h
  staging/mei: add watchdog device registration wrappers
  drm/omap: GEM, deal with cache
  staging: vt6656: int.c, int.h: Change return of function to void
  staging: usbip: removed unused definitions from header
  staging: usbip: removed dead code from receive function
  staging:iio: Drop {mark,unmark}_in_use callbacks
  staging:iio: Drop buffer mark_param_change callback
  staging:iio: Drop the unused buffer enable() and is_enabled() callbacks
  staging:iio: Drop buffer busy flag
  staging:iio: Make sure a device is only opened once at a time
  staging:iio: Disallow modifying buffer size when buffer is enabled
  staging:iio: Disallow changing scan elements in all buffered modes
  staging:iio: Use iio_buffer_enabled instead of open coding it
  ...

Fix up conflict in drivers/staging/iio/adc/ad799x_core.c (removal of module_init due to using module_i2c_driver() helper, next to removal of MODULE_ALIAS due to using MODULE_DEVICE_TABLE instead).
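For readers unfamiliar with the conflict described above: module_i2c_driver() generates the module_init()/module_exit() pair that merely registers and unregisters an i2c_driver, and MODULE_DEVICE_TABLE() makes a hand-written MODULE_ALIAS() redundant. A minimal sketch of that conversion pattern follows, using a hypothetical foo_driver rather than the actual ad799x code:

	/* Before: open-coded registration plus a manual alias.
	 * (Assumes a struct i2c_driver foo_driver defined elsewhere.)
	 */
	static int __init foo_init(void)
	{
		return i2c_add_driver(&foo_driver);
	}
	module_init(foo_init);

	static void __exit foo_exit(void)
	{
		i2c_del_driver(&foo_driver);
	}
	module_exit(foo_exit);

	MODULE_ALIAS("i2c:foo");

	/* After: the helper expands to the same init/exit boilerplate,
	 * and the device table generates the module alias automatically.
	 */
	static const struct i2c_device_id foo_id[] = {
		{ "foo", 0 },
		{ }
	};
	MODULE_DEVICE_TABLE(i2c, foo_id);

	module_i2c_driver(foo_driver);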
Diffstat (limited to 'drivers/net')
-rw-r--r--	drivers/net/Kconfig	2
-rw-r--r--	drivers/net/Makefile	2
-rw-r--r--	drivers/net/hyperv/Kconfig	5
-rw-r--r--	drivers/net/hyperv/Makefile	3
-rw-r--r--	drivers/net/hyperv/hyperv_net.h	1165
-rw-r--r--	drivers/net/hyperv/netvsc.c	932
-rw-r--r--	drivers/net/hyperv/netvsc_drv.c	508
-rw-r--r--	drivers/net/hyperv/rndis_filter.c	817
8 files changed, 3434 insertions, 0 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 9845afb37cc8..b98285446a5a 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -342,4 +342,6 @@ config VMXNET3
 	  To compile this driver as a module, choose M here: the
 	  module will be called vmxnet3.
 
+source "drivers/net/hyperv/Kconfig"
+
 endif # NETDEVICES
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 1988881853ab..a6b8ce11a22f 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -68,3 +68,5 @@ obj-$(CONFIG_USB_USBNET) += usb/
 obj-$(CONFIG_USB_ZD1201)	+= usb/
 obj-$(CONFIG_USB_IPHETH)	+= usb/
 obj-$(CONFIG_USB_CDC_PHONET)	+= usb/
+
+obj-$(CONFIG_HYPERV_NET) += hyperv/
diff --git a/drivers/net/hyperv/Kconfig b/drivers/net/hyperv/Kconfig
new file mode 100644
index 000000000000..936968d23559
--- /dev/null
+++ b/drivers/net/hyperv/Kconfig
@@ -0,0 +1,5 @@
1config HYPERV_NET
2 tristate "Microsoft Hyper-V virtual network driver"
3 depends on HYPERV
4 help
5 Select this option to enable the Hyper-V virtual network driver.
diff --git a/drivers/net/hyperv/Makefile b/drivers/net/hyperv/Makefile
new file mode 100644
index 000000000000..c8a66827100c
--- /dev/null
+++ b/drivers/net/hyperv/Makefile
@@ -0,0 +1,3 @@
1obj-$(CONFIG_HYPERV_NET) += hv_netvsc.o
2
3hv_netvsc-y := netvsc_drv.o netvsc.o rndis_filter.o
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
new file mode 100644
index 000000000000..dec5836ae075
--- /dev/null
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -0,0 +1,1165 @@
1/*
2 *
3 * Copyright (c) 2011, Microsoft Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
16 * Place - Suite 330, Boston, MA 02111-1307 USA.
17 *
18 * Authors:
19 * Haiyang Zhang <haiyangz@microsoft.com>
20 * Hank Janssen <hjanssen@microsoft.com>
21 * K. Y. Srinivasan <kys@microsoft.com>
22 *
23 */
24
25#ifndef _HYPERV_NET_H
26#define _HYPERV_NET_H
27
28#include <linux/list.h>
29#include <linux/hyperv.h>
30
31/* Fwd declaration */
32struct hv_netvsc_packet;
33
34/* Represents the xfer page packet which contains 1 or more netvsc packets */
35struct xferpage_packet {
36 struct list_head list_ent;
37
38 /* # of netvsc packets this xfer packet contains */
39 u32 count;
40};
41
42/*
43 * Represent netvsc packet which contains 1 RNDIS and 1 ethernet frame
44 * within the RNDIS
45 */
46struct hv_netvsc_packet {
47 /* Bookkeeping stuff */
48 struct list_head list_ent;
49
50 struct hv_device *device;
51 bool is_data_pkt;
52
53 /*
54 * Valid only for receives when we break a xfer page packet
55 * into multiple netvsc packets
56 */
57 struct xferpage_packet *xfer_page_pkt;
58
59 union {
60 struct {
61 u64 recv_completion_tid;
62 void *recv_completion_ctx;
63 void (*recv_completion)(void *context);
64 } recv;
65 struct {
66 u64 send_completion_tid;
67 void *send_completion_ctx;
68 void (*send_completion)(void *context);
69 } send;
70 } completion;
71
72 /* This points to the memory after page_buf */
73 void *extension;
74
75 u32 total_data_buflen;
76 /* Points to the send/receive buffer where the ethernet frame is */
77 void *data;
78 u32 page_buf_cnt;
79 struct hv_page_buffer page_buf[0];
80};
81
82struct netvsc_device_info {
83 unsigned char mac_adr[6];
84 bool link_state; /* 0 - link up, 1 - link down */
85 int ring_size;
86};
87
88enum rndis_device_state {
89 RNDIS_DEV_UNINITIALIZED = 0,
90 RNDIS_DEV_INITIALIZING,
91 RNDIS_DEV_INITIALIZED,
92 RNDIS_DEV_DATAINITIALIZED,
93};
94
95struct rndis_device {
96 struct netvsc_device *net_dev;
97
98 enum rndis_device_state state;
99 bool link_state;
100 atomic_t new_req_id;
101
102 spinlock_t request_lock;
103 struct list_head req_list;
104
105 unsigned char hw_mac_adr[ETH_ALEN];
106};
107
108
109/* Interface */
110int netvsc_device_add(struct hv_device *device, void *additional_info);
111int netvsc_device_remove(struct hv_device *device);
112int netvsc_send(struct hv_device *device,
113 struct hv_netvsc_packet *packet);
114void netvsc_linkstatus_callback(struct hv_device *device_obj,
115 unsigned int status);
116int netvsc_recv_callback(struct hv_device *device_obj,
117 struct hv_netvsc_packet *packet);
118int rndis_filter_open(struct hv_device *dev);
119int rndis_filter_close(struct hv_device *dev);
120int rndis_filter_device_add(struct hv_device *dev,
121 void *additional_info);
122void rndis_filter_device_remove(struct hv_device *dev);
123int rndis_filter_receive(struct hv_device *dev,
124 struct hv_netvsc_packet *pkt);
125
126
127
128int rndis_filter_send(struct hv_device *dev,
129 struct hv_netvsc_packet *pkt);
130
131int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter);
132
133
134#define NVSP_INVALID_PROTOCOL_VERSION ((u32)0xFFFFFFFF)
135
136#define NVSP_PROTOCOL_VERSION_1 2
137#define NVSP_PROTOCOL_VERSION_2 0x30002
138
139enum {
140 NVSP_MSG_TYPE_NONE = 0,
141
142 /* Init Messages */
143 NVSP_MSG_TYPE_INIT = 1,
144 NVSP_MSG_TYPE_INIT_COMPLETE = 2,
145
146 NVSP_VERSION_MSG_START = 100,
147
148 /* Version 1 Messages */
149 NVSP_MSG1_TYPE_SEND_NDIS_VER = NVSP_VERSION_MSG_START,
150
151 NVSP_MSG1_TYPE_SEND_RECV_BUF,
152 NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE,
153 NVSP_MSG1_TYPE_REVOKE_RECV_BUF,
154
155 NVSP_MSG1_TYPE_SEND_SEND_BUF,
156 NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE,
157 NVSP_MSG1_TYPE_REVOKE_SEND_BUF,
158
159 NVSP_MSG1_TYPE_SEND_RNDIS_PKT,
160 NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE,
161
162 /* Version 2 messages */
163 NVSP_MSG2_TYPE_SEND_CHIMNEY_DELEGATED_BUF,
164 NVSP_MSG2_TYPE_SEND_CHIMNEY_DELEGATED_BUF_COMP,
165 NVSP_MSG2_TYPE_REVOKE_CHIMNEY_DELEGATED_BUF,
166
167 NVSP_MSG2_TYPE_RESUME_CHIMNEY_RX_INDICATION,
168
169 NVSP_MSG2_TYPE_TERMINATE_CHIMNEY,
170 NVSP_MSG2_TYPE_TERMINATE_CHIMNEY_COMP,
171
172 NVSP_MSG2_TYPE_INDICATE_CHIMNEY_EVENT,
173
174 NVSP_MSG2_TYPE_SEND_CHIMNEY_PKT,
175 NVSP_MSG2_TYPE_SEND_CHIMNEY_PKT_COMP,
176
177 NVSP_MSG2_TYPE_POST_CHIMNEY_RECV_REQ,
178 NVSP_MSG2_TYPE_POST_CHIMNEY_RECV_REQ_COMP,
179
180 NVSP_MSG2_TYPE_ALLOC_RXBUF,
181 NVSP_MSG2_TYPE_ALLOC_RXBUF_COMP,
182
183 NVSP_MSG2_TYPE_FREE_RXBUF,
184
185 NVSP_MSG2_TYPE_SEND_VMQ_RNDIS_PKT,
186 NVSP_MSG2_TYPE_SEND_VMQ_RNDIS_PKT_COMP,
187
188 NVSP_MSG2_TYPE_SEND_NDIS_CONFIG,
189
190 NVSP_MSG2_TYPE_ALLOC_CHIMNEY_HANDLE,
191 NVSP_MSG2_TYPE_ALLOC_CHIMNEY_HANDLE_COMP,
192};
193
194enum {
195 NVSP_STAT_NONE = 0,
196 NVSP_STAT_SUCCESS,
197 NVSP_STAT_FAIL,
198 NVSP_STAT_PROTOCOL_TOO_NEW,
199 NVSP_STAT_PROTOCOL_TOO_OLD,
200 NVSP_STAT_INVALID_RNDIS_PKT,
201 NVSP_STAT_BUSY,
202 NVSP_STAT_PROTOCOL_UNSUPPORTED,
203 NVSP_STAT_MAX,
204};
205
206struct nvsp_message_header {
207 u32 msg_type;
208};
209
210/* Init Messages */
211
212/*
213 * This message is used by the VSC to initialize the channel after the channel
214 * has been opened. This message should never include anything other than
215 * versioning (i.e. this message will be the same forever).
216 */
217struct nvsp_message_init {
218 u32 min_protocol_ver;
219 u32 max_protocol_ver;
220} __packed;
221
222/*
223 * This message is used by the VSP to complete the initialization of the
224 * channel. This message should never include anything other than versioning
225 * (i.e. this message will be the same forever).
226 */
227struct nvsp_message_init_complete {
228 u32 negotiated_protocol_ver;
229 u32 max_mdl_chain_len;
230 u32 status;
231} __packed;
232
233union nvsp_message_init_uber {
234 struct nvsp_message_init init;
235 struct nvsp_message_init_complete init_complete;
236} __packed;
237
238/* Version 1 Messages */
239
240/*
241 * This message is used by the VSC to send the NDIS version to the VSP. The VSP
242 * can use this information when handling OIDs sent by the VSC.
243 */
244struct nvsp_1_message_send_ndis_version {
245 u32 ndis_major_ver;
246 u32 ndis_minor_ver;
247} __packed;
248
249/*
250 * This message is used by the VSC to send a receive buffer to the VSP. The VSP
251 * can then use the receive buffer to send data to the VSC.
252 */
253struct nvsp_1_message_send_receive_buffer {
254 u32 gpadl_handle;
255 u16 id;
256} __packed;
257
258struct nvsp_1_receive_buffer_section {
259 u32 offset;
260 u32 sub_alloc_size;
261 u32 num_sub_allocs;
262 u32 end_offset;
263} __packed;
264
265/*
266 * This message is used by the VSP to acknowledge a receive buffer sent by the
267 * VSC. This message must be sent by the VSP before the VSP uses the receive
268 * buffer.
269 */
270struct nvsp_1_message_send_receive_buffer_complete {
271 u32 status;
272 u32 num_sections;
273
274 /*
275 * The receive buffer is split into two parts, a large suballocation
276 * section and a small suballocation section. These sections are then
277 * suballocated by a certain size.
278 */
279
280 /*
281 * For example, the following break up of the receive buffer has 6
282 * large suballocations and 10 small suballocations.
283 */
284
285 /*
286 * | Large Section | | Small Section |
287 * ------------------------------------------------------------
288 * | | | | | | | | | | | | | | | | | |
289 * | |
290 * LargeOffset SmallOffset
291 */
292
293 struct nvsp_1_receive_buffer_section sections[1];
294} __packed;
295
296/*
297 * This message is sent by the VSC to revoke the receive buffer. After the VSP
298 * completes this transaction, the vsp should never use the receive buffer
299 * again.
300 */
301struct nvsp_1_message_revoke_receive_buffer {
302 u16 id;
303};
304
305/*
306 * This message is used by the VSC to send a send buffer to the VSP. The VSC
307 * can then use the send buffer to send data to the VSP.
308 */
309struct nvsp_1_message_send_send_buffer {
310 u32 gpadl_handle;
311 u16 id;
312} __packed;
313
314/*
315 * This message is used by the VSP to acknowledge a send buffer sent by the
316 * VSC. This message must be sent by the VSP before the VSP uses the sent
317 * buffer.
318 */
319struct nvsp_1_message_send_send_buffer_complete {
320 u32 status;
321
322 /*
323 * The VSC gets to choose the size of the send buffer and the VSP gets
324 * to choose the sections size of the buffer. This was done to enable
325 * dynamic reconfigurations when the cost of GPA-direct buffers
326 * decreases.
327 */
328 u32 section_size;
329} __packed;
330
331/*
332 * This message is sent by the VSC to revoke the send buffer. After the VSP
333 * completes this transaction, the vsp should never use the send buffer again.
334 */
335struct nvsp_1_message_revoke_send_buffer {
336 u16 id;
337};
338
339/*
340 * This message is used by both the VSP and the VSC to send a RNDIS message to
341 * the opposite channel endpoint.
342 */
343struct nvsp_1_message_send_rndis_packet {
344 /*
345 * This field is specified by RNDIS. It assumes there are two different
346 * channels of communication. However, the Network VSP only has one.
347 * Therefore, the channel travels with the RNDIS packet.
348 */
349 u32 channel_type;
350
351 /*
352 * This field is used to send part or all of the data through a send
353 * buffer. This value specifies an index into the send buffer. If the
354 * index is 0xFFFFFFFF, then the send buffer is not being used and all
355 * of the data was sent through other VMBus mechanisms.
356 */
357 u32 send_buf_section_index;
358 u32 send_buf_section_size;
359} __packed;
360
361/*
362 * This message is used by both the VSP and the VSC to complete a RNDIS message
363 * to the opposite channel endpoint. At this point, the initiator of this
364 * message cannot use any resources associated with the original RNDIS packet.
365 */
366struct nvsp_1_message_send_rndis_packet_complete {
367 u32 status;
368};
369
370union nvsp_1_message_uber {
371 struct nvsp_1_message_send_ndis_version send_ndis_ver;
372
373 struct nvsp_1_message_send_receive_buffer send_recv_buf;
374 struct nvsp_1_message_send_receive_buffer_complete
375 send_recv_buf_complete;
376 struct nvsp_1_message_revoke_receive_buffer revoke_recv_buf;
377
378 struct nvsp_1_message_send_send_buffer send_send_buf;
379 struct nvsp_1_message_send_send_buffer_complete send_send_buf_complete;
380 struct nvsp_1_message_revoke_send_buffer revoke_send_buf;
381
382 struct nvsp_1_message_send_rndis_packet send_rndis_pkt;
383 struct nvsp_1_message_send_rndis_packet_complete
384 send_rndis_pkt_complete;
385} __packed;
386
387
388/*
389 * Network VSP protocol version 2 messages:
390 */
391struct nvsp_2_vsc_capability {
392 union {
393 u64 data;
394 struct {
395 u64 vmq:1;
396 u64 chimney:1;
397 u64 sriov:1;
398 u64 ieee8021q:1;
399 u64 correlation_id:1;
400 };
401 };
402} __packed;
403
404struct nvsp_2_send_ndis_config {
405 u32 mtu;
406 u32 reserved;
407 struct nvsp_2_vsc_capability capability;
408} __packed;
409
410/* Allocate receive buffer */
411struct nvsp_2_alloc_rxbuf {
412 /* Allocation ID to match the allocation request and response */
413 u32 alloc_id;
414
415 /* Length of the VM shared memory receive buffer that needs to
416 * be allocated
417 */
418 u32 len;
419} __packed;
420
421/* Allocate receive buffer complete */
422struct nvsp_2_alloc_rxbuf_comp {
423 /* The NDIS_STATUS code for buffer allocation */
424 u32 status;
425
426 u32 alloc_id;
427
428 /* GPADL handle for the allocated receive buffer */
429 u32 gpadl_handle;
430
431 /* Receive buffer ID */
432 u64 recv_buf_id;
433} __packed;
434
435struct nvsp_2_free_rxbuf {
436 u64 recv_buf_id;
437} __packed;
438
439union nvsp_2_message_uber {
440 struct nvsp_2_send_ndis_config send_ndis_config;
441 struct nvsp_2_alloc_rxbuf alloc_rxbuf;
442 struct nvsp_2_alloc_rxbuf_comp alloc_rxbuf_comp;
443 struct nvsp_2_free_rxbuf free_rxbuf;
444} __packed;
445
446union nvsp_all_messages {
447 union nvsp_message_init_uber init_msg;
448 union nvsp_1_message_uber v1_msg;
449 union nvsp_2_message_uber v2_msg;
450} __packed;
451
452/* ALL Messages */
453struct nvsp_message {
454 struct nvsp_message_header hdr;
455 union nvsp_all_messages msg;
456} __packed;
457
458
459#define NETVSC_MTU 65536
460
461#define NETVSC_RECEIVE_BUFFER_SIZE (1024*1024*2) /* 2MB */
462
463#define NETVSC_RECEIVE_BUFFER_ID 0xcafe
464
465#define NETVSC_RECEIVE_SG_COUNT 1
466
467/* Preallocated receive packets */
468#define NETVSC_RECEIVE_PACKETLIST_COUNT 256
469
470#define NETVSC_PACKET_SIZE 2048
471
472/* Per netvsc channel-specific */
473struct netvsc_device {
474 struct hv_device *dev;
475
476 u32 nvsp_version;
477
478 atomic_t num_outstanding_sends;
479 bool start_remove;
480 bool destroy;
481 /*
482 * List of free preallocated hv_netvsc_packet to represent receive
483 * packet
484 */
485 struct list_head recv_pkt_list;
486 spinlock_t recv_pkt_list_lock;
487
488 /* Receive buffer allocated by us but managed by NetVSP */
489 void *recv_buf;
490 u32 recv_buf_size;
491 u32 recv_buf_gpadl_handle;
492 u32 recv_section_cnt;
493 struct nvsp_1_receive_buffer_section *recv_section;
494
495 /* Used for NetVSP initialization protocol */
496 struct completion channel_init_wait;
497 struct nvsp_message channel_init_pkt;
498
499 struct nvsp_message revoke_packet;
500 /* unsigned char HwMacAddr[HW_MACADDR_LEN]; */
501
502 struct net_device *ndev;
503
504 /* Holds rndis device info */
505 void *extension;
506};
507
508
509/* Status codes */
510
511
512#ifndef STATUS_SUCCESS
513#define STATUS_SUCCESS (0x00000000L)
514#endif
515
516#ifndef STATUS_UNSUCCESSFUL
517#define STATUS_UNSUCCESSFUL (0xC0000001L)
518#endif
519
520#ifndef STATUS_PENDING
521#define STATUS_PENDING (0x00000103L)
522#endif
523
524#ifndef STATUS_INSUFFICIENT_RESOURCES
525#define STATUS_INSUFFICIENT_RESOURCES (0xC000009AL)
526#endif
527
528#ifndef STATUS_BUFFER_OVERFLOW
529#define STATUS_BUFFER_OVERFLOW (0x80000005L)
530#endif
531
532#ifndef STATUS_NOT_SUPPORTED
533#define STATUS_NOT_SUPPORTED (0xC00000BBL)
534#endif
535
536#define RNDIS_STATUS_SUCCESS (STATUS_SUCCESS)
537#define RNDIS_STATUS_PENDING (STATUS_PENDING)
538#define RNDIS_STATUS_NOT_RECOGNIZED (0x00010001L)
539#define RNDIS_STATUS_NOT_COPIED (0x00010002L)
540#define RNDIS_STATUS_NOT_ACCEPTED (0x00010003L)
541#define RNDIS_STATUS_CALL_ACTIVE (0x00010007L)
542
543#define RNDIS_STATUS_ONLINE (0x40010003L)
544#define RNDIS_STATUS_RESET_START (0x40010004L)
545#define RNDIS_STATUS_RESET_END (0x40010005L)
546#define RNDIS_STATUS_RING_STATUS (0x40010006L)
547#define RNDIS_STATUS_CLOSED (0x40010007L)
548#define RNDIS_STATUS_WAN_LINE_UP (0x40010008L)
549#define RNDIS_STATUS_WAN_LINE_DOWN (0x40010009L)
550#define RNDIS_STATUS_WAN_FRAGMENT (0x4001000AL)
551#define RNDIS_STATUS_MEDIA_CONNECT (0x4001000BL)
552#define RNDIS_STATUS_MEDIA_DISCONNECT (0x4001000CL)
553#define RNDIS_STATUS_HARDWARE_LINE_UP (0x4001000DL)
554#define RNDIS_STATUS_HARDWARE_LINE_DOWN (0x4001000EL)
555#define RNDIS_STATUS_INTERFACE_UP (0x4001000FL)
556#define RNDIS_STATUS_INTERFACE_DOWN (0x40010010L)
557#define RNDIS_STATUS_MEDIA_BUSY (0x40010011L)
558#define RNDIS_STATUS_MEDIA_SPECIFIC_INDICATION (0x40010012L)
559#define RNDIS_STATUS_WW_INDICATION RDIA_SPECIFIC_INDICATION
560#define RNDIS_STATUS_LINK_SPEED_CHANGE (0x40010013L)
561
562#define RNDIS_STATUS_NOT_RESETTABLE (0x80010001L)
563#define RNDIS_STATUS_SOFT_ERRORS (0x80010003L)
564#define RNDIS_STATUS_HARD_ERRORS (0x80010004L)
565#define RNDIS_STATUS_BUFFER_OVERFLOW (STATUS_BUFFER_OVERFLOW)
566
567#define RNDIS_STATUS_FAILURE (STATUS_UNSUCCESSFUL)
568#define RNDIS_STATUS_RESOURCES (STATUS_INSUFFICIENT_RESOURCES)
569#define RNDIS_STATUS_CLOSING (0xC0010002L)
570#define RNDIS_STATUS_BAD_VERSION (0xC0010004L)
571#define RNDIS_STATUS_BAD_CHARACTERISTICS (0xC0010005L)
572#define RNDIS_STATUS_ADAPTER_NOT_FOUND (0xC0010006L)
573#define RNDIS_STATUS_OPEN_FAILED (0xC0010007L)
574#define RNDIS_STATUS_DEVICE_FAILED (0xC0010008L)
575#define RNDIS_STATUS_MULTICAST_FULL (0xC0010009L)
576#define RNDIS_STATUS_MULTICAST_EXISTS (0xC001000AL)
577#define RNDIS_STATUS_MULTICAST_NOT_FOUND (0xC001000BL)
578#define RNDIS_STATUS_REQUEST_ABORTED (0xC001000CL)
579#define RNDIS_STATUS_RESET_IN_PROGRESS (0xC001000DL)
580#define RNDIS_STATUS_CLOSING_INDICATING (0xC001000EL)
581#define RNDIS_STATUS_NOT_SUPPORTED (STATUS_NOT_SUPPORTED)
582#define RNDIS_STATUS_INVALID_PACKET (0xC001000FL)
583#define RNDIS_STATUS_OPEN_LIST_FULL (0xC0010010L)
584#define RNDIS_STATUS_ADAPTER_NOT_READY (0xC0010011L)
585#define RNDIS_STATUS_ADAPTER_NOT_OPEN (0xC0010012L)
586#define RNDIS_STATUS_NOT_INDICATING (0xC0010013L)
587#define RNDIS_STATUS_INVALID_LENGTH (0xC0010014L)
588#define RNDIS_STATUS_INVALID_DATA (0xC0010015L)
589#define RNDIS_STATUS_BUFFER_TOO_SHORT (0xC0010016L)
590#define RNDIS_STATUS_INVALID_OID (0xC0010017L)
591#define RNDIS_STATUS_ADAPTER_REMOVED (0xC0010018L)
592#define RNDIS_STATUS_UNSUPPORTED_MEDIA (0xC0010019L)
593#define RNDIS_STATUS_GROUP_ADDRESS_IN_USE (0xC001001AL)
594#define RNDIS_STATUS_FILE_NOT_FOUND (0xC001001BL)
595#define RNDIS_STATUS_ERROR_READING_FILE (0xC001001CL)
596#define RNDIS_STATUS_ALREADY_MAPPED (0xC001001DL)
597#define RNDIS_STATUS_RESOURCE_CONFLICT (0xC001001EL)
598#define RNDIS_STATUS_NO_CABLE (0xC001001FL)
599
600#define RNDIS_STATUS_INVALID_SAP (0xC0010020L)
601#define RNDIS_STATUS_SAP_IN_USE (0xC0010021L)
602#define RNDIS_STATUS_INVALID_ADDRESS (0xC0010022L)
603#define RNDIS_STATUS_VC_NOT_ACTIVATED (0xC0010023L)
604#define RNDIS_STATUS_DEST_OUT_OF_ORDER (0xC0010024L)
605#define RNDIS_STATUS_VC_NOT_AVAILABLE (0xC0010025L)
606#define RNDIS_STATUS_CELLRATE_NOT_AVAILABLE (0xC0010026L)
607#define RNDIS_STATUS_INCOMPATABLE_QOS (0xC0010027L)
608#define RNDIS_STATUS_AAL_PARAMS_UNSUPPORTED (0xC0010028L)
609#define RNDIS_STATUS_NO_ROUTE_TO_DESTINATION (0xC0010029L)
610
611#define RNDIS_STATUS_TOKEN_RING_OPEN_ERROR (0xC0011000L)
612
613/* Object Identifiers used by NdisRequest Query/Set Information */
614/* General Objects */
615#define RNDIS_OID_GEN_SUPPORTED_LIST 0x00010101
616#define RNDIS_OID_GEN_HARDWARE_STATUS 0x00010102
617#define RNDIS_OID_GEN_MEDIA_SUPPORTED 0x00010103
618#define RNDIS_OID_GEN_MEDIA_IN_USE 0x00010104
619#define RNDIS_OID_GEN_MAXIMUM_LOOKAHEAD 0x00010105
620#define RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE 0x00010106
621#define RNDIS_OID_GEN_LINK_SPEED 0x00010107
622#define RNDIS_OID_GEN_TRANSMIT_BUFFER_SPACE 0x00010108
623#define RNDIS_OID_GEN_RECEIVE_BUFFER_SPACE 0x00010109
624#define RNDIS_OID_GEN_TRANSMIT_BLOCK_SIZE 0x0001010A
625#define RNDIS_OID_GEN_RECEIVE_BLOCK_SIZE 0x0001010B
626#define RNDIS_OID_GEN_VENDOR_ID 0x0001010C
627#define RNDIS_OID_GEN_VENDOR_DESCRIPTION 0x0001010D
628#define RNDIS_OID_GEN_CURRENT_PACKET_FILTER 0x0001010E
629#define RNDIS_OID_GEN_CURRENT_LOOKAHEAD 0x0001010F
630#define RNDIS_OID_GEN_DRIVER_VERSION 0x00010110
631#define RNDIS_OID_GEN_MAXIMUM_TOTAL_SIZE 0x00010111
632#define RNDIS_OID_GEN_PROTOCOL_OPTIONS 0x00010112
633#define RNDIS_OID_GEN_MAC_OPTIONS 0x00010113
634#define RNDIS_OID_GEN_MEDIA_CONNECT_STATUS 0x00010114
635#define RNDIS_OID_GEN_MAXIMUM_SEND_PACKETS 0x00010115
636#define RNDIS_OID_GEN_VENDOR_DRIVER_VERSION 0x00010116
637#define RNDIS_OID_GEN_NETWORK_LAYER_ADDRESSES 0x00010118
638#define RNDIS_OID_GEN_TRANSPORT_HEADER_OFFSET 0x00010119
639#define RNDIS_OID_GEN_MACHINE_NAME 0x0001021A
640#define RNDIS_OID_GEN_RNDIS_CONFIG_PARAMETER 0x0001021B
641
642#define RNDIS_OID_GEN_XMIT_OK 0x00020101
643#define RNDIS_OID_GEN_RCV_OK 0x00020102
644#define RNDIS_OID_GEN_XMIT_ERROR 0x00020103
645#define RNDIS_OID_GEN_RCV_ERROR 0x00020104
646#define RNDIS_OID_GEN_RCV_NO_BUFFER 0x00020105
647
648#define RNDIS_OID_GEN_DIRECTED_BYTES_XMIT 0x00020201
649#define RNDIS_OID_GEN_DIRECTED_FRAMES_XMIT 0x00020202
650#define RNDIS_OID_GEN_MULTICAST_BYTES_XMIT 0x00020203
651#define RNDIS_OID_GEN_MULTICAST_FRAMES_XMIT 0x00020204
652#define RNDIS_OID_GEN_BROADCAST_BYTES_XMIT 0x00020205
653#define RNDIS_OID_GEN_BROADCAST_FRAMES_XMIT 0x00020206
654#define RNDIS_OID_GEN_DIRECTED_BYTES_RCV 0x00020207
655#define RNDIS_OID_GEN_DIRECTED_FRAMES_RCV 0x00020208
656#define RNDIS_OID_GEN_MULTICAST_BYTES_RCV 0x00020209
657#define RNDIS_OID_GEN_MULTICAST_FRAMES_RCV 0x0002020A
658#define RNDIS_OID_GEN_BROADCAST_BYTES_RCV 0x0002020B
659#define RNDIS_OID_GEN_BROADCAST_FRAMES_RCV 0x0002020C
660
661#define RNDIS_OID_GEN_RCV_CRC_ERROR 0x0002020D
662#define RNDIS_OID_GEN_TRANSMIT_QUEUE_LENGTH 0x0002020E
663
664#define RNDIS_OID_GEN_GET_TIME_CAPS 0x0002020F
665#define RNDIS_OID_GEN_GET_NETCARD_TIME 0x00020210
666
667/* These are connection-oriented general OIDs. */
668/* These replace the above OIDs for connection-oriented media. */
669#define RNDIS_OID_GEN_CO_SUPPORTED_LIST 0x00010101
670#define RNDIS_OID_GEN_CO_HARDWARE_STATUS 0x00010102
671#define RNDIS_OID_GEN_CO_MEDIA_SUPPORTED 0x00010103
672#define RNDIS_OID_GEN_CO_MEDIA_IN_USE 0x00010104
673#define RNDIS_OID_GEN_CO_LINK_SPEED 0x00010105
674#define RNDIS_OID_GEN_CO_VENDOR_ID 0x00010106
675#define RNDIS_OID_GEN_CO_VENDOR_DESCRIPTION 0x00010107
676#define RNDIS_OID_GEN_CO_DRIVER_VERSION 0x00010108
677#define RNDIS_OID_GEN_CO_PROTOCOL_OPTIONS 0x00010109
678#define RNDIS_OID_GEN_CO_MAC_OPTIONS 0x0001010A
679#define RNDIS_OID_GEN_CO_MEDIA_CONNECT_STATUS 0x0001010B
680#define RNDIS_OID_GEN_CO_VENDOR_DRIVER_VERSION 0x0001010C
681#define RNDIS_OID_GEN_CO_MINIMUM_LINK_SPEED 0x0001010D
682
683#define RNDIS_OID_GEN_CO_GET_TIME_CAPS 0x00010201
684#define RNDIS_OID_GEN_CO_GET_NETCARD_TIME 0x00010202
685
686/* These are connection-oriented statistics OIDs. */
687#define RNDIS_OID_GEN_CO_XMIT_PDUS_OK 0x00020101
688#define RNDIS_OID_GEN_CO_RCV_PDUS_OK 0x00020102
689#define RNDIS_OID_GEN_CO_XMIT_PDUS_ERROR 0x00020103
690#define RNDIS_OID_GEN_CO_RCV_PDUS_ERROR 0x00020104
691#define RNDIS_OID_GEN_CO_RCV_PDUS_NO_BUFFER 0x00020105
692
693
694#define RNDIS_OID_GEN_CO_RCV_CRC_ERROR 0x00020201
695#define RNDIS_OID_GEN_CO_TRANSMIT_QUEUE_LENGTH 0x00020202
696#define RNDIS_OID_GEN_CO_BYTES_XMIT 0x00020203
697#define RNDIS_OID_GEN_CO_BYTES_RCV 0x00020204
698#define RNDIS_OID_GEN_CO_BYTES_XMIT_OUTSTANDING 0x00020205
699#define RNDIS_OID_GEN_CO_NETCARD_LOAD 0x00020206
700
701/* These are objects for Connection-oriented media call-managers. */
702#define RNDIS_OID_CO_ADD_PVC 0xFF000001
703#define RNDIS_OID_CO_DELETE_PVC 0xFF000002
704#define RNDIS_OID_CO_GET_CALL_INFORMATION 0xFF000003
705#define RNDIS_OID_CO_ADD_ADDRESS 0xFF000004
706#define RNDIS_OID_CO_DELETE_ADDRESS 0xFF000005
707#define RNDIS_OID_CO_GET_ADDRESSES 0xFF000006
708#define RNDIS_OID_CO_ADDRESS_CHANGE 0xFF000007
709#define RNDIS_OID_CO_SIGNALING_ENABLED 0xFF000008
710#define RNDIS_OID_CO_SIGNALING_DISABLED 0xFF000009
711
712/* 802.3 Objects (Ethernet) */
713#define RNDIS_OID_802_3_PERMANENT_ADDRESS 0x01010101
714#define RNDIS_OID_802_3_CURRENT_ADDRESS 0x01010102
715#define RNDIS_OID_802_3_MULTICAST_LIST 0x01010103
716#define RNDIS_OID_802_3_MAXIMUM_LIST_SIZE 0x01010104
717#define RNDIS_OID_802_3_MAC_OPTIONS 0x01010105
718
719#define NDIS_802_3_MAC_OPTION_PRIORITY 0x00000001
720
721#define RNDIS_OID_802_3_RCV_ERROR_ALIGNMENT 0x01020101
722#define RNDIS_OID_802_3_XMIT_ONE_COLLISION 0x01020102
723#define RNDIS_OID_802_3_XMIT_MORE_COLLISIONS 0x01020103
724
725#define RNDIS_OID_802_3_XMIT_DEFERRED 0x01020201
726#define RNDIS_OID_802_3_XMIT_MAX_COLLISIONS 0x01020202
727#define RNDIS_OID_802_3_RCV_OVERRUN 0x01020203
728#define RNDIS_OID_802_3_XMIT_UNDERRUN 0x01020204
729#define RNDIS_OID_802_3_XMIT_HEARTBEAT_FAILURE 0x01020205
730#define RNDIS_OID_802_3_XMIT_TIMES_CRS_LOST 0x01020206
731#define RNDIS_OID_802_3_XMIT_LATE_COLLISIONS 0x01020207
732
733/* Remote NDIS message types */
734#define REMOTE_NDIS_PACKET_MSG 0x00000001
735#define REMOTE_NDIS_INITIALIZE_MSG 0x00000002
736#define REMOTE_NDIS_HALT_MSG 0x00000003
737#define REMOTE_NDIS_QUERY_MSG 0x00000004
738#define REMOTE_NDIS_SET_MSG 0x00000005
739#define REMOTE_NDIS_RESET_MSG 0x00000006
740#define REMOTE_NDIS_INDICATE_STATUS_MSG 0x00000007
741#define REMOTE_NDIS_KEEPALIVE_MSG 0x00000008
742
743#define REMOTE_CONDIS_MP_CREATE_VC_MSG 0x00008001
744#define REMOTE_CONDIS_MP_DELETE_VC_MSG 0x00008002
745#define REMOTE_CONDIS_MP_ACTIVATE_VC_MSG 0x00008005
746#define REMOTE_CONDIS_MP_DEACTIVATE_VC_MSG 0x00008006
747#define REMOTE_CONDIS_INDICATE_STATUS_MSG 0x00008007
748
749/* Remote NDIS message completion types */
750#define REMOTE_NDIS_INITIALIZE_CMPLT 0x80000002
751#define REMOTE_NDIS_QUERY_CMPLT 0x80000004
752#define REMOTE_NDIS_SET_CMPLT 0x80000005
753#define REMOTE_NDIS_RESET_CMPLT 0x80000006
754#define REMOTE_NDIS_KEEPALIVE_CMPLT 0x80000008
755
756#define REMOTE_CONDIS_MP_CREATE_VC_CMPLT 0x80008001
757#define REMOTE_CONDIS_MP_DELETE_VC_CMPLT 0x80008002
758#define REMOTE_CONDIS_MP_ACTIVATE_VC_CMPLT 0x80008005
759#define REMOTE_CONDIS_MP_DEACTIVATE_VC_CMPLT 0x80008006
760
761/*
762 * Reserved message type for private communication between lower-layer host
763 * driver and remote device, if necessary.
764 */
765#define REMOTE_NDIS_BUS_MSG 0xff000001
766
767/* Defines for DeviceFlags in struct rndis_initialize_complete */
768#define RNDIS_DF_CONNECTIONLESS 0x00000001
769#define RNDIS_DF_CONNECTION_ORIENTED 0x00000002
770#define RNDIS_DF_RAW_DATA 0x00000004
771
772/* Remote NDIS medium types. */
773#define RNDIS_MEDIUM_802_3 0x00000000
774#define RNDIS_MEDIUM_802_5 0x00000001
775#define RNDIS_MEDIUM_FDDI 0x00000002
776#define RNDIS_MEDIUM_WAN 0x00000003
777#define RNDIS_MEDIUM_LOCAL_TALK 0x00000004
778#define RNDIS_MEDIUM_ARCNET_RAW 0x00000006
779#define RNDIS_MEDIUM_ARCNET_878_2 0x00000007
780#define RNDIS_MEDIUM_ATM 0x00000008
781#define RNDIS_MEDIUM_WIRELESS_WAN 0x00000009
782#define RNDIS_MEDIUM_IRDA 0x0000000a
783#define RNDIS_MEDIUM_CO_WAN 0x0000000b
784/* Not a real medium, defined as an upper-bound */
785#define RNDIS_MEDIUM_MAX 0x0000000d
786
787
788/* Remote NDIS medium connection states. */
789#define RNDIS_MEDIA_STATE_CONNECTED 0x00000000
790#define RNDIS_MEDIA_STATE_DISCONNECTED 0x00000001
791
792/* Remote NDIS version numbers */
793#define RNDIS_MAJOR_VERSION 0x00000001
794#define RNDIS_MINOR_VERSION 0x00000000
795
796
797/* NdisInitialize message */
798struct rndis_initialize_request {
799 u32 req_id;
800 u32 major_ver;
801 u32 minor_ver;
802 u32 max_xfer_size;
803};
804
805/* Response to NdisInitialize */
806struct rndis_initialize_complete {
807 u32 req_id;
808 u32 status;
809 u32 major_ver;
810 u32 minor_ver;
811 u32 dev_flags;
812 u32 medium;
813 u32 max_pkt_per_msg;
814 u32 max_xfer_size;
815 u32 pkt_alignment_factor;
816 u32 af_list_offset;
817 u32 af_list_size;
818};
819
820/* Call manager devices only: Information about an address family */
821/* supported by the device is appended to the response to NdisInitialize. */
822struct rndis_co_address_family {
823 u32 address_family;
824 u32 major_ver;
825 u32 minor_ver;
826};
827
828/* NdisHalt message */
829struct rndis_halt_request {
830 u32 req_id;
831};
832
833/* NdisQueryRequest message */
834struct rndis_query_request {
835 u32 req_id;
836 u32 oid;
837 u32 info_buflen;
838 u32 info_buf_offset;
839 u32 dev_vc_handle;
840};
841
842/* Response to NdisQueryRequest */
843struct rndis_query_complete {
844 u32 req_id;
845 u32 status;
846 u32 info_buflen;
847 u32 info_buf_offset;
848};
849
850/* NdisSetRequest message */
851struct rndis_set_request {
852 u32 req_id;
853 u32 oid;
854 u32 info_buflen;
855 u32 info_buf_offset;
856 u32 dev_vc_handle;
857};
858
859/* Response to NdisSetRequest */
860struct rndis_set_complete {
861 u32 req_id;
862 u32 status;
863};
864
865/* NdisReset message */
866struct rndis_reset_request {
867 u32 reserved;
868};
869
870/* Response to NdisReset */
871struct rndis_reset_complete {
872 u32 status;
873 u32 addressing_reset;
874};
875
876/* NdisMIndicateStatus message */
877struct rndis_indicate_status {
878 u32 status;
879 u32 status_buflen;
880 u32 status_buf_offset;
881};
882
883/* Diagnostic information passed as the status buffer in */
884/* struct rndis_indicate_status messages signifying error conditions. */
885struct rndis_diagnostic_info {
886 u32 diag_status;
887 u32 error_offset;
888};
889
890/* NdisKeepAlive message */
891struct rndis_keepalive_request {
892 u32 req_id;
893};
894
895/* Response to NdisKeepAlive */
896struct rndis_keepalive_complete {
897 u32 req_id;
898 u32 status;
899};
900
901/*
902 * Data message. All Offset fields contain byte offsets from the beginning of
903 * struct rndis_packet. All Length fields are in bytes. VcHandle is set
904 * to 0 for connectionless data, otherwise it contains the VC handle.
905 */
906struct rndis_packet {
907 u32 data_offset;
908 u32 data_len;
909 u32 oob_data_offset;
910 u32 oob_data_len;
911 u32 num_oob_data_elements;
912 u32 per_pkt_info_offset;
913 u32 per_pkt_info_len;
914 u32 vc_handle;
915 u32 reserved;
916};
917
918/* Optional Out of Band data associated with a Data message. */
919struct rndis_oobd {
920 u32 size;
921 u32 type;
922 u32 class_info_offset;
923};
924
925/* Packet extension field contents associated with a Data message. */
926struct rndis_per_packet_info {
927 u32 size;
928 u32 type;
929 u32 per_pkt_info_offset;
930};
931
932/* Format of Information buffer passed in a SetRequest for the OID */
933/* OID_GEN_RNDIS_CONFIG_PARAMETER. */
934struct rndis_config_parameter_info {
935 u32 parameter_name_offset;
936 u32 parameter_name_length;
937 u32 parameter_type;
938 u32 parameter_value_offset;
939 u32 parameter_value_length;
940};
941
942/* Values for ParameterType in struct rndis_config_parameter_info */
943#define RNDIS_CONFIG_PARAM_TYPE_INTEGER 0
944#define RNDIS_CONFIG_PARAM_TYPE_STRING 2
945
946/* CONDIS Miniport messages for connection oriented devices */
947/* that do not implement a call manager. */
948
949/* CoNdisMiniportCreateVc message */
950struct rcondis_mp_create_vc {
951 u32 req_id;
952 u32 ndis_vc_handle;
953};
954
955/* Response to CoNdisMiniportCreateVc */
956struct rcondis_mp_create_vc_complete {
957 u32 req_id;
958 u32 dev_vc_handle;
959 u32 status;
960};
961
962/* CoNdisMiniportDeleteVc message */
963struct rcondis_mp_delete_vc {
964 u32 req_id;
965 u32 dev_vc_handle;
966};
967
968/* Response to CoNdisMiniportDeleteVc */
969struct rcondis_mp_delete_vc_complete {
970 u32 req_id;
971 u32 status;
972};
973
974/* CoNdisMiniportQueryRequest message */
975struct rcondis_mp_query_request {
976 u32 req_id;
977 u32 request_type;
978 u32 oid;
979 u32 dev_vc_handle;
980 u32 info_buflen;
981 u32 info_buf_offset;
982};
983
984/* CoNdisMiniportSetRequest message */
985struct rcondis_mp_set_request {
986 u32 req_id;
987 u32 request_type;
988 u32 oid;
989 u32 dev_vc_handle;
990 u32 info_buflen;
991 u32 info_buf_offset;
992};
993
994/* CoNdisIndicateStatus message */
995struct rcondis_indicate_status {
996 u32 ndis_vc_handle;
997 u32 status;
998 u32 status_buflen;
999 u32 status_buf_offset;
1000};
1001
1002/* CONDIS Call/VC parameters */
1003struct rcondis_specific_parameters {
1004 u32 parameter_type;
1005 u32 parameter_length;
1006 u32 parameter_lffset;
1007};
1008
1009struct rcondis_media_parameters {
1010 u32 flags;
1011 u32 reserved1;
1012 u32 reserved2;
1013 struct rcondis_specific_parameters media_specific;
1014};
1015
1016struct rndis_flowspec {
1017 u32 token_rate;
1018 u32 token_bucket_size;
1019 u32 peak_bandwidth;
1020 u32 latency;
1021 u32 delay_variation;
1022 u32 service_type;
1023 u32 max_sdu_size;
1024 u32 minimum_policed_size;
1025};
1026
1027struct rcondis_call_manager_parameters {
1028 struct rndis_flowspec transmit;
1029 struct rndis_flowspec receive;
1030 struct rcondis_specific_parameters call_mgr_specific;
1031};
1032
1033/* CoNdisMiniportActivateVc message */
1034struct rcondis_mp_activate_vc_request {
1035 u32 req_id;
1036 u32 flags;
1037 u32 dev_vc_handle;
1038 u32 media_params_offset;
1039 u32 media_params_length;
1040 u32 call_mgr_params_offset;
1041 u32 call_mgr_params_length;
1042};
1043
1044/* Response to CoNdisMiniportActivateVc */
1045struct rcondis_mp_activate_vc_complete {
1046 u32 req_id;
1047 u32 status;
1048};
1049
1050/* CoNdisMiniportDeactivateVc message */
1051struct rcondis_mp_deactivate_vc_request {
1052 u32 req_id;
1053 u32 flags;
1054 u32 dev_vc_handle;
1055};
1056
1057/* Response to CoNdisMiniportDeactivateVc */
1058struct rcondis_mp_deactivate_vc_complete {
1059 u32 req_id;
1060 u32 status;
1061};
1062
1063
1064/* union with all of the RNDIS messages */
1065union rndis_message_container {
1066 struct rndis_packet pkt;
1067 struct rndis_initialize_request init_req;
1068 struct rndis_halt_request halt_req;
1069 struct rndis_query_request query_req;
1070 struct rndis_set_request set_req;
1071 struct rndis_reset_request reset_req;
1072 struct rndis_keepalive_request keep_alive_req;
1073 struct rndis_indicate_status indicate_status;
1074 struct rndis_initialize_complete init_complete;
1075 struct rndis_query_complete query_complete;
1076 struct rndis_set_complete set_complete;
1077 struct rndis_reset_complete reset_complete;
1078 struct rndis_keepalive_complete keep_alive_complete;
1079 struct rcondis_mp_create_vc co_miniport_create_vc;
1080 struct rcondis_mp_delete_vc co_miniport_delete_vc;
1081 struct rcondis_indicate_status co_indicate_status;
1082 struct rcondis_mp_activate_vc_request co_miniport_activate_vc;
1083 struct rcondis_mp_deactivate_vc_request co_miniport_deactivate_vc;
1084 struct rcondis_mp_create_vc_complete co_miniport_create_vc_complete;
1085 struct rcondis_mp_delete_vc_complete co_miniport_delete_vc_complete;
1086 struct rcondis_mp_activate_vc_complete co_miniport_activate_vc_complete;
1087 struct rcondis_mp_deactivate_vc_complete
1088 co_miniport_deactivate_vc_complete;
1089};
1090
1091/* Remote NDIS message format */
1092struct rndis_message {
1093 u32 ndis_msg_type;
1094
1095 /* Total length of this message, from the beginning */
1096 /* of the struct rndis_message, in bytes. */
1097 u32 msg_len;
1098
1099 /* Actual message */
1100 union rndis_message_container msg;
1101};
1102
1103
1104struct rndis_filter_packet {
1105 void *completion_ctx;
1106 void (*completion)(void *context);
1107 struct rndis_message msg;
1108};
1109
1110/* Handy macros */
1111
1112/* get the size of an RNDIS message. Pass in the message type, */
1113/* struct rndis_set_request, struct rndis_packet for example */
1114#define RNDIS_MESSAGE_SIZE(msg) \
1115 (sizeof(msg) + (sizeof(struct rndis_message) - \
1116 sizeof(union rndis_message_container)))
1117
1118/* get pointer to info buffer with message pointer */
1119#define MESSAGE_TO_INFO_BUFFER(msg) \
1120 (((unsigned char *)(msg)) + msg->info_buf_offset)
1121
1122/* get pointer to status buffer with message pointer */
1123#define MESSAGE_TO_STATUS_BUFFER(msg) \
1124 (((unsigned char *)(msg)) + msg->status_buf_offset)
1125
1126/* get pointer to OOBD buffer with message pointer */
1127#define MESSAGE_TO_OOBD_BUFFER(msg) \
1128 (((unsigned char *)(msg)) + msg->oob_data_offset)
1129
1130/* get pointer to data buffer with message pointer */
1131#define MESSAGE_TO_DATA_BUFFER(msg) \
1132 (((unsigned char *)(msg)) + msg->per_pkt_info_offset)
1133
1134/* get pointer to contained message from NDIS_MESSAGE pointer */
1135#define RNDIS_MESSAGE_PTR_TO_MESSAGE_PTR(rndis_msg) \
1136 ((void *) &rndis_msg->msg)
1137
1138/* get pointer to contained message from NDIS_MESSAGE pointer */
1139#define RNDIS_MESSAGE_RAW_PTR_TO_MESSAGE_PTR(rndis_msg) \
1140 ((void *) rndis_msg)
1141
1142
1143#define __struct_bcount(x)
1144
1145
1146
1147#define RNDIS_HEADER_SIZE (sizeof(struct rndis_message) - \
1148 sizeof(union rndis_message_container))
1149
1150#define NDIS_PACKET_TYPE_DIRECTED 0x00000001
1151#define NDIS_PACKET_TYPE_MULTICAST 0x00000002
1152#define NDIS_PACKET_TYPE_ALL_MULTICAST 0x00000004
1153#define NDIS_PACKET_TYPE_BROADCAST 0x00000008
1154#define NDIS_PACKET_TYPE_SOURCE_ROUTING 0x00000010
1155#define NDIS_PACKET_TYPE_PROMISCUOUS 0x00000020
1156#define NDIS_PACKET_TYPE_SMT 0x00000040
1157#define NDIS_PACKET_TYPE_ALL_LOCAL 0x00000080
1158#define NDIS_PACKET_TYPE_GROUP 0x00000100
1159#define NDIS_PACKET_TYPE_ALL_FUNCTIONAL 0x00000200
1160#define NDIS_PACKET_TYPE_FUNCTIONAL 0x00000400
1161#define NDIS_PACKET_TYPE_MAC_FRAME 0x00000800
1162
1163
1164
1165#endif /* _HYPERV_NET_H */
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
new file mode 100644
index 000000000000..8965b45ce5a5
--- /dev/null
+++ b/drivers/net/hyperv/netvsc.c
@@ -0,0 +1,932 @@
1/*
2 * Copyright (c) 2009, Microsoft Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
17 * Authors:
18 * Haiyang Zhang <haiyangz@microsoft.com>
19 * Hank Janssen <hjanssen@microsoft.com>
20 */
21#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22
23#include <linux/kernel.h>
24#include <linux/sched.h>
25#include <linux/wait.h>
26#include <linux/mm.h>
27#include <linux/delay.h>
28#include <linux/io.h>
29#include <linux/slab.h>
30#include <linux/netdevice.h>
31#include <linux/if_ether.h>
32
33#include "hyperv_net.h"
34
35
36static struct netvsc_device *alloc_net_device(struct hv_device *device)
37{
38 struct netvsc_device *net_device;
39 struct net_device *ndev = hv_get_drvdata(device);
40
41 net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
42 if (!net_device)
43 return NULL;
44
45 net_device->start_remove = false;
46 net_device->destroy = false;
47 net_device->dev = device;
48 net_device->ndev = ndev;
49
50 hv_set_drvdata(device, net_device);
51 return net_device;
52}
53
54static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
55{
56 struct netvsc_device *net_device;
57
58 net_device = hv_get_drvdata(device);
59 if (net_device && net_device->destroy)
60 net_device = NULL;
61
62 return net_device;
63}
64
65static struct netvsc_device *get_inbound_net_device(struct hv_device *device)
66{
67 struct netvsc_device *net_device;
68
69 net_device = hv_get_drvdata(device);
70
71 if (!net_device)
72 goto get_in_err;
73
74 if (net_device->destroy &&
75 atomic_read(&net_device->num_outstanding_sends) == 0)
76 net_device = NULL;
77
78get_in_err:
79 return net_device;
80}
81
82
83static int netvsc_destroy_recv_buf(struct netvsc_device *net_device)
84{
85 struct nvsp_message *revoke_packet;
86 int ret = 0;
87 struct net_device *ndev = net_device->ndev;
88
89 /*
90 * If we got a section count, it means we received a
91 * SendReceiveBufferComplete msg (ie sent
92 * NvspMessage1TypeSendReceiveBuffer msg) therefore, we need
93 * to send a revoke msg here
94 */
95 if (net_device->recv_section_cnt) {
96 /* Send the revoke receive buffer */
97 revoke_packet = &net_device->revoke_packet;
98 memset(revoke_packet, 0, sizeof(struct nvsp_message));
99
100 revoke_packet->hdr.msg_type =
101 NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
102 revoke_packet->msg.v1_msg.
103 revoke_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;
104
105 ret = vmbus_sendpacket(net_device->dev->channel,
106 revoke_packet,
107 sizeof(struct nvsp_message),
108 (unsigned long)revoke_packet,
109 VM_PKT_DATA_INBAND, 0);
110 /*
111 * If we failed here, we might as well return and
112 * have a leak rather than continue and a bugchk
113 */
114 if (ret != 0) {
115 netdev_err(ndev, "unable to send "
116 "revoke receive buffer to netvsp\n");
117 return ret;
118 }
119 }
120
121 /* Teardown the gpadl on the vsp end */
122 if (net_device->recv_buf_gpadl_handle) {
123 ret = vmbus_teardown_gpadl(net_device->dev->channel,
124 net_device->recv_buf_gpadl_handle);
125
126 /* If we failed here, we might as well return and have a leak
127 * rather than continue and a bugchk
128 */
129 if (ret != 0) {
130 netdev_err(ndev,
131 "unable to teardown receive buffer's gpadl\n");
132 return ret;
133 }
134 net_device->recv_buf_gpadl_handle = 0;
135 }
136
137 if (net_device->recv_buf) {
138 /* Free up the receive buffer */
139 free_pages((unsigned long)net_device->recv_buf,
140 get_order(net_device->recv_buf_size));
141 net_device->recv_buf = NULL;
142 }
143
144 if (net_device->recv_section) {
145 net_device->recv_section_cnt = 0;
146 kfree(net_device->recv_section);
147 net_device->recv_section = NULL;
148 }
149
150 return ret;
151}
152
153static int netvsc_init_recv_buf(struct hv_device *device)
154{
155 int ret = 0;
156 int t;
157 struct netvsc_device *net_device;
158 struct nvsp_message *init_packet;
159 struct net_device *ndev;
160
161 net_device = get_outbound_net_device(device);
162 if (!net_device)
163 return -ENODEV;
164 ndev = net_device->ndev;
165
166 net_device->recv_buf =
167 (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
168 get_order(net_device->recv_buf_size));
169 if (!net_device->recv_buf) {
170 netdev_err(ndev, "unable to allocate receive "
171 "buffer of size %d\n", net_device->recv_buf_size);
172 ret = -ENOMEM;
173 goto cleanup;
174 }
175
176 /*
177 * Establish the gpadl handle for this buffer on this
178 * channel. Note: This call uses the vmbus connection rather
179 * than the channel to establish the gpadl handle.
180 */
181 ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
182 net_device->recv_buf_size,
183 &net_device->recv_buf_gpadl_handle);
184 if (ret != 0) {
185 netdev_err(ndev,
186 "unable to establish receive buffer's gpadl\n");
187 goto cleanup;
188 }
189
190
191 /* Notify the NetVsp of the gpadl handle */
192 init_packet = &net_device->channel_init_pkt;
193
194 memset(init_packet, 0, sizeof(struct nvsp_message));
195
196 init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
197 init_packet->msg.v1_msg.send_recv_buf.
198 gpadl_handle = net_device->recv_buf_gpadl_handle;
199 init_packet->msg.v1_msg.
200 send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;
201
202 /* Send the gpadl notification request */
203 ret = vmbus_sendpacket(device->channel, init_packet,
204 sizeof(struct nvsp_message),
205 (unsigned long)init_packet,
206 VM_PKT_DATA_INBAND,
207 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
208 if (ret != 0) {
209 netdev_err(ndev,
210 "unable to send receive buffer's gpadl to netvsp\n");
211 goto cleanup;
212 }
213
214 t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
215 BUG_ON(t == 0);
216
217
218 /* Check the response */
219 if (init_packet->msg.v1_msg.
220 send_recv_buf_complete.status != NVSP_STAT_SUCCESS) {
221 netdev_err(ndev, "Unable to complete receive buffer "
222 "initialization with NetVsp - status %d\n",
223 init_packet->msg.v1_msg.
224 send_recv_buf_complete.status);
225 ret = -EINVAL;
226 goto cleanup;
227 }
228
229 /* Parse the response */
230
231 net_device->recv_section_cnt = init_packet->msg.
232 v1_msg.send_recv_buf_complete.num_sections;
233
234 net_device->recv_section = kmemdup(
235 init_packet->msg.v1_msg.send_recv_buf_complete.sections,
236 net_device->recv_section_cnt *
237 sizeof(struct nvsp_1_receive_buffer_section),
238 GFP_KERNEL);
239 if (net_device->recv_section == NULL) {
240 ret = -EINVAL;
241 goto cleanup;
242 }
243
244 /*
245 * For 1st release, there should only be 1 section that represents the
246 * entire receive buffer
247 */
248 if (net_device->recv_section_cnt != 1 ||
249 net_device->recv_section->offset != 0) {
250 ret = -EINVAL;
251 goto cleanup;
252 }
253
254 goto exit;
255
256cleanup:
257 netvsc_destroy_recv_buf(net_device);
258
259exit:
260 return ret;
261}
262
263
264/* Negotiate NVSP protocol version */
265static int negotiate_nvsp_ver(struct hv_device *device,
266 struct netvsc_device *net_device,
267 struct nvsp_message *init_packet,
268 u32 nvsp_ver)
269{
270 int ret, t;
271
272 memset(init_packet, 0, sizeof(struct nvsp_message));
273 init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
274 init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver;
275 init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver;
276
277 /* Send the init request */
278 ret = vmbus_sendpacket(device->channel, init_packet,
279 sizeof(struct nvsp_message),
280 (unsigned long)init_packet,
281 VM_PKT_DATA_INBAND,
282 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
283
284 if (ret != 0)
285 return ret;
286
287 t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
288
289 if (t == 0)
290 return -ETIMEDOUT;
291
292 if (init_packet->msg.init_msg.init_complete.status !=
293 NVSP_STAT_SUCCESS)
294 return -EINVAL;
295
296 if (nvsp_ver != NVSP_PROTOCOL_VERSION_2)
297 return 0;
298
299 /* NVSPv2 only: Send NDIS config */
300 memset(init_packet, 0, sizeof(struct nvsp_message));
301 init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
302 init_packet->msg.v2_msg.send_ndis_config.mtu = net_device->ndev->mtu;
303
304 ret = vmbus_sendpacket(device->channel, init_packet,
305 sizeof(struct nvsp_message),
306 (unsigned long)init_packet,
307 VM_PKT_DATA_INBAND, 0);
308
309 return ret;
310}
311
312static int netvsc_connect_vsp(struct hv_device *device)
313{
314 int ret;
315 struct netvsc_device *net_device;
316 struct nvsp_message *init_packet;
317 int ndis_version;
318 struct net_device *ndev;
319
320 net_device = get_outbound_net_device(device);
321 if (!net_device)
322 return -ENODEV;
323 ndev = net_device->ndev;
324
325 init_packet = &net_device->channel_init_pkt;
326
327 /* Negotiate the latest NVSP protocol supported */
328 if (negotiate_nvsp_ver(device, net_device, init_packet,
329 NVSP_PROTOCOL_VERSION_2) == 0) {
330 net_device->nvsp_version = NVSP_PROTOCOL_VERSION_2;
331 } else if (negotiate_nvsp_ver(device, net_device, init_packet,
332 NVSP_PROTOCOL_VERSION_1) == 0) {
333 net_device->nvsp_version = NVSP_PROTOCOL_VERSION_1;
334 } else {
335 ret = -EPROTO;
336 goto cleanup;
337 }
338
339 pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version);
340
341 /* Send the ndis version */
342 memset(init_packet, 0, sizeof(struct nvsp_message));
343
344 ndis_version = 0x00050000;
345
346 init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
347 init_packet->msg.v1_msg.
348 send_ndis_ver.ndis_major_ver =
349 (ndis_version & 0xFFFF0000) >> 16;
350 init_packet->msg.v1_msg.
351 send_ndis_ver.ndis_minor_ver =
352 ndis_version & 0xFFFF;
353
354 /* Send the init request */
355 ret = vmbus_sendpacket(device->channel, init_packet,
356 sizeof(struct nvsp_message),
357 (unsigned long)init_packet,
358 VM_PKT_DATA_INBAND, 0);
359 if (ret != 0)
360 goto cleanup;
361
362 /* Post the big receive buffer to NetVSP */
363 ret = netvsc_init_recv_buf(device);
364
365cleanup:
366 return ret;
367}
368
369static void netvsc_disconnect_vsp(struct netvsc_device *net_device)
370{
371 netvsc_destroy_recv_buf(net_device);
372}
373
374/*
375 * netvsc_device_remove - Callback when the root bus device is removed
376 */
377int netvsc_device_remove(struct hv_device *device)
378{
379 struct netvsc_device *net_device;
380 struct hv_netvsc_packet *netvsc_packet, *pos;
381 unsigned long flags;
382
383 net_device = hv_get_drvdata(device);
384 spin_lock_irqsave(&device->channel->inbound_lock, flags);
385 net_device->destroy = true;
386 spin_unlock_irqrestore(&device->channel->inbound_lock, flags);
387
388 /* Wait for all send completions */
389 while (atomic_read(&net_device->num_outstanding_sends)) {
390 dev_info(&device->device,
391 "waiting for %d requests to complete...\n",
392 atomic_read(&net_device->num_outstanding_sends));
393 udelay(100);
394 }
395
396 netvsc_disconnect_vsp(net_device);
397
398 /*
399 * Since we have already drained, we don't need to busy wait
400 * as was done in final_release_stor_device()
401 * Note that we cannot set the ext pointer to NULL until
402 * we have drained - to drain the outgoing packets, we need to
403 * allow incoming packets.
404 */
405
406 spin_lock_irqsave(&device->channel->inbound_lock, flags);
407 hv_set_drvdata(device, NULL);
408 spin_unlock_irqrestore(&device->channel->inbound_lock, flags);
409
410 /*
411 * At this point, no one should be accessing net_device
412 * except in here
413 */
414 dev_notice(&device->device, "net device safe to remove\n");
415
416 /* Now, we can close the channel safely */
417 vmbus_close(device->channel);
418
419 /* Release all resources */
420 list_for_each_entry_safe(netvsc_packet, pos,
421 &net_device->recv_pkt_list, list_ent) {
422 list_del(&netvsc_packet->list_ent);
423 kfree(netvsc_packet);
424 }
425
426 kfree(net_device);
427 return 0;
428}
429
430static void netvsc_send_completion(struct hv_device *device,
431 struct vmpacket_descriptor *packet)
432{
433 struct netvsc_device *net_device;
434 struct nvsp_message *nvsp_packet;
435 struct hv_netvsc_packet *nvsc_packet;
436 struct net_device *ndev;
437
438 net_device = get_inbound_net_device(device);
439 if (!net_device)
440 return;
441 ndev = net_device->ndev;
442
443 nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
444 (packet->offset8 << 3));
445
446 if ((nvsp_packet->hdr.msg_type == NVSP_MSG_TYPE_INIT_COMPLETE) ||
447 (nvsp_packet->hdr.msg_type ==
448 NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE) ||
449 (nvsp_packet->hdr.msg_type ==
450 NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE)) {
451 /* Copy the response back */
452 memcpy(&net_device->channel_init_pkt, nvsp_packet,
453 sizeof(struct nvsp_message));
454 complete(&net_device->channel_init_wait);
455 } else if (nvsp_packet->hdr.msg_type ==
456 NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE) {
457 /* Get the send context */
458 nvsc_packet = (struct hv_netvsc_packet *)(unsigned long)
459 packet->trans_id;
460
461 /* Notify the layer above us */
462 nvsc_packet->completion.send.send_completion(
463 nvsc_packet->completion.send.send_completion_ctx);
464
465 atomic_dec(&net_device->num_outstanding_sends);
466
467 if (netif_queue_stopped(ndev) && !net_device->start_remove)
468 netif_wake_queue(ndev);
469 } else {
470 netdev_err(ndev, "Unknown send completion packet type- "
471 "%d received!!\n", nvsp_packet->hdr.msg_type);
472 }
473
474}
475
476int netvsc_send(struct hv_device *device,
477 struct hv_netvsc_packet *packet)
478{
479 struct netvsc_device *net_device;
480 int ret = 0;
481 struct nvsp_message sendMessage;
482 struct net_device *ndev;
483
484 net_device = get_outbound_net_device(device);
485 if (!net_device)
486 return -ENODEV;
487 ndev = net_device->ndev;
488
489 sendMessage.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
490 if (packet->is_data_pkt) {
491 /* 0 is RMC_DATA; */
492 sendMessage.msg.v1_msg.send_rndis_pkt.channel_type = 0;
493 } else {
494 /* 1 is RMC_CONTROL; */
495 sendMessage.msg.v1_msg.send_rndis_pkt.channel_type = 1;
496 }
497
498 /* Not using send buffer section */
499 sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_index =
500 0xFFFFFFFF;
501 sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0;
502
503 if (packet->page_buf_cnt) {
504 ret = vmbus_sendpacket_pagebuffer(device->channel,
505 packet->page_buf,
506 packet->page_buf_cnt,
507 &sendMessage,
508 sizeof(struct nvsp_message),
509 (unsigned long)packet);
510 } else {
511 ret = vmbus_sendpacket(device->channel, &sendMessage,
512 sizeof(struct nvsp_message),
513 (unsigned long)packet,
514 VM_PKT_DATA_INBAND,
515 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
516
517 }
518
519 if (ret == 0) {
520 atomic_inc(&net_device->num_outstanding_sends);
521 } else if (ret == -EAGAIN) {
522 netif_stop_queue(ndev);
523 if (atomic_read(&net_device->num_outstanding_sends) < 1)
524 netif_wake_queue(ndev);
525 } else {
526 netdev_err(ndev, "Unable to send packet %p ret %d\n",
527 packet, ret);
528 }
529
530 return ret;
531}
532
533static void netvsc_send_recv_completion(struct hv_device *device,
534 u64 transaction_id)
535{
536 struct nvsp_message recvcompMessage;
537 int retries = 0;
538 int ret;
539 struct net_device *ndev;
540 struct netvsc_device *net_device = hv_get_drvdata(device);
541
542 ndev = net_device->ndev;
543
544 recvcompMessage.hdr.msg_type =
545 NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE;
546
547 /* FIXME: Pass in the status */
548 recvcompMessage.msg.v1_msg.send_rndis_pkt_complete.status =
549 NVSP_STAT_SUCCESS;
550
551retry_send_cmplt:
552 /* Send the completion */
553 ret = vmbus_sendpacket(device->channel, &recvcompMessage,
554 sizeof(struct nvsp_message), transaction_id,
555 VM_PKT_COMP, 0);
556 if (ret == 0) {
557 /* success */
558 /* no-op */
559 } else if (ret == -EAGAIN) {
560 /* no more room...wait a bit and attempt to retry 3 times */
561 retries++;
562 netdev_err(ndev, "unable to send receive completion pkt"
563 " (tid %llx)...retrying %d\n", transaction_id, retries);
564
565 if (retries < 4) {
566 udelay(100);
567 goto retry_send_cmplt;
568 } else {
569 netdev_err(ndev, "unable to send receive "
570 "completion pkt (tid %llx)...give up retrying\n",
571 transaction_id);
572 }
573 } else {
574 netdev_err(ndev, "unable to send receive "
575 "completion pkt - %llx\n", transaction_id);
576 }
577}
578
579/* Send a receive completion packet to RNDIS device (ie NetVsp) */
580static void netvsc_receive_completion(void *context)
581{
582 struct hv_netvsc_packet *packet = context;
583 struct hv_device *device = (struct hv_device *)packet->device;
584 struct netvsc_device *net_device;
585 u64 transaction_id = 0;
586 bool fsend_receive_comp = false;
587 unsigned long flags;
588 struct net_device *ndev;
589
590 /*
591 * Even though it seems logical to do a GetOutboundNetDevice() here to
592 * send out receive completion, we are using GetInboundNetDevice()
593 * since we may have disabled outbound traffic already.
594 */
595 net_device = get_inbound_net_device(device);
596 if (!net_device)
597 return;
598 ndev = net_device->ndev;
599
600 /* Overloading use of the lock. */
601 spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
602
603 packet->xfer_page_pkt->count--;
604
605 /*
606 * Last one in the line that represents 1 xfer page packet.
607 * Return the xfer page packet itself to the freelist
608 */
609 if (packet->xfer_page_pkt->count == 0) {
610 fsend_receive_comp = true;
611 transaction_id = packet->completion.recv.recv_completion_tid;
612 list_add_tail(&packet->xfer_page_pkt->list_ent,
613 &net_device->recv_pkt_list);
614
615 }
616
617 /* Put the packet back */
618 list_add_tail(&packet->list_ent, &net_device->recv_pkt_list);
619 spin_unlock_irqrestore(&net_device->recv_pkt_list_lock, flags);
620
621 /* Send a receive completion for the xfer page packet */
622 if (fsend_receive_comp)
623 netvsc_send_recv_completion(device, transaction_id);
624
625}
626
627static void netvsc_receive(struct hv_device *device,
628 struct vmpacket_descriptor *packet)
629{
630 struct netvsc_device *net_device;
631 struct vmtransfer_page_packet_header *vmxferpage_packet;
632 struct nvsp_message *nvsp_packet;
633 struct hv_netvsc_packet *netvsc_packet = NULL;
634 /* struct netvsc_driver *netvscDriver; */
635 struct xferpage_packet *xferpage_packet = NULL;
636 int i;
637 int count = 0;
638 unsigned long flags;
639 struct net_device *ndev;
640
641 LIST_HEAD(listHead);
642
643 net_device = get_inbound_net_device(device);
644 if (!net_device)
645 return;
646 ndev = net_device->ndev;
647
648 /*
649 * All inbound packets other than send completion should be xfer page
650 * packets
651 */
652 if (packet->type != VM_PKT_DATA_USING_XFER_PAGES) {
653 netdev_err(ndev, "Unknown packet type received - %d\n",
654 packet->type);
655 return;
656 }
657
658 nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
659 (packet->offset8 << 3));
660
661 /* Make sure this is a valid nvsp packet */
662 if (nvsp_packet->hdr.msg_type !=
663 NVSP_MSG1_TYPE_SEND_RNDIS_PKT) {
664 netdev_err(ndev, "Unknown nvsp packet type received-"
665 " %d\n", nvsp_packet->hdr.msg_type);
666 return;
667 }
668
669 vmxferpage_packet = (struct vmtransfer_page_packet_header *)packet;
670
671 if (vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID) {
672 netdev_err(ndev, "Invalid xfer page set id - "
673 "expecting %x got %x\n", NETVSC_RECEIVE_BUFFER_ID,
674 vmxferpage_packet->xfer_pageset_id);
675 return;
676 }
677
678 /*
679 * Grab free packets (range count + 1) to represent this xfer
680 * page packet. +1 to represent the xfer page packet itself.
681 * We grab them here so that we know exactly how many we can
682 * fulfil
683 */
684 spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
685 while (!list_empty(&net_device->recv_pkt_list)) {
686 list_move_tail(net_device->recv_pkt_list.next, &listHead);
687 if (++count == vmxferpage_packet->range_cnt + 1)
688 break;
689 }
690 spin_unlock_irqrestore(&net_device->recv_pkt_list_lock, flags);
691
692 /*
693 * We need at least 2 netvsc pkts (1 to represent the xfer
694 * page and at least 1 for the range), i.e. we can handle
695 * some of the xfer page packet ranges...
696 */
697 if (count < 2) {
698 netdev_err(ndev, "Got only %d netvsc pkt...needed "
699 "%d pkts. Dropping this xfer page packet completely!\n",
700 count, vmxferpage_packet->range_cnt + 1);
701
702 /* Return it to the freelist */
703 spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
704 for (i = count; i != 0; i--) {
705 list_move_tail(listHead.next,
706 &net_device->recv_pkt_list);
707 }
708 spin_unlock_irqrestore(&net_device->recv_pkt_list_lock,
709 flags);
710
711 netvsc_send_recv_completion(device,
712 vmxferpage_packet->d.trans_id);
713
714 return;
715 }
716
717 /* Remove the 1st packet to represent the xfer page packet itself */
718 xferpage_packet = (struct xferpage_packet *)listHead.next;
719 list_del(&xferpage_packet->list_ent);
720
721 /* This is how much we can satisfy */
722 xferpage_packet->count = count - 1;
723
724 if (xferpage_packet->count != vmxferpage_packet->range_cnt) {
725 netdev_err(ndev, "Needed %d netvsc pkts to satisfy "
726 "this xfer page...got %d\n",
727 vmxferpage_packet->range_cnt, xferpage_packet->count);
728 }
729
730 /* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
731 for (i = 0; i < (count - 1); i++) {
732 netvsc_packet = (struct hv_netvsc_packet *)listHead.next;
733 list_del(&netvsc_packet->list_ent);
734
735 /* Initialize the netvsc packet */
736 netvsc_packet->xfer_page_pkt = xferpage_packet;
737 netvsc_packet->completion.recv.recv_completion =
738 netvsc_receive_completion;
739 netvsc_packet->completion.recv.recv_completion_ctx =
740 netvsc_packet;
741 netvsc_packet->device = device;
742 /* Save this so that we can send it back */
743 netvsc_packet->completion.recv.recv_completion_tid =
744 vmxferpage_packet->d.trans_id;
745
746 netvsc_packet->data = (void *)((unsigned long)net_device->
747 recv_buf + vmxferpage_packet->ranges[i].byte_offset);
748 netvsc_packet->total_data_buflen =
749 vmxferpage_packet->ranges[i].byte_count;
750
751 /* Pass it to the upper layer */
752 rndis_filter_receive(device, netvsc_packet);
753
754 netvsc_receive_completion(netvsc_packet->
755 completion.recv.recv_completion_ctx);
756 }
757
758}
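netvsc_receive() above hands each range to the RNDIS layer and then runs netvsc_receive_completion() for it; the xfer page packet's count field tracks how many ranges are still outstanding, and the receive completion goes back to the host only when the last one finishes. A minimal stand-alone model of that accounting (simplified types, invented for illustration, not the driver's structures):

#include <stdio.h>

/*
 * Stand-alone model of the xfer-page accounting: one xferpage_packet
 * carries a count of per-range packets, each completed range decrements
 * it, and the receive completion is sent to the host only when the last
 * range has been handled.
 */
struct fake_xferpage_packet {
	int count;		/* ranges still outstanding */
};

static void complete_range(struct fake_xferpage_packet *xfer, int range)
{
	if (--xfer->count == 0)
		printf("range %d done - send receive completion to host\n", range);
	else
		printf("range %d done - %d range(s) still pending\n",
		       range, xfer->count);
}

int main(void)
{
	struct fake_xferpage_packet xfer = { .count = 3 };	/* range_cnt = 3 */
	int i;

	for (i = 0; i < 3; i++)
		complete_range(&xfer, i);
	return 0;
}
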
759
760static void netvsc_channel_cb(void *context)
761{
762 int ret;
763 struct hv_device *device = context;
764 struct netvsc_device *net_device;
765 u32 bytes_recvd;
766 u64 request_id;
767 unsigned char *packet;
768 struct vmpacket_descriptor *desc;
769 unsigned char *buffer;
770 int bufferlen = NETVSC_PACKET_SIZE;
771 struct net_device *ndev;
772
773 packet = kzalloc(NETVSC_PACKET_SIZE * sizeof(unsigned char),
774 GFP_ATOMIC);
775 if (!packet)
776 return;
777 buffer = packet;
778
779 net_device = get_inbound_net_device(device);
780 if (!net_device)
781 goto out;
782 ndev = net_device->ndev;
783
784 do {
785 ret = vmbus_recvpacket_raw(device->channel, buffer, bufferlen,
786 &bytes_recvd, &request_id);
787 if (ret == 0) {
788 if (bytes_recvd > 0) {
789 desc = (struct vmpacket_descriptor *)buffer;
790 switch (desc->type) {
791 case VM_PKT_COMP:
792 netvsc_send_completion(device, desc);
793 break;
794
795 case VM_PKT_DATA_USING_XFER_PAGES:
796 netvsc_receive(device, desc);
797 break;
798
799 default:
800 netdev_err(ndev,
801 "unhandled packet type %d, "
802 "tid %llx len %d\n",
803 desc->type, request_id,
804 bytes_recvd);
805 break;
806 }
807
808 /* reset */
809 if (bufferlen > NETVSC_PACKET_SIZE) {
810 kfree(buffer);
811 buffer = packet;
812 bufferlen = NETVSC_PACKET_SIZE;
813 }
814 } else {
815 /* reset */
816 if (bufferlen > NETVSC_PACKET_SIZE) {
817 kfree(buffer);
818 buffer = packet;
819 bufferlen = NETVSC_PACKET_SIZE;
820 }
821
822 break;
823 }
824 } else if (ret == -ENOBUFS) {
825 /* Handle large packet */
826 buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
827 if (buffer == NULL) {
828 /* Try again next time around */
829 netdev_err(ndev,
830 "unable to allocate buffer of size "
831 "(%d)!!\n", bytes_recvd);
832 break;
833 }
834
835 bufferlen = bytes_recvd;
836 }
837 } while (1);
838
839out:
840 kfree(buffer);
841 return;
842}
843
844/*
845 * netvsc_device_add - Callback when the device belonging to this
846 * driver is added
847 */
848int netvsc_device_add(struct hv_device *device, void *additional_info)
849{
850 int ret = 0;
851 int i;
852 int ring_size =
853 ((struct netvsc_device_info *)additional_info)->ring_size;
854 struct netvsc_device *net_device;
855 struct hv_netvsc_packet *packet, *pos;
856 struct net_device *ndev;
857
858 net_device = alloc_net_device(device);
859 if (!net_device) {
860 ret = -ENOMEM;
861 goto cleanup;
862 }
863
864 /*
865 * Coming into this function, struct net_device * is
866 * registered as the driver private data.
867 * In alloc_net_device(), we register struct netvsc_device *
868 * as the driver private data and stash away struct net_device *
869 * in struct netvsc_device *.
870 */
871 ndev = net_device->ndev;
872
873 /* Initialize the NetVSC channel extension */
874 net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
875 spin_lock_init(&net_device->recv_pkt_list_lock);
876
877 INIT_LIST_HEAD(&net_device->recv_pkt_list);
878
879 for (i = 0; i < NETVSC_RECEIVE_PACKETLIST_COUNT; i++) {
880 packet = kzalloc(sizeof(struct hv_netvsc_packet) +
881 (NETVSC_RECEIVE_SG_COUNT *
882 sizeof(struct hv_page_buffer)), GFP_KERNEL);
883 if (!packet)
884 break;
885
886 list_add_tail(&packet->list_ent,
887 &net_device->recv_pkt_list);
888 }
889 init_completion(&net_device->channel_init_wait);
890
891 /* Open the channel */
892 ret = vmbus_open(device->channel, ring_size * PAGE_SIZE,
893 ring_size * PAGE_SIZE, NULL, 0,
894 netvsc_channel_cb, device);
895
896 if (ret != 0) {
897 netdev_err(ndev, "unable to open channel: %d\n", ret);
898 goto cleanup;
899 }
900
901 /* Channel is opened */
902 pr_info("hv_netvsc channel opened successfully\n");
903
904 /* Connect with the NetVsp */
905 ret = netvsc_connect_vsp(device);
906 if (ret != 0) {
907 netdev_err(ndev,
908 "unable to connect to NetVSP - %d\n", ret);
909 goto close;
910 }
911
912 return ret;
913
914close:
915 /* Now, we can close the channel safely */
916 vmbus_close(device->channel);
917
918cleanup:
919
920 if (net_device) {
921 list_for_each_entry_safe(packet, pos,
922 &net_device->recv_pkt_list,
923 list_ent) {
924 list_del(&packet->list_ent);
925 kfree(packet);
926 }
927
928 kfree(net_device);
929 }
930
931 return ret;
932}
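Both netvsc_send_completion() and netvsc_receive() above locate the NVSP message by adding (offset8 << 3) to the descriptor address: offset8 counts 8-byte units, so the shift converts it to a byte offset. The stand-alone sketch below illustrates only that address arithmetic; struct fake_desc is a simplified stand-in invented for the example, not the real vmpacket_descriptor layout:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Simplified stand-in for the vmbus descriptor (illustration only). */
struct fake_desc {
	uint16_t type;
	uint16_t offset8;	/* header length in 8-byte units */
	uint16_t len8;
	uint16_t flags;
	uint64_t trans_id;
	/* payload follows at offset8 * 8 bytes from the descriptor start */
};

int main(void)
{
	uint64_t ring_storage[8] = { 0 };	/* 64 bytes, suitably aligned */
	unsigned char *ring = (unsigned char *)ring_storage;
	struct fake_desc *desc = (struct fake_desc *)ring;
	char *payload;

	desc->offset8 = sizeof(struct fake_desc) / 8;	/* 16 bytes -> 2 */
	strcpy((char *)ring + (desc->offset8 << 3), "nvsp payload");

	/* Same pointer arithmetic as the driver: base + (offset8 << 3). */
	payload = (char *)((unsigned long)desc + (desc->offset8 << 3));
	printf("payload at byte offset %d: %s\n", desc->offset8 << 3, payload);
	return 0;
}
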
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
new file mode 100644
index 000000000000..462d05f05e84
--- /dev/null
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -0,0 +1,508 @@
1/*
2 * Copyright (c) 2009, Microsoft Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
17 * Authors:
18 * Haiyang Zhang <haiyangz@microsoft.com>
19 * Hank Janssen <hjanssen@microsoft.com>
20 */
21#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22
23#include <linux/init.h>
24#include <linux/atomic.h>
25#include <linux/module.h>
26#include <linux/highmem.h>
27#include <linux/device.h>
28#include <linux/io.h>
29#include <linux/delay.h>
30#include <linux/netdevice.h>
31#include <linux/inetdevice.h>
32#include <linux/etherdevice.h>
33#include <linux/skbuff.h>
34#include <linux/in.h>
35#include <linux/slab.h>
36#include <net/arp.h>
37#include <net/route.h>
38#include <net/sock.h>
39#include <net/pkt_sched.h>
40
41#include "hyperv_net.h"
42
43struct net_device_context {
44 /* point back to our device context */
45 struct hv_device *device_ctx;
46 struct delayed_work dwork;
47};
48
49
50static int ring_size = 128;
51module_param(ring_size, int, S_IRUGO);
52MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
53
54struct set_multicast_work {
55 struct work_struct work;
56 struct net_device *net;
57};
58
59static void do_set_multicast(struct work_struct *w)
60{
61 struct set_multicast_work *swk =
62 container_of(w, struct set_multicast_work, work);
63 struct net_device *net = swk->net;
64
65 struct net_device_context *ndevctx = netdev_priv(net);
66 struct netvsc_device *nvdev;
67 struct rndis_device *rdev;
68
69 nvdev = hv_get_drvdata(ndevctx->device_ctx);
70 if (nvdev == NULL)
71 return;
72
73 rdev = nvdev->extension;
74 if (rdev == NULL)
75 return;
76
77 if (net->flags & IFF_PROMISC)
78 rndis_filter_set_packet_filter(rdev,
79 NDIS_PACKET_TYPE_PROMISCUOUS);
80 else
81 rndis_filter_set_packet_filter(rdev,
82 NDIS_PACKET_TYPE_BROADCAST |
83 NDIS_PACKET_TYPE_ALL_MULTICAST |
84 NDIS_PACKET_TYPE_DIRECTED);
85
86 kfree(w);
87}
88
89static void netvsc_set_multicast_list(struct net_device *net)
90{
91 struct set_multicast_work *swk =
92 kmalloc(sizeof(struct set_multicast_work), GFP_ATOMIC);
93 if (swk == NULL)
94 return;
95
96 swk->net = net;
97 INIT_WORK(&swk->work, do_set_multicast);
98 schedule_work(&swk->work);
99}
100
101static int netvsc_open(struct net_device *net)
102{
103 struct net_device_context *net_device_ctx = netdev_priv(net);
104 struct hv_device *device_obj = net_device_ctx->device_ctx;
105 int ret = 0;
106
107 /* Open up the device */
108 ret = rndis_filter_open(device_obj);
109 if (ret != 0) {
110 netdev_err(net, "unable to open device (ret %d).\n", ret);
111 return ret;
112 }
113
114 netif_start_queue(net);
115
116 return ret;
117}
118
119static int netvsc_close(struct net_device *net)
120{
121 struct net_device_context *net_device_ctx = netdev_priv(net);
122 struct hv_device *device_obj = net_device_ctx->device_ctx;
123 int ret;
124
125 netif_stop_queue(net);
126
127 ret = rndis_filter_close(device_obj);
128 if (ret != 0)
129 netdev_err(net, "unable to close device (ret %d).\n", ret);
130
131 return ret;
132}
133
134static void netvsc_xmit_completion(void *context)
135{
136 struct hv_netvsc_packet *packet = (struct hv_netvsc_packet *)context;
137 struct sk_buff *skb = (struct sk_buff *)
138 (unsigned long)packet->completion.send.send_completion_tid;
139
140 kfree(packet);
141
142 if (skb)
143 dev_kfree_skb_any(skb);
144}
145
146static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
147{
148 struct net_device_context *net_device_ctx = netdev_priv(net);
149 struct hv_netvsc_packet *packet;
150 int ret;
151 unsigned int i, num_pages, npg_data;
152
153 /* Add multiple pages for skb->data and an additional one for the RNDIS header */
154 npg_data = (((unsigned long)skb->data + skb_headlen(skb) - 1)
155 >> PAGE_SHIFT) - ((unsigned long)skb->data >> PAGE_SHIFT) + 1;
156 num_pages = skb_shinfo(skb)->nr_frags + npg_data + 1;
157
158 /* Allocate a netvsc packet based on # of frags. */
159 packet = kzalloc(sizeof(struct hv_netvsc_packet) +
160 (num_pages * sizeof(struct hv_page_buffer)) +
161 sizeof(struct rndis_filter_packet), GFP_ATOMIC);
162 if (!packet) {
163 /* out of memory, drop packet */
164 netdev_err(net, "unable to allocate hv_netvsc_packet\n");
165
166 dev_kfree_skb(skb);
167 net->stats.tx_dropped++;
168 return NETDEV_TX_BUSY;
169 }
170
171 packet->extension = (void *)(unsigned long)packet +
172 sizeof(struct hv_netvsc_packet) +
173 (num_pages * sizeof(struct hv_page_buffer));
174
175 /* Setup the rndis header */
176 packet->page_buf_cnt = num_pages;
177
178 /* Initialize it from the skb */
179 packet->total_data_buflen = skb->len;
180
181 /* Start filling in the page buffers starting after RNDIS buffer. */
182 packet->page_buf[1].pfn = virt_to_phys(skb->data) >> PAGE_SHIFT;
183 packet->page_buf[1].offset
184 = (unsigned long)skb->data & (PAGE_SIZE - 1);
185 if (npg_data == 1)
186 packet->page_buf[1].len = skb_headlen(skb);
187 else
188 packet->page_buf[1].len = PAGE_SIZE
189 - packet->page_buf[1].offset;
190
191 for (i = 2; i <= npg_data; i++) {
192 packet->page_buf[i].pfn = virt_to_phys(skb->data
193 + PAGE_SIZE * (i-1)) >> PAGE_SHIFT;
194 packet->page_buf[i].offset = 0;
195 packet->page_buf[i].len = PAGE_SIZE;
196 }
197 if (npg_data > 1)
198 packet->page_buf[npg_data].len = (((unsigned long)skb->data
199 + skb_headlen(skb) - 1) & (PAGE_SIZE - 1)) + 1;
200
201 /* Additional fragments are after SKB data */
202 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
203 const skb_frag_t *f = &skb_shinfo(skb)->frags[i];
204
205 packet->page_buf[i+npg_data+1].pfn =
206 page_to_pfn(skb_frag_page(f));
207 packet->page_buf[i+npg_data+1].offset = f->page_offset;
208 packet->page_buf[i+npg_data+1].len = skb_frag_size(f);
209 }
210
211 /* Set the completion routine */
212 packet->completion.send.send_completion = netvsc_xmit_completion;
213 packet->completion.send.send_completion_ctx = packet;
214 packet->completion.send.send_completion_tid = (unsigned long)skb;
215
216 ret = rndis_filter_send(net_device_ctx->device_ctx,
217 packet);
218 if (ret == 0) {
219 net->stats.tx_bytes += skb->len;
220 net->stats.tx_packets++;
221 } else {
222 /* we are shutting down or bus overloaded, just drop packet */
223 net->stats.tx_dropped++;
224 kfree(packet);
225 dev_kfree_skb_any(skb);
226 }
227
228 return ret ? NETDEV_TX_BUSY : NETDEV_TX_OK;
229}
230
231/*
232 * netvsc_linkstatus_callback - Link up/down notification
233 */
234void netvsc_linkstatus_callback(struct hv_device *device_obj,
235 unsigned int status)
236{
237 struct net_device *net;
238 struct net_device_context *ndev_ctx;
239 struct netvsc_device *net_device;
240
241 net_device = hv_get_drvdata(device_obj);
242 net = net_device->ndev;
243
244 if (!net) {
245 netdev_err(net, "got link status but net device "
246 "not initialized yet\n");
247 return;
248 }
249
250 if (status == 1) {
251 netif_carrier_on(net);
252 netif_wake_queue(net);
253 ndev_ctx = netdev_priv(net);
254 schedule_delayed_work(&ndev_ctx->dwork, 0);
255 schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20));
256 } else {
257 netif_carrier_off(net);
258 netif_stop_queue(net);
259 }
260}
261
262/*
263 * netvsc_recv_callback - Callback when we receive a packet from the
264 * "wire" on the specified device.
265 */
266int netvsc_recv_callback(struct hv_device *device_obj,
267 struct hv_netvsc_packet *packet)
268{
269 struct net_device *net = dev_get_drvdata(&device_obj->device);
270 struct sk_buff *skb;
271 struct netvsc_device *net_device;
272
273 net_device = hv_get_drvdata(device_obj);
274 net = net_device->ndev;
275
276 if (!net) {
277 netdev_err(net, "got receive callback but net device"
278 " not initialized yet\n");
279 return 0;
280 }
281
282 /* Allocate a skb - TODO direct I/O to pages? */
283 skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen);
284 if (unlikely(!skb)) {
285 ++net->stats.rx_dropped;
286 return 0;
287 }
288
289 /*
290 * Copy to skb. This copy is needed here since the memory pointed to by
291 * hv_netvsc_packet cannot be deallocated
292 */
293 memcpy(skb_put(skb, packet->total_data_buflen), packet->data,
294 packet->total_data_buflen);
295
296 skb->protocol = eth_type_trans(skb, net);
297 skb->ip_summed = CHECKSUM_NONE;
298
299 net->stats.rx_packets++;
300 net->stats.rx_bytes += skb->len;
301
302 /*
303 * Pass the skb back up. Network stack will deallocate the skb when it
304 * is done.
305 * TODO - use NAPI?
306 */
307 netif_rx(skb);
308
309 return 0;
310}
311
312static void netvsc_get_drvinfo(struct net_device *net,
313 struct ethtool_drvinfo *info)
314{
315 strcpy(info->driver, "hv_netvsc");
316 strcpy(info->version, HV_DRV_VERSION);
317 strcpy(info->fw_version, "N/A");
318}
319
320static int netvsc_change_mtu(struct net_device *ndev, int mtu)
321{
322 struct net_device_context *ndevctx = netdev_priv(ndev);
323 struct hv_device *hdev = ndevctx->device_ctx;
324 struct netvsc_device *nvdev = hv_get_drvdata(hdev);
325 struct netvsc_device_info device_info;
326 int limit = ETH_DATA_LEN;
327
328 if (nvdev == NULL || nvdev->destroy)
329 return -ENODEV;
330
331 if (nvdev->nvsp_version == NVSP_PROTOCOL_VERSION_2)
332 limit = NETVSC_MTU;
333
334 if (mtu < 68 || mtu > limit)
335 return -EINVAL;
336
337 nvdev->start_remove = true;
338 cancel_delayed_work_sync(&ndevctx->dwork);
339 netif_stop_queue(ndev);
340 rndis_filter_device_remove(hdev);
341
342 ndev->mtu = mtu;
343
344 ndevctx->device_ctx = hdev;
345 hv_set_drvdata(hdev, ndev);
346 device_info.ring_size = ring_size;
347 rndis_filter_device_add(hdev, &device_info);
348 netif_wake_queue(ndev);
349
350 return 0;
351}
352
353static const struct ethtool_ops ethtool_ops = {
354 .get_drvinfo = netvsc_get_drvinfo,
355 .get_link = ethtool_op_get_link,
356};
357
358static const struct net_device_ops device_ops = {
359 .ndo_open = netvsc_open,
360 .ndo_stop = netvsc_close,
361 .ndo_start_xmit = netvsc_start_xmit,
362 .ndo_set_rx_mode = netvsc_set_multicast_list,
363 .ndo_change_mtu = netvsc_change_mtu,
364 .ndo_validate_addr = eth_validate_addr,
365 .ndo_set_mac_address = eth_mac_addr,
366};
367
368/*
369 * Send GARP packet to network peers after migrations.
370 * After Quick Migration, the network is not immediately operational in the
371 * current context when the RNDIS_STATUS_MEDIA_CONNECT event is received. So,
372 * schedule another netif_notify_peers() from a delayed work item; otherwise
373 * the GARP packet will not be sent after quick migration, causing a network disconnection.
374 */
375static void netvsc_send_garp(struct work_struct *w)
376{
377 struct net_device_context *ndev_ctx;
378 struct net_device *net;
379 struct netvsc_device *net_device;
380
381 ndev_ctx = container_of(w, struct net_device_context, dwork.work);
382 net_device = hv_get_drvdata(ndev_ctx->device_ctx);
383 net = net_device->ndev;
384 netif_notify_peers(net);
385}
386
387
388static int netvsc_probe(struct hv_device *dev,
389 const struct hv_vmbus_device_id *dev_id)
390{
391 struct net_device *net = NULL;
392 struct net_device_context *net_device_ctx;
393 struct netvsc_device_info device_info;
394 int ret;
395
396 net = alloc_etherdev(sizeof(struct net_device_context));
397 if (!net)
398 return -ENOMEM;
399
400 /* Set initial state */
401 netif_carrier_off(net);
402
403 net_device_ctx = netdev_priv(net);
404 net_device_ctx->device_ctx = dev;
405 hv_set_drvdata(dev, net);
406 INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_send_garp);
407
408 net->netdev_ops = &device_ops;
409
410 /* TODO: Add GSO and Checksum offload */
411 net->hw_features = NETIF_F_SG;
412 net->features = NETIF_F_SG;
413
414 SET_ETHTOOL_OPS(net, &ethtool_ops);
415 SET_NETDEV_DEV(net, &dev->device);
416
417 ret = register_netdev(net);
418 if (ret != 0) {
419 pr_err("Unable to register netdev.\n");
420 free_netdev(net);
421 goto out;
422 }
423
424 /* Notify the netvsc driver of the new device */
425 device_info.ring_size = ring_size;
426 ret = rndis_filter_device_add(dev, &device_info);
427 if (ret != 0) {
428 netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
429 unregister_netdev(net);
430 free_netdev(net);
431 hv_set_drvdata(dev, NULL);
432 return ret;
433 }
434 memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
435
436 netif_carrier_on(net);
437
438out:
439 return ret;
440}
441
442static int netvsc_remove(struct hv_device *dev)
443{
444 struct net_device *net;
445 struct net_device_context *ndev_ctx;
446 struct netvsc_device *net_device;
447
448 net_device = hv_get_drvdata(dev);
449 net = net_device->ndev;
450
451 if (net == NULL) {
452 dev_err(&dev->device, "No net device to remove\n");
453 return 0;
454 }
455
456 net_device->start_remove = true;
457
458 ndev_ctx = netdev_priv(net);
459 cancel_delayed_work_sync(&ndev_ctx->dwork);
460
461 /* Stop outbound asap */
462 netif_stop_queue(net);
463
464 unregister_netdev(net);
465
466 /*
467 * Call to the vsc driver to let it know that the device is being
468 * removed
469 */
470 rndis_filter_device_remove(dev);
471
472 free_netdev(net);
473 return 0;
474}
475
476static const struct hv_vmbus_device_id id_table[] = {
477 /* Network guid */
478 { VMBUS_DEVICE(0x63, 0x51, 0x61, 0xF8, 0x3E, 0xDF, 0xc5, 0x46,
479 0x91, 0x3F, 0xF2, 0xD2, 0xF9, 0x65, 0xED, 0x0E) },
480 { },
481};
482
483MODULE_DEVICE_TABLE(vmbus, id_table);
484
485/* The one and only one */
486static struct hv_driver netvsc_drv = {
487 .name = "netvsc",
488 .id_table = id_table,
489 .probe = netvsc_probe,
490 .remove = netvsc_remove,
491};
492
493static void __exit netvsc_drv_exit(void)
494{
495 vmbus_driver_unregister(&netvsc_drv);
496}
497
498static int __init netvsc_drv_init(void)
499{
500 return vmbus_driver_register(&netvsc_drv);
501}
502
503MODULE_LICENSE("GPL");
504MODULE_VERSION(HV_DRV_VERSION);
505MODULE_DESCRIPTION("Microsoft Hyper-V network driver");
506
507module_init(netvsc_drv_init);
508module_exit(netvsc_drv_exit);
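In netvsc_start_xmit() above, npg_data is the number of pages spanned by the linear skb->data area; the page-buffer array is sized as that count plus one entry per fragment and one for the RNDIS header. A small user-space sketch of the same page-span formula, assuming 4 KiB pages (PAGE_SHIFT 12 is an assumption made for the example, not taken from the kernel configuration):

#include <stdio.h>

#define PAGE_SHIFT 12	/* assume 4 KiB pages for this sketch */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Number of pages touched by [addr, addr + len), same formula as npg_data. */
static unsigned long pages_spanned(unsigned long addr, unsigned long len)
{
	return ((addr + len - 1) >> PAGE_SHIFT) - (addr >> PAGE_SHIFT) + 1;
}

int main(void)
{
	/* A 1500-byte linear area starting 100 bytes before a page boundary
	 * crosses into the next page, so it needs two page-buffer entries. */
	unsigned long addr = 2 * PAGE_SIZE - 100;

	printf("1500 bytes at offset %lu span %lu page(s)\n",
	       addr & (PAGE_SIZE - 1), pages_spanned(addr, 1500));
	printf("1500 bytes at offset 0 span %lu page(s)\n",
	       pages_spanned(3 * PAGE_SIZE, 1500));
	return 0;
}
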
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
new file mode 100644
index 000000000000..da181f9a49d1
--- /dev/null
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -0,0 +1,817 @@
1/*
2 * Copyright (c) 2009, Microsoft Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
17 * Authors:
18 * Haiyang Zhang <haiyangz@microsoft.com>
19 * Hank Janssen <hjanssen@microsoft.com>
20 */
21#include <linux/kernel.h>
22#include <linux/sched.h>
23#include <linux/wait.h>
24#include <linux/highmem.h>
25#include <linux/slab.h>
26#include <linux/io.h>
27#include <linux/if_ether.h>
28#include <linux/netdevice.h>
29
30#include "hyperv_net.h"
31
32
33struct rndis_request {
34 struct list_head list_ent;
35 struct completion wait_event;
36
37 /*
38 * FIXME: We assumed a fixed size response here. If we do ever need to
39 * handle a bigger response, we can either define a max response
40 * message or add a response buffer variable above this field
41 */
42 struct rndis_message response_msg;
43
44 /* Simplify allocation by having a netvsc packet inline */
45 struct hv_netvsc_packet pkt;
46 struct hv_page_buffer buf;
47 /* FIXME: We assumed a fixed size request here. */
48 struct rndis_message request_msg;
49};
50
51static void rndis_filter_send_completion(void *ctx);
52
53static void rndis_filter_send_request_completion(void *ctx);
54
55
56
57static struct rndis_device *get_rndis_device(void)
58{
59 struct rndis_device *device;
60
61 device = kzalloc(sizeof(struct rndis_device), GFP_KERNEL);
62 if (!device)
63 return NULL;
64
65 spin_lock_init(&device->request_lock);
66
67 INIT_LIST_HEAD(&device->req_list);
68
69 device->state = RNDIS_DEV_UNINITIALIZED;
70
71 return device;
72}
73
74static struct rndis_request *get_rndis_request(struct rndis_device *dev,
75 u32 msg_type,
76 u32 msg_len)
77{
78 struct rndis_request *request;
79 struct rndis_message *rndis_msg;
80 struct rndis_set_request *set;
81 unsigned long flags;
82
83 request = kzalloc(sizeof(struct rndis_request), GFP_KERNEL);
84 if (!request)
85 return NULL;
86
87 init_completion(&request->wait_event);
88
89 rndis_msg = &request->request_msg;
90 rndis_msg->ndis_msg_type = msg_type;
91 rndis_msg->msg_len = msg_len;
92
93 /*
94 * Set the request id. This field is always after the rndis header for
95 * request/response packet types so we just used the SetRequest as a
96 * template
97 */
98 set = &rndis_msg->msg.set_req;
99 set->req_id = atomic_inc_return(&dev->new_req_id);
100
101 /* Add to the request list */
102 spin_lock_irqsave(&dev->request_lock, flags);
103 list_add_tail(&request->list_ent, &dev->req_list);
104 spin_unlock_irqrestore(&dev->request_lock, flags);
105
106 return request;
107}
108
109static void put_rndis_request(struct rndis_device *dev,
110 struct rndis_request *req)
111{
112 unsigned long flags;
113
114 spin_lock_irqsave(&dev->request_lock, flags);
115 list_del(&req->list_ent);
116 spin_unlock_irqrestore(&dev->request_lock, flags);
117
118 kfree(req);
119}
120
121static void dump_rndis_message(struct hv_device *hv_dev,
122 struct rndis_message *rndis_msg)
123{
124 struct net_device *netdev;
125 struct netvsc_device *net_device;
126
127 net_device = hv_get_drvdata(hv_dev);
128 netdev = net_device->ndev;
129
130 switch (rndis_msg->ndis_msg_type) {
131 case REMOTE_NDIS_PACKET_MSG:
132 netdev_dbg(netdev, "REMOTE_NDIS_PACKET_MSG (len %u, "
133 "data offset %u data len %u, # oob %u, "
134 "oob offset %u, oob len %u, pkt offset %u, "
135 "pkt len %u\n",
136 rndis_msg->msg_len,
137 rndis_msg->msg.pkt.data_offset,
138 rndis_msg->msg.pkt.data_len,
139 rndis_msg->msg.pkt.num_oob_data_elements,
140 rndis_msg->msg.pkt.oob_data_offset,
141 rndis_msg->msg.pkt.oob_data_len,
142 rndis_msg->msg.pkt.per_pkt_info_offset,
143 rndis_msg->msg.pkt.per_pkt_info_len);
144 break;
145
146 case REMOTE_NDIS_INITIALIZE_CMPLT:
147 netdev_dbg(netdev, "REMOTE_NDIS_INITIALIZE_CMPLT "
148 "(len %u, id 0x%x, status 0x%x, major %d, minor %d, "
149 "device flags %d, max xfer size 0x%x, max pkts %u, "
150 "pkt aligned %u)\n",
151 rndis_msg->msg_len,
152 rndis_msg->msg.init_complete.req_id,
153 rndis_msg->msg.init_complete.status,
154 rndis_msg->msg.init_complete.major_ver,
155 rndis_msg->msg.init_complete.minor_ver,
156 rndis_msg->msg.init_complete.dev_flags,
157 rndis_msg->msg.init_complete.max_xfer_size,
158 rndis_msg->msg.init_complete.
159 max_pkt_per_msg,
160 rndis_msg->msg.init_complete.
161 pkt_alignment_factor);
162 break;
163
164 case REMOTE_NDIS_QUERY_CMPLT:
165 netdev_dbg(netdev, "REMOTE_NDIS_QUERY_CMPLT "
166 "(len %u, id 0x%x, status 0x%x, buf len %u, "
167 "buf offset %u)\n",
168 rndis_msg->msg_len,
169 rndis_msg->msg.query_complete.req_id,
170 rndis_msg->msg.query_complete.status,
171 rndis_msg->msg.query_complete.
172 info_buflen,
173 rndis_msg->msg.query_complete.
174 info_buf_offset);
175 break;
176
177 case REMOTE_NDIS_SET_CMPLT:
178 netdev_dbg(netdev,
179 "REMOTE_NDIS_SET_CMPLT (len %u, id 0x%x, status 0x%x)\n",
180 rndis_msg->msg_len,
181 rndis_msg->msg.set_complete.req_id,
182 rndis_msg->msg.set_complete.status);
183 break;
184
185 case REMOTE_NDIS_INDICATE_STATUS_MSG:
186 netdev_dbg(netdev, "REMOTE_NDIS_INDICATE_STATUS_MSG "
187 "(len %u, status 0x%x, buf len %u, buf offset %u)\n",
188 rndis_msg->msg_len,
189 rndis_msg->msg.indicate_status.status,
190 rndis_msg->msg.indicate_status.status_buflen,
191 rndis_msg->msg.indicate_status.status_buf_offset);
192 break;
193
194 default:
195 netdev_dbg(netdev, "0x%x (len %u)\n",
196 rndis_msg->ndis_msg_type,
197 rndis_msg->msg_len);
198 break;
199 }
200}
201
202static int rndis_filter_send_request(struct rndis_device *dev,
203 struct rndis_request *req)
204{
205 int ret;
206 struct hv_netvsc_packet *packet;
207
208 /* Setup the packet to send it */
209 packet = &req->pkt;
210
211 packet->is_data_pkt = false;
212 packet->total_data_buflen = req->request_msg.msg_len;
213 packet->page_buf_cnt = 1;
214
215 packet->page_buf[0].pfn = virt_to_phys(&req->request_msg) >>
216 PAGE_SHIFT;
217 packet->page_buf[0].len = req->request_msg.msg_len;
218 packet->page_buf[0].offset =
219 (unsigned long)&req->request_msg & (PAGE_SIZE - 1);
220
221 packet->completion.send.send_completion_ctx = req;/* packet; */
222 packet->completion.send.send_completion =
223 rndis_filter_send_request_completion;
224 packet->completion.send.send_completion_tid = (unsigned long)dev;
225
226 ret = netvsc_send(dev->net_dev->dev, packet);
227 return ret;
228}
229
230static void rndis_filter_receive_response(struct rndis_device *dev,
231 struct rndis_message *resp)
232{
233 struct rndis_request *request = NULL;
234 bool found = false;
235 unsigned long flags;
236 struct net_device *ndev;
237
238 ndev = dev->net_dev->ndev;
239
240 spin_lock_irqsave(&dev->request_lock, flags);
241 list_for_each_entry(request, &dev->req_list, list_ent) {
242 /*
243 * All request/response messages contain RequestId as the 1st
244 * field
245 */
246 if (request->request_msg.msg.init_req.req_id
247 == resp->msg.init_complete.req_id) {
248 found = true;
249 break;
250 }
251 }
252 spin_unlock_irqrestore(&dev->request_lock, flags);
253
254 if (found) {
255 if (resp->msg_len <= sizeof(struct rndis_message)) {
256 memcpy(&request->response_msg, resp,
257 resp->msg_len);
258 } else {
259 netdev_err(ndev,
260 "rndis response buffer overflow "
261 "detected (size %u max %zu)\n",
262 resp->msg_len,
263 sizeof(struct rndis_filter_packet));
264
265 if (resp->ndis_msg_type ==
266 REMOTE_NDIS_RESET_CMPLT) {
267 /* does not have a request id field */
268 request->response_msg.msg.reset_complete.
269 status = STATUS_BUFFER_OVERFLOW;
270 } else {
271 request->response_msg.msg.
272 init_complete.status =
273 STATUS_BUFFER_OVERFLOW;
274 }
275 }
276
277 complete(&request->wait_event);
278 } else {
279 netdev_err(ndev,
280 "no rndis request found for this response "
281 "(id 0x%x res type 0x%x)\n",
282 resp->msg.init_complete.req_id,
283 resp->ndis_msg_type);
284 }
285}
286
287static void rndis_filter_receive_indicate_status(struct rndis_device *dev,
288 struct rndis_message *resp)
289{
290 struct rndis_indicate_status *indicate =
291 &resp->msg.indicate_status;
292
293 if (indicate->status == RNDIS_STATUS_MEDIA_CONNECT) {
294 netvsc_linkstatus_callback(
295 dev->net_dev->dev, 1);
296 } else if (indicate->status == RNDIS_STATUS_MEDIA_DISCONNECT) {
297 netvsc_linkstatus_callback(
298 dev->net_dev->dev, 0);
299 } else {
300 /*
301 * TODO:
302 */
303 }
304}
305
306static void rndis_filter_receive_data(struct rndis_device *dev,
307 struct rndis_message *msg,
308 struct hv_netvsc_packet *pkt)
309{
310 struct rndis_packet *rndis_pkt;
311 u32 data_offset;
312
313 rndis_pkt = &msg->msg.pkt;
314
315 /*
316 * FIXME: Handle multiple rndis pkt msgs that may be enclosed in this
317 * netvsc packet (ie TotalDataBufferLength != MessageLength)
318 */
319
320 /* Remove the rndis header and pass it back up the stack */
321 data_offset = RNDIS_HEADER_SIZE + rndis_pkt->data_offset;
322
323 pkt->total_data_buflen -= data_offset;
324 pkt->data = (void *)((unsigned long)pkt->data + data_offset);
325
326 pkt->is_data_pkt = true;
327
328 netvsc_recv_callback(dev->net_dev->dev, pkt);
329}
330
331int rndis_filter_receive(struct hv_device *dev,
332 struct hv_netvsc_packet *pkt)
333{
334 struct netvsc_device *net_dev = hv_get_drvdata(dev);
335 struct rndis_device *rndis_dev;
336 struct rndis_message rndis_msg;
337 struct rndis_message *rndis_hdr;
338 struct net_device *ndev;
339
340 if (!net_dev)
341 return -EINVAL;
342
343 ndev = net_dev->ndev;
344
345 /* Make sure the rndis device state is initialized */
346 if (!net_dev->extension) {
347 netdev_err(ndev, "got rndis message but no rndis device - "
348 "dropping this message!\n");
349 return -ENODEV;
350 }
351
352 rndis_dev = (struct rndis_device *)net_dev->extension;
353 if (rndis_dev->state == RNDIS_DEV_UNINITIALIZED) {
354 netdev_err(ndev, "got rndis message but rndis device "
355 "uninitialized...dropping this message!\n");
356 return -ENODEV;
357 }
358
359 rndis_hdr = pkt->data;
360
361 /* Make sure we got a valid rndis message */
362 if ((rndis_hdr->ndis_msg_type != REMOTE_NDIS_PACKET_MSG) &&
363 (rndis_hdr->msg_len > sizeof(struct rndis_message))) {
364 netdev_err(ndev, "incoming rndis message buffer overflow "
365 "detected (got %u, max %zu)..marking it an error!\n",
366 rndis_hdr->msg_len,
367 sizeof(struct rndis_message));
368 }
369
370 memcpy(&rndis_msg, rndis_hdr,
371 (rndis_hdr->msg_len > sizeof(struct rndis_message)) ?
372 sizeof(struct rndis_message) :
373 rndis_hdr->msg_len);
374
375 dump_rndis_message(dev, &rndis_msg);
376
377 switch (rndis_msg.ndis_msg_type) {
378 case REMOTE_NDIS_PACKET_MSG:
379 /* data msg */
380 rndis_filter_receive_data(rndis_dev, &rndis_msg, pkt);
381 break;
382
383 case REMOTE_NDIS_INITIALIZE_CMPLT:
384 case REMOTE_NDIS_QUERY_CMPLT:
385 case REMOTE_NDIS_SET_CMPLT:
386 /* completion msgs */
387 rndis_filter_receive_response(rndis_dev, &rndis_msg);
388 break;
389
390 case REMOTE_NDIS_INDICATE_STATUS_MSG:
391 /* notification msgs */
392 rndis_filter_receive_indicate_status(rndis_dev, &rndis_msg);
393 break;
394 default:
395 netdev_err(ndev,
396 "unhandled rndis message (type %u len %u)\n",
397 rndis_msg.ndis_msg_type,
398 rndis_msg.msg_len);
399 break;
400 }
401
402 return 0;
403}
404
405static int rndis_filter_query_device(struct rndis_device *dev, u32 oid,
406 void *result, u32 *result_size)
407{
408 struct rndis_request *request;
409 u32 inresult_size = *result_size;
410 struct rndis_query_request *query;
411 struct rndis_query_complete *query_complete;
412 int ret = 0;
413 int t;
414
415 if (!result)
416 return -EINVAL;
417
418 *result_size = 0;
419 request = get_rndis_request(dev, REMOTE_NDIS_QUERY_MSG,
420 RNDIS_MESSAGE_SIZE(struct rndis_query_request));
421 if (!request) {
422 ret = -ENOMEM;
423 goto cleanup;
424 }
425
426 /* Setup the rndis query */
427 query = &request->request_msg.msg.query_req;
428 query->oid = oid;
429 query->info_buf_offset = sizeof(struct rndis_query_request);
430 query->info_buflen = 0;
431 query->dev_vc_handle = 0;
432
433 ret = rndis_filter_send_request(dev, request);
434 if (ret != 0)
435 goto cleanup;
436
437 t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
438 if (t == 0) {
439 ret = -ETIMEDOUT;
440 goto cleanup;
441 }
442
443 /* Copy the response back */
444 query_complete = &request->response_msg.msg.query_complete;
445
446 if (query_complete->info_buflen > inresult_size) {
447 ret = -1;
448 goto cleanup;
449 }
450
451 memcpy(result,
452 (void *)((unsigned long)query_complete +
453 query_complete->info_buf_offset),
454 query_complete->info_buflen);
455
456 *result_size = query_complete->info_buflen;
457
458cleanup:
459 if (request)
460 put_rndis_request(dev, request);
461
462 return ret;
463}
464
465static int rndis_filter_query_device_mac(struct rndis_device *dev)
466{
467 u32 size = ETH_ALEN;
468
469 return rndis_filter_query_device(dev,
470 RNDIS_OID_802_3_PERMANENT_ADDRESS,
471 dev->hw_mac_adr, &size);
472}
473
474static int rndis_filter_query_device_link_status(struct rndis_device *dev)
475{
476 u32 size = sizeof(u32);
477 u32 link_status;
478 int ret;
479
480 ret = rndis_filter_query_device(dev,
481 RNDIS_OID_GEN_MEDIA_CONNECT_STATUS,
482 &link_status, &size);
483 dev->link_state = (link_status != 0) ? true : false;
484
485 return ret;
486}
487
488int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter)
489{
490 struct rndis_request *request;
491 struct rndis_set_request *set;
492 struct rndis_set_complete *set_complete;
493 u32 status;
494 int ret, t;
495 struct net_device *ndev;
496
497 ndev = dev->net_dev->ndev;
498
499 request = get_rndis_request(dev, REMOTE_NDIS_SET_MSG,
500 RNDIS_MESSAGE_SIZE(struct rndis_set_request) +
501 sizeof(u32));
502 if (!request) {
503 ret = -ENOMEM;
504 goto cleanup;
505 }
506
507 /* Setup the rndis set */
508 set = &request->request_msg.msg.set_req;
509 set->oid = RNDIS_OID_GEN_CURRENT_PACKET_FILTER;
510 set->info_buflen = sizeof(u32);
511 set->info_buf_offset = sizeof(struct rndis_set_request);
512
513 memcpy((void *)(unsigned long)set + sizeof(struct rndis_set_request),
514 &new_filter, sizeof(u32));
515
516 ret = rndis_filter_send_request(dev, request);
517 if (ret != 0)
518 goto cleanup;
519
520 t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
521
522 if (t == 0) {
523 netdev_err(ndev,
524 "timeout before we got a set response...\n");
525 /*
526 * We can't deallocate the request since we may still receive a
527 * send completion for it.
528 */
529 goto exit;
530 } else {
531 set_complete = &request->response_msg.msg.set_complete;
532 status = set_complete->status;
533 }
534
535cleanup:
536 if (request)
537 put_rndis_request(dev, request);
538exit:
539 return ret;
540}
541
542
543static int rndis_filter_init_device(struct rndis_device *dev)
544{
545 struct rndis_request *request;
546 struct rndis_initialize_request *init;
547 struct rndis_initialize_complete *init_complete;
548 u32 status;
549 int ret, t;
550
551 request = get_rndis_request(dev, REMOTE_NDIS_INITIALIZE_MSG,
552 RNDIS_MESSAGE_SIZE(struct rndis_initialize_request));
553 if (!request) {
554 ret = -ENOMEM;
555 goto cleanup;
556 }
557
558 /* Setup the rndis initialize request */
559 init = &request->request_msg.msg.init_req;
560 init->major_ver = RNDIS_MAJOR_VERSION;
561 init->minor_ver = RNDIS_MINOR_VERSION;
562 /* FIXME: Use 1536 - rounded ethernet frame size */
563 init->max_xfer_size = 2048;
564
565 dev->state = RNDIS_DEV_INITIALIZING;
566
567 ret = rndis_filter_send_request(dev, request);
568 if (ret != 0) {
569 dev->state = RNDIS_DEV_UNINITIALIZED;
570 goto cleanup;
571 }
572
573
574 t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
575
576 if (t == 0) {
577 ret = -ETIMEDOUT;
578 goto cleanup;
579 }
580
581 init_complete = &request->response_msg.msg.init_complete;
582 status = init_complete->status;
583 if (status == RNDIS_STATUS_SUCCESS) {
584 dev->state = RNDIS_DEV_INITIALIZED;
585 ret = 0;
586 } else {
587 dev->state = RNDIS_DEV_UNINITIALIZED;
588 ret = -EINVAL;
589 }
590
591cleanup:
592 if (request)
593 put_rndis_request(dev, request);
594
595 return ret;
596}
597
598static void rndis_filter_halt_device(struct rndis_device *dev)
599{
600 struct rndis_request *request;
601 struct rndis_halt_request *halt;
602
603 /* Attempt to do a rndis device halt */
604 request = get_rndis_request(dev, REMOTE_NDIS_HALT_MSG,
605 RNDIS_MESSAGE_SIZE(struct rndis_halt_request));
606 if (!request)
607 goto cleanup;
608
609 /* Setup the rndis halt request */
610 halt = &request->request_msg.msg.halt_req;
611 halt->req_id = atomic_inc_return(&dev->new_req_id);
612
613 /* Ignore return since this msg is optional. */
614 rndis_filter_send_request(dev, request);
615
616 dev->state = RNDIS_DEV_UNINITIALIZED;
617
618cleanup:
619 if (request)
620 put_rndis_request(dev, request);
621 return;
622}
623
624static int rndis_filter_open_device(struct rndis_device *dev)
625{
626 int ret;
627
628 if (dev->state != RNDIS_DEV_INITIALIZED)
629 return 0;
630
631 ret = rndis_filter_set_packet_filter(dev,
632 NDIS_PACKET_TYPE_BROADCAST |
633 NDIS_PACKET_TYPE_ALL_MULTICAST |
634 NDIS_PACKET_TYPE_DIRECTED);
635 if (ret == 0)
636 dev->state = RNDIS_DEV_DATAINITIALIZED;
637
638 return ret;
639}
640
641static int rndis_filter_close_device(struct rndis_device *dev)
642{
643 int ret;
644
645 if (dev->state != RNDIS_DEV_DATAINITIALIZED)
646 return 0;
647
648 ret = rndis_filter_set_packet_filter(dev, 0);
649 if (ret == 0)
650 dev->state = RNDIS_DEV_INITIALIZED;
651
652 return ret;
653}
654
655int rndis_filter_device_add(struct hv_device *dev,
656 void *additional_info)
657{
658 int ret;
659 struct netvsc_device *net_device;
660 struct rndis_device *rndis_device;
661 struct netvsc_device_info *device_info = additional_info;
662
663 rndis_device = get_rndis_device();
664 if (!rndis_device)
665 return -ENODEV;
666
667 /*
668 * Let the inner driver handle this first to create the netvsc channel.
669 * NOTE! Once the channel is created, we may get a receive callback
670 * (RndisFilterOnReceive()) before this call has completed.
671 */
672 ret = netvsc_device_add(dev, additional_info);
673 if (ret != 0) {
674 kfree(rndis_device);
675 return ret;
676 }
677
678
679 /* Initialize the rndis device */
680 net_device = hv_get_drvdata(dev);
681
682 net_device->extension = rndis_device;
683 rndis_device->net_dev = net_device;
684
685 /* Send the rndis initialization message */
686 ret = rndis_filter_init_device(rndis_device);
687 if (ret != 0) {
688 /*
689 * TODO: If rndis init failed, we will need to shut down the
690 * channel
691 */
692 }
693
694 /* Get the mac address */
695 ret = rndis_filter_query_device_mac(rndis_device);
696 if (ret != 0) {
697 /*
698 * TODO: shutdown rndis device and the channel
699 */
700 }
701
702 memcpy(device_info->mac_adr, rndis_device->hw_mac_adr, ETH_ALEN);
703
704 rndis_filter_query_device_link_status(rndis_device);
705
706 device_info->link_state = rndis_device->link_state;
707
708 dev_info(&dev->device, "Device MAC %pM link state %s\n",
709 rndis_device->hw_mac_adr,
710 device_info->link_state ? "down" : "up");
711
712 return ret;
713}
714
715void rndis_filter_device_remove(struct hv_device *dev)
716{
717 struct netvsc_device *net_dev = hv_get_drvdata(dev);
718 struct rndis_device *rndis_dev = net_dev->extension;
719
720 /* Halt and release the rndis device */
721 rndis_filter_halt_device(rndis_dev);
722
723 kfree(rndis_dev);
724 net_dev->extension = NULL;
725
726 netvsc_device_remove(dev);
727}
728
729
730int rndis_filter_open(struct hv_device *dev)
731{
732 struct netvsc_device *net_device = hv_get_drvdata(dev);
733
734 if (!net_device)
735 return -EINVAL;
736
737 return rndis_filter_open_device(net_device->extension);
738}
739
740int rndis_filter_close(struct hv_device *dev)
741{
742 struct netvsc_device *netDevice = hv_get_drvdata(dev);
743
744 if (!netDevice)
745 return -EINVAL;
746
747 return rndis_filter_close_device(netDevice->extension);
748}
749
750int rndis_filter_send(struct hv_device *dev,
751 struct hv_netvsc_packet *pkt)
752{
753 int ret;
754 struct rndis_filter_packet *filterPacket;
755 struct rndis_message *rndisMessage;
756 struct rndis_packet *rndisPacket;
757 u32 rndisMessageSize;
758
759 /* Add the rndis header */
760 filterPacket = (struct rndis_filter_packet *)pkt->extension;
761
762 memset(filterPacket, 0, sizeof(struct rndis_filter_packet));
763
764 rndisMessage = &filterPacket->msg;
765 rndisMessageSize = RNDIS_MESSAGE_SIZE(struct rndis_packet);
766
767 rndisMessage->ndis_msg_type = REMOTE_NDIS_PACKET_MSG;
768 rndisMessage->msg_len = pkt->total_data_buflen +
769 rndisMessageSize;
770
771 rndisPacket = &rndisMessage->msg.pkt;
772 rndisPacket->data_offset = sizeof(struct rndis_packet);
773 rndisPacket->data_len = pkt->total_data_buflen;
774
775 pkt->is_data_pkt = true;
776 pkt->page_buf[0].pfn = virt_to_phys(rndisMessage) >> PAGE_SHIFT;
777 pkt->page_buf[0].offset =
778 (unsigned long)rndisMessage & (PAGE_SIZE-1);
779 pkt->page_buf[0].len = rndisMessageSize;
780
781 /* Save the packet send completion and context */
782 filterPacket->completion = pkt->completion.send.send_completion;
783 filterPacket->completion_ctx =
784 pkt->completion.send.send_completion_ctx;
785
786 /* Use ours */
787 pkt->completion.send.send_completion = rndis_filter_send_completion;
788 pkt->completion.send.send_completion_ctx = filterPacket;
789
790 ret = netvsc_send(dev, pkt);
791 if (ret != 0) {
792 /*
793 * Reset the completion to originals to allow retries from
794 * above
795 */
796 pkt->completion.send.send_completion =
797 filterPacket->completion;
798 pkt->completion.send.send_completion_ctx =
799 filterPacket->completion_ctx;
800 }
801
802 return ret;
803}
804
805static void rndis_filter_send_completion(void *ctx)
806{
807 struct rndis_filter_packet *filterPacket = ctx;
808
809 /* Pass it back to the original handler */
810 filterPacket->completion(filterPacket->completion_ctx);
811}
812
813
814static void rndis_filter_send_request_completion(void *ctx)
815{
816 /* Noop */
817}
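rndis_filter_send() above wraps each outgoing frame in an RNDIS data message: msg_len covers the RNDIS header plus the frame, data_offset records where the frame starts, and rndis_filter_receive_data() strips the same amount on the way back up. The stand-alone sketch below only illustrates that framing arithmetic; the struct layout and the 8-byte generic header size are simplifying assumptions, not the driver's actual definitions:

#include <stdio.h>
#include <stdint.h>

/*
 * Illustration of the RNDIS data-packet framing: the message length is
 * the payload length plus the RNDIS header, and the frame sits
 * data_offset bytes past the end of the generic header.
 */
#define FAKE_RNDIS_GENERIC_HDR	8	/* msg_type + msg_len (assumed size) */

struct fake_rndis_packet {		/* trimmed-down stand-in */
	uint32_t data_offset;		/* from the end of the generic header */
	uint32_t data_len;
};

int main(void)
{
	uint32_t frame_len = 1514;	/* one full Ethernet frame */
	struct fake_rndis_packet pkt = {
		.data_offset = sizeof(struct fake_rndis_packet),
		.data_len = frame_len,
	};
	uint32_t msg_len = FAKE_RNDIS_GENERIC_HDR + sizeof(pkt) + frame_len;

	printf("msg_len = %u (header %zu + frame %u)\n",
	       msg_len, FAKE_RNDIS_GENERIC_HDR + sizeof(pkt), frame_len);
	/* Receive side strips the same amount before handing the frame up. */
	printf("frame starts %u bytes into the message\n",
	       FAKE_RNDIS_GENERIC_HDR + pkt.data_offset);
	return 0;
}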