author    Greg Kroah-Hartman <gregkh@suse.de>    2011-12-09 22:01:27 -0500
committer Greg Kroah-Hartman <gregkh@suse.de>    2011-12-09 22:01:27 -0500
commit    407f3fd8faf80f77c47ebda7501c6a8698d2f3a6 (patch)
tree      3b41623f7174f1bee5b31853d2b295028cf9e376 /drivers/net
parent    dc47ce90c3a822cd7c9e9339fe4d5f61dcb26b50 (diff)
parent    1d06825b0ede541f63b5577435abd2fc649a9b5e (diff)
Merge 3.2-rc5 into staging-next
This resolves the conflict in the drivers/staging/iio/industrialio-core.c file
due to two different changes made to resolve the same problem.

Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/Kconfig                  2
-rw-r--r--  drivers/net/Makefile                 2
-rw-r--r--  drivers/net/hyperv/Kconfig           5
-rw-r--r--  drivers/net/hyperv/Makefile          3
-rw-r--r--  drivers/net/hyperv/hyperv_net.h   1082
-rw-r--r--  drivers/net/hyperv/netvsc.c        949
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c    474
-rw-r--r--  drivers/net/hyperv/rndis_filter.c  834
8 files changed, 3351 insertions(+), 0 deletions(-)
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 654a5e94e0e7..99aa7faf5553 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -338,4 +338,6 @@ config VMXNET3
338	  To compile this driver as a module, choose M here: the
339	  module will be called vmxnet3.
340
341	source "drivers/net/hyperv/Kconfig"
342
343	endif # NETDEVICES
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index fa877cd2b139..a81192b902ed 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -66,3 +66,5 @@ obj-$(CONFIG_USB_USBNET) += usb/
66	obj-$(CONFIG_USB_ZD1201)	+= usb/
67	obj-$(CONFIG_USB_IPHETH)	+= usb/
68	obj-$(CONFIG_USB_CDC_PHONET)	+= usb/
69
70	obj-$(CONFIG_HYPERV_NET)	+= hyperv/
diff --git a/drivers/net/hyperv/Kconfig b/drivers/net/hyperv/Kconfig
new file mode 100644
index 000000000000..936968d23559
--- /dev/null
+++ b/drivers/net/hyperv/Kconfig
@@ -0,0 +1,5 @@
1config HYPERV_NET
2 tristate "Microsoft Hyper-V virtual network driver"
3 depends on HYPERV
4 help
5 Select this option to enable the Hyper-V virtual network driver.
diff --git a/drivers/net/hyperv/Makefile b/drivers/net/hyperv/Makefile
new file mode 100644
index 000000000000..c8a66827100c
--- /dev/null
+++ b/drivers/net/hyperv/Makefile
@@ -0,0 +1,3 @@
1obj-$(CONFIG_HYPERV_NET) += hv_netvsc.o
2
3hv_netvsc-y := netvsc_drv.o netvsc.o rndis_filter.o
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
new file mode 100644
index 000000000000..49b131f71d7a
--- /dev/null
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -0,0 +1,1082 @@
1/*
2 *
3 * Copyright (c) 2011, Microsoft Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
16 * Place - Suite 330, Boston, MA 02111-1307 USA.
17 *
18 * Authors:
19 * Haiyang Zhang <haiyangz@microsoft.com>
20 * Hank Janssen <hjanssen@microsoft.com>
21 * K. Y. Srinivasan <kys@microsoft.com>
22 *
23 */
24
25#ifndef _HYPERV_NET_H
26#define _HYPERV_NET_H
27
28#include <linux/list.h>
29#include <linux/hyperv.h>
30
31/* Fwd declaration */
32struct hv_netvsc_packet;
33
34/* Represents the xfer page packet which contains 1 or more netvsc packets */
35struct xferpage_packet {
36 struct list_head list_ent;
37
38 /* # of netvsc packets this xfer packet contains */
39 u32 count;
40};
41
42/* The number of pages sufficient to cover a jumbo frame buffer. */
43#define NETVSC_PACKET_MAXPAGE 4
44
45/*
46 * Represents a netvsc packet which contains 1 RNDIS message and 1 ethernet
47 * frame within the RNDIS message
48 */
49struct hv_netvsc_packet {
50 /* Bookkeeping stuff */
51 struct list_head list_ent;
52
53 struct hv_device *device;
54 bool is_data_pkt;
55
56	/*
57	 * Valid only for receives, when we break an xfer page packet
58	 * into multiple netvsc packets
59	 */
60 struct xferpage_packet *xfer_page_pkt;
61
62 union {
63 struct {
64 u64 recv_completion_tid;
65 void *recv_completion_ctx;
66 void (*recv_completion)(void *context);
67 } recv;
68 struct {
69 u64 send_completion_tid;
70 void *send_completion_ctx;
71 void (*send_completion)(void *context);
72 } send;
73 } completion;
74
75 /* This points to the memory after page_buf */
76 void *extension;
77
78 u32 total_data_buflen;
79 /* Points to the send/receive buffer where the ethernet frame is */
80 u32 page_buf_cnt;
81 struct hv_page_buffer page_buf[NETVSC_PACKET_MAXPAGE];
82};
83
84struct netvsc_device_info {
85 unsigned char mac_adr[6];
86 bool link_state; /* 0 - link up, 1 - link down */
87 int ring_size;
88};
89
90enum rndis_device_state {
91 RNDIS_DEV_UNINITIALIZED = 0,
92 RNDIS_DEV_INITIALIZING,
93 RNDIS_DEV_INITIALIZED,
94 RNDIS_DEV_DATAINITIALIZED,
95};
96
97struct rndis_device {
98 struct netvsc_device *net_dev;
99
100 enum rndis_device_state state;
101 bool link_state;
102 atomic_t new_req_id;
103
104 spinlock_t request_lock;
105 struct list_head req_list;
106
107 unsigned char hw_mac_adr[ETH_ALEN];
108};
109
110
111/* Interface */
112int netvsc_device_add(struct hv_device *device, void *additional_info);
113int netvsc_device_remove(struct hv_device *device);
114int netvsc_send(struct hv_device *device,
115 struct hv_netvsc_packet *packet);
116void netvsc_linkstatus_callback(struct hv_device *device_obj,
117 unsigned int status);
118int netvsc_recv_callback(struct hv_device *device_obj,
119 struct hv_netvsc_packet *packet);
120int rndis_filter_open(struct hv_device *dev);
121int rndis_filter_close(struct hv_device *dev);
122int rndis_filter_device_add(struct hv_device *dev,
123 void *additional_info);
124void rndis_filter_device_remove(struct hv_device *dev);
125int rndis_filter_receive(struct hv_device *dev,
126 struct hv_netvsc_packet *pkt);
127
128
129
130int rndis_filter_send(struct hv_device *dev,
131 struct hv_netvsc_packet *pkt);
132
133int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter);
134
135
136#define NVSP_INVALID_PROTOCOL_VERSION ((u32)0xFFFFFFFF)
137
138#define NVSP_PROTOCOL_VERSION_1 2
139#define NVSP_MIN_PROTOCOL_VERSION NVSP_PROTOCOL_VERSION_1
140#define NVSP_MAX_PROTOCOL_VERSION NVSP_PROTOCOL_VERSION_1
141
142enum {
143 NVSP_MSG_TYPE_NONE = 0,
144
145 /* Init Messages */
146 NVSP_MSG_TYPE_INIT = 1,
147 NVSP_MSG_TYPE_INIT_COMPLETE = 2,
148
149 NVSP_VERSION_MSG_START = 100,
150
151 /* Version 1 Messages */
152 NVSP_MSG1_TYPE_SEND_NDIS_VER = NVSP_VERSION_MSG_START,
153
154 NVSP_MSG1_TYPE_SEND_RECV_BUF,
155 NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE,
156 NVSP_MSG1_TYPE_REVOKE_RECV_BUF,
157
158 NVSP_MSG1_TYPE_SEND_SEND_BUF,
159 NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE,
160 NVSP_MSG1_TYPE_REVOKE_SEND_BUF,
161
162 NVSP_MSG1_TYPE_SEND_RNDIS_PKT,
163 NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE,
164
165 /*
166 * This should be set to the number of messages for the version with
167 * the maximum number of messages.
168 */
169 NVSP_NUM_MSG_PER_VERSION = 9,
170};
171
172enum {
173 NVSP_STAT_NONE = 0,
174 NVSP_STAT_SUCCESS,
175 NVSP_STAT_FAIL,
176 NVSP_STAT_PROTOCOL_TOO_NEW,
177 NVSP_STAT_PROTOCOL_TOO_OLD,
178 NVSP_STAT_INVALID_RNDIS_PKT,
179 NVSP_STAT_BUSY,
180 NVSP_STAT_MAX,
181};
182
183struct nvsp_message_header {
184 u32 msg_type;
185};
186
187/* Init Messages */
188
189/*
190 * This message is used by the VSC to initialize the channel after the channel
191 * has been opened. This message should never include anything other than
192 * versioning (i.e. this message will be the same forever).
193 */
194struct nvsp_message_init {
195 u32 min_protocol_ver;
196 u32 max_protocol_ver;
197} __packed;
198
199/*
200 * This message is used by the VSP to complete the initialization of the
201 * channel. This message should never include anything other than versioning
202 * (i.e. this message will be the same forever).
203 */
204struct nvsp_message_init_complete {
205 u32 negotiated_protocol_ver;
206 u32 max_mdl_chain_len;
207 u32 status;
208} __packed;
209
210union nvsp_message_init_uber {
211 struct nvsp_message_init init;
212 struct nvsp_message_init_complete init_complete;
213} __packed;
214
215/* Version 1 Messages */
216
217/*
218 * This message is used by the VSC to send the NDIS version to the VSP. The VSP
219 * can use this information when handling OIDs sent by the VSC.
220 */
221struct nvsp_1_message_send_ndis_version {
222 u32 ndis_major_ver;
223 u32 ndis_minor_ver;
224} __packed;
225
226/*
227 * This message is used by the VSC to send a receive buffer to the VSP. The VSP
228 * can then use the receive buffer to send data to the VSC.
229 */
230struct nvsp_1_message_send_receive_buffer {
231 u32 gpadl_handle;
232 u16 id;
233} __packed;
234
235struct nvsp_1_receive_buffer_section {
236 u32 offset;
237 u32 sub_alloc_size;
238 u32 num_sub_allocs;
239 u32 end_offset;
240} __packed;
241
242/*
243 * This message is used by the VSP to acknowledge a receive buffer sent by the
244 * VSC. This message must be sent by the VSP before the VSP uses the receive
245 * buffer.
246 */
247struct nvsp_1_message_send_receive_buffer_complete {
248 u32 status;
249 u32 num_sections;
250
251 /*
252 * The receive buffer is split into two parts, a large suballocation
253 * section and a small suballocation section. These sections are then
254 * suballocated by a certain size.
255 */
256
257 /*
258 * For example, the following break up of the receive buffer has 6
259 * large suballocations and 10 small suballocations.
260 */
261
262 /*
263 * | Large Section | | Small Section |
264 * ------------------------------------------------------------
265 * | | | | | | | | | | | | | | | | | |
266 * | |
267 * LargeOffset SmallOffset
268 */
269
270 struct nvsp_1_receive_buffer_section sections[1];
271} __packed;
272
273/*
274 * This message is sent by the VSC to revoke the receive buffer. After the VSP
275 * completes this transaction, the VSP should never use the receive buffer
276 * again.
277 */
278struct nvsp_1_message_revoke_receive_buffer {
279 u16 id;
280};
281
282/*
283 * This message is used by the VSC to send a send buffer to the VSP. The VSC
284 * can then use the send buffer to send data to the VSP.
285 */
286struct nvsp_1_message_send_send_buffer {
287 u32 gpadl_handle;
288 u16 id;
289} __packed;
290
291/*
292 * This message is used by the VSP to acknowledge a send buffer sent by the
293 * VSC. This message must be sent by the VSP before the VSP uses the send
294 * buffer.
295 */
296struct nvsp_1_message_send_send_buffer_complete {
297 u32 status;
298
299	/*
300	 * The VSC gets to choose the size of the send buffer and the VSP gets
301	 * to choose the section size of the buffer. This was done to enable
302	 * dynamic reconfigurations when the cost of GPA-direct buffers
303	 * decreases.
304	 */
305 u32 section_size;
306} __packed;
307
308/*
309 * This message is sent by the VSC to revoke the send buffer. After the VSP
310 * completes this transaction, the VSP should never use the send buffer again.
311 */
312struct nvsp_1_message_revoke_send_buffer {
313 u16 id;
314};
315
316/*
317 * This message is used by both the VSP and the VSC to send a RNDIS message to
318 * the opposite channel endpoint.
319 */
320struct nvsp_1_message_send_rndis_packet {
321	/*
322	 * This field is specified by RNDIS. It assumes there are two different
323	 * channels of communication. However, the Network VSP only has one.
324	 * Therefore, the channel travels with the RNDIS packet.
325	 */
326 u32 channel_type;
327
328 /*
329 * This field is used to send part or all of the data through a send
330	 * buffer. This value specifies an index into the send buffer. If the
331 * index is 0xFFFFFFFF, then the send buffer is not being used and all
332 * of the data was sent through other VMBus mechanisms.
333 */
334 u32 send_buf_section_index;
335 u32 send_buf_section_size;
336} __packed;
337
338/*
339 * This message is used by both the VSP and the VSC to complete a RNDIS message
340 * to the opposite channel endpoint. At this point, the initiator of this
341 * message cannot use any resources associated with the original RNDIS packet.
342 */
343struct nvsp_1_message_send_rndis_packet_complete {
344 u32 status;
345};
346
347union nvsp_1_message_uber {
348 struct nvsp_1_message_send_ndis_version send_ndis_ver;
349
350 struct nvsp_1_message_send_receive_buffer send_recv_buf;
351 struct nvsp_1_message_send_receive_buffer_complete
352 send_recv_buf_complete;
353 struct nvsp_1_message_revoke_receive_buffer revoke_recv_buf;
354
355 struct nvsp_1_message_send_send_buffer send_send_buf;
356 struct nvsp_1_message_send_send_buffer_complete send_send_buf_complete;
357 struct nvsp_1_message_revoke_send_buffer revoke_send_buf;
358
359 struct nvsp_1_message_send_rndis_packet send_rndis_pkt;
360 struct nvsp_1_message_send_rndis_packet_complete
361 send_rndis_pkt_complete;
362} __packed;
363
364union nvsp_all_messages {
365 union nvsp_message_init_uber init_msg;
366 union nvsp_1_message_uber v1_msg;
367} __packed;
368
369/* ALL Messages */
370struct nvsp_message {
371 struct nvsp_message_header hdr;
372 union nvsp_all_messages msg;
373} __packed;
374
375
376
377
378/* #define NVSC_MIN_PROTOCOL_VERSION 1 */
379/* #define NVSC_MAX_PROTOCOL_VERSION 1 */
380
381#define NETVSC_RECEIVE_BUFFER_SIZE (1024*1024) /* 1MB */
382
383#define NETVSC_RECEIVE_BUFFER_ID 0xcafe
384
385#define NETVSC_RECEIVE_SG_COUNT 1
386
387/* Preallocated receive packets */
388#define NETVSC_RECEIVE_PACKETLIST_COUNT 256
389
390#define NETVSC_PACKET_SIZE 2048
391
392/* Per netvsc channel-specific data */
393struct netvsc_device {
394 struct hv_device *dev;
395
396 atomic_t num_outstanding_sends;
397 bool destroy;
398	/*
399	 * List of free preallocated hv_netvsc_packet structures used to
400	 * represent receive packets
401	 */
402 struct list_head recv_pkt_list;
403 spinlock_t recv_pkt_list_lock;
404
405	/* Receive buffer allocated by us but managed by NetVSP */
406 void *recv_buf;
407 u32 recv_buf_size;
408 u32 recv_buf_gpadl_handle;
409 u32 recv_section_cnt;
410 struct nvsp_1_receive_buffer_section *recv_section;
411
412 /* Used for NetVSP initialization protocol */
413 struct completion channel_init_wait;
414 struct nvsp_message channel_init_pkt;
415
416 struct nvsp_message revoke_packet;
417 /* unsigned char HwMacAddr[HW_MACADDR_LEN]; */
418
419 struct net_device *ndev;
420
421 /* Holds rndis device info */
422 void *extension;
423};
424
425
426/* Status codes */
427
428
429#ifndef STATUS_SUCCESS
430#define STATUS_SUCCESS (0x00000000L)
431#endif
432
433#ifndef STATUS_UNSUCCESSFUL
434#define STATUS_UNSUCCESSFUL (0xC0000001L)
435#endif
436
437#ifndef STATUS_PENDING
438#define STATUS_PENDING (0x00000103L)
439#endif
440
441#ifndef STATUS_INSUFFICIENT_RESOURCES
442#define STATUS_INSUFFICIENT_RESOURCES (0xC000009AL)
443#endif
444
445#ifndef STATUS_BUFFER_OVERFLOW
446#define STATUS_BUFFER_OVERFLOW (0x80000005L)
447#endif
448
449#ifndef STATUS_NOT_SUPPORTED
450#define STATUS_NOT_SUPPORTED (0xC00000BBL)
451#endif
452
453#define RNDIS_STATUS_SUCCESS (STATUS_SUCCESS)
454#define RNDIS_STATUS_PENDING (STATUS_PENDING)
455#define RNDIS_STATUS_NOT_RECOGNIZED (0x00010001L)
456#define RNDIS_STATUS_NOT_COPIED (0x00010002L)
457#define RNDIS_STATUS_NOT_ACCEPTED (0x00010003L)
458#define RNDIS_STATUS_CALL_ACTIVE (0x00010007L)
459
460#define RNDIS_STATUS_ONLINE (0x40010003L)
461#define RNDIS_STATUS_RESET_START (0x40010004L)
462#define RNDIS_STATUS_RESET_END (0x40010005L)
463#define RNDIS_STATUS_RING_STATUS (0x40010006L)
464#define RNDIS_STATUS_CLOSED (0x40010007L)
465#define RNDIS_STATUS_WAN_LINE_UP (0x40010008L)
466#define RNDIS_STATUS_WAN_LINE_DOWN (0x40010009L)
467#define RNDIS_STATUS_WAN_FRAGMENT (0x4001000AL)
468#define RNDIS_STATUS_MEDIA_CONNECT (0x4001000BL)
469#define RNDIS_STATUS_MEDIA_DISCONNECT (0x4001000CL)
470#define RNDIS_STATUS_HARDWARE_LINE_UP (0x4001000DL)
471#define RNDIS_STATUS_HARDWARE_LINE_DOWN (0x4001000EL)
472#define RNDIS_STATUS_INTERFACE_UP (0x4001000FL)
473#define RNDIS_STATUS_INTERFACE_DOWN (0x40010010L)
474#define RNDIS_STATUS_MEDIA_BUSY (0x40010011L)
475#define RNDIS_STATUS_MEDIA_SPECIFIC_INDICATION (0x40010012L)
476#define RNDIS_STATUS_WW_INDICATION		RNDIS_STATUS_MEDIA_SPECIFIC_INDICATION
477#define RNDIS_STATUS_LINK_SPEED_CHANGE (0x40010013L)
478
479#define RNDIS_STATUS_NOT_RESETTABLE (0x80010001L)
480#define RNDIS_STATUS_SOFT_ERRORS (0x80010003L)
481#define RNDIS_STATUS_HARD_ERRORS (0x80010004L)
482#define RNDIS_STATUS_BUFFER_OVERFLOW (STATUS_BUFFER_OVERFLOW)
483
484#define RNDIS_STATUS_FAILURE (STATUS_UNSUCCESSFUL)
485#define RNDIS_STATUS_RESOURCES (STATUS_INSUFFICIENT_RESOURCES)
486#define RNDIS_STATUS_CLOSING (0xC0010002L)
487#define RNDIS_STATUS_BAD_VERSION (0xC0010004L)
488#define RNDIS_STATUS_BAD_CHARACTERISTICS (0xC0010005L)
489#define RNDIS_STATUS_ADAPTER_NOT_FOUND (0xC0010006L)
490#define RNDIS_STATUS_OPEN_FAILED (0xC0010007L)
491#define RNDIS_STATUS_DEVICE_FAILED (0xC0010008L)
492#define RNDIS_STATUS_MULTICAST_FULL (0xC0010009L)
493#define RNDIS_STATUS_MULTICAST_EXISTS (0xC001000AL)
494#define RNDIS_STATUS_MULTICAST_NOT_FOUND (0xC001000BL)
495#define RNDIS_STATUS_REQUEST_ABORTED (0xC001000CL)
496#define RNDIS_STATUS_RESET_IN_PROGRESS (0xC001000DL)
497#define RNDIS_STATUS_CLOSING_INDICATING (0xC001000EL)
498#define RNDIS_STATUS_NOT_SUPPORTED (STATUS_NOT_SUPPORTED)
499#define RNDIS_STATUS_INVALID_PACKET (0xC001000FL)
500#define RNDIS_STATUS_OPEN_LIST_FULL (0xC0010010L)
501#define RNDIS_STATUS_ADAPTER_NOT_READY (0xC0010011L)
502#define RNDIS_STATUS_ADAPTER_NOT_OPEN (0xC0010012L)
503#define RNDIS_STATUS_NOT_INDICATING (0xC0010013L)
504#define RNDIS_STATUS_INVALID_LENGTH (0xC0010014L)
505#define RNDIS_STATUS_INVALID_DATA (0xC0010015L)
506#define RNDIS_STATUS_BUFFER_TOO_SHORT (0xC0010016L)
507#define RNDIS_STATUS_INVALID_OID (0xC0010017L)
508#define RNDIS_STATUS_ADAPTER_REMOVED (0xC0010018L)
509#define RNDIS_STATUS_UNSUPPORTED_MEDIA (0xC0010019L)
510#define RNDIS_STATUS_GROUP_ADDRESS_IN_USE (0xC001001AL)
511#define RNDIS_STATUS_FILE_NOT_FOUND (0xC001001BL)
512#define RNDIS_STATUS_ERROR_READING_FILE (0xC001001CL)
513#define RNDIS_STATUS_ALREADY_MAPPED (0xC001001DL)
514#define RNDIS_STATUS_RESOURCE_CONFLICT (0xC001001EL)
515#define RNDIS_STATUS_NO_CABLE (0xC001001FL)
516
517#define RNDIS_STATUS_INVALID_SAP (0xC0010020L)
518#define RNDIS_STATUS_SAP_IN_USE (0xC0010021L)
519#define RNDIS_STATUS_INVALID_ADDRESS (0xC0010022L)
520#define RNDIS_STATUS_VC_NOT_ACTIVATED (0xC0010023L)
521#define RNDIS_STATUS_DEST_OUT_OF_ORDER (0xC0010024L)
522#define RNDIS_STATUS_VC_NOT_AVAILABLE (0xC0010025L)
523#define RNDIS_STATUS_CELLRATE_NOT_AVAILABLE (0xC0010026L)
524#define RNDIS_STATUS_INCOMPATABLE_QOS (0xC0010027L)
525#define RNDIS_STATUS_AAL_PARAMS_UNSUPPORTED (0xC0010028L)
526#define RNDIS_STATUS_NO_ROUTE_TO_DESTINATION (0xC0010029L)
527
528#define RNDIS_STATUS_TOKEN_RING_OPEN_ERROR (0xC0011000L)
529
530/* Object Identifiers used by NdisRequest Query/Set Information */
531/* General Objects */
532#define RNDIS_OID_GEN_SUPPORTED_LIST 0x00010101
533#define RNDIS_OID_GEN_HARDWARE_STATUS 0x00010102
534#define RNDIS_OID_GEN_MEDIA_SUPPORTED 0x00010103
535#define RNDIS_OID_GEN_MEDIA_IN_USE 0x00010104
536#define RNDIS_OID_GEN_MAXIMUM_LOOKAHEAD 0x00010105
537#define RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE 0x00010106
538#define RNDIS_OID_GEN_LINK_SPEED 0x00010107
539#define RNDIS_OID_GEN_TRANSMIT_BUFFER_SPACE 0x00010108
540#define RNDIS_OID_GEN_RECEIVE_BUFFER_SPACE 0x00010109
541#define RNDIS_OID_GEN_TRANSMIT_BLOCK_SIZE 0x0001010A
542#define RNDIS_OID_GEN_RECEIVE_BLOCK_SIZE 0x0001010B
543#define RNDIS_OID_GEN_VENDOR_ID 0x0001010C
544#define RNDIS_OID_GEN_VENDOR_DESCRIPTION 0x0001010D
545#define RNDIS_OID_GEN_CURRENT_PACKET_FILTER 0x0001010E
546#define RNDIS_OID_GEN_CURRENT_LOOKAHEAD 0x0001010F
547#define RNDIS_OID_GEN_DRIVER_VERSION 0x00010110
548#define RNDIS_OID_GEN_MAXIMUM_TOTAL_SIZE 0x00010111
549#define RNDIS_OID_GEN_PROTOCOL_OPTIONS 0x00010112
550#define RNDIS_OID_GEN_MAC_OPTIONS 0x00010113
551#define RNDIS_OID_GEN_MEDIA_CONNECT_STATUS 0x00010114
552#define RNDIS_OID_GEN_MAXIMUM_SEND_PACKETS 0x00010115
553#define RNDIS_OID_GEN_VENDOR_DRIVER_VERSION 0x00010116
554#define RNDIS_OID_GEN_NETWORK_LAYER_ADDRESSES 0x00010118
555#define RNDIS_OID_GEN_TRANSPORT_HEADER_OFFSET 0x00010119
556#define RNDIS_OID_GEN_MACHINE_NAME 0x0001021A
557#define RNDIS_OID_GEN_RNDIS_CONFIG_PARAMETER 0x0001021B
558
559#define RNDIS_OID_GEN_XMIT_OK 0x00020101
560#define RNDIS_OID_GEN_RCV_OK 0x00020102
561#define RNDIS_OID_GEN_XMIT_ERROR 0x00020103
562#define RNDIS_OID_GEN_RCV_ERROR 0x00020104
563#define RNDIS_OID_GEN_RCV_NO_BUFFER 0x00020105
564
565#define RNDIS_OID_GEN_DIRECTED_BYTES_XMIT 0x00020201
566#define RNDIS_OID_GEN_DIRECTED_FRAMES_XMIT 0x00020202
567#define RNDIS_OID_GEN_MULTICAST_BYTES_XMIT 0x00020203
568#define RNDIS_OID_GEN_MULTICAST_FRAMES_XMIT 0x00020204
569#define RNDIS_OID_GEN_BROADCAST_BYTES_XMIT 0x00020205
570#define RNDIS_OID_GEN_BROADCAST_FRAMES_XMIT 0x00020206
571#define RNDIS_OID_GEN_DIRECTED_BYTES_RCV 0x00020207
572#define RNDIS_OID_GEN_DIRECTED_FRAMES_RCV 0x00020208
573#define RNDIS_OID_GEN_MULTICAST_BYTES_RCV 0x00020209
574#define RNDIS_OID_GEN_MULTICAST_FRAMES_RCV 0x0002020A
575#define RNDIS_OID_GEN_BROADCAST_BYTES_RCV 0x0002020B
576#define RNDIS_OID_GEN_BROADCAST_FRAMES_RCV 0x0002020C
577
578#define RNDIS_OID_GEN_RCV_CRC_ERROR 0x0002020D
579#define RNDIS_OID_GEN_TRANSMIT_QUEUE_LENGTH 0x0002020E
580
581#define RNDIS_OID_GEN_GET_TIME_CAPS 0x0002020F
582#define RNDIS_OID_GEN_GET_NETCARD_TIME 0x00020210
583
584/* These are connection-oriented general OIDs. */
585/* These replace the above OIDs for connection-oriented media. */
586#define RNDIS_OID_GEN_CO_SUPPORTED_LIST 0x00010101
587#define RNDIS_OID_GEN_CO_HARDWARE_STATUS 0x00010102
588#define RNDIS_OID_GEN_CO_MEDIA_SUPPORTED 0x00010103
589#define RNDIS_OID_GEN_CO_MEDIA_IN_USE 0x00010104
590#define RNDIS_OID_GEN_CO_LINK_SPEED 0x00010105
591#define RNDIS_OID_GEN_CO_VENDOR_ID 0x00010106
592#define RNDIS_OID_GEN_CO_VENDOR_DESCRIPTION 0x00010107
593#define RNDIS_OID_GEN_CO_DRIVER_VERSION 0x00010108
594#define RNDIS_OID_GEN_CO_PROTOCOL_OPTIONS 0x00010109
595#define RNDIS_OID_GEN_CO_MAC_OPTIONS 0x0001010A
596#define RNDIS_OID_GEN_CO_MEDIA_CONNECT_STATUS 0x0001010B
597#define RNDIS_OID_GEN_CO_VENDOR_DRIVER_VERSION 0x0001010C
598#define RNDIS_OID_GEN_CO_MINIMUM_LINK_SPEED 0x0001010D
599
600#define RNDIS_OID_GEN_CO_GET_TIME_CAPS 0x00010201
601#define RNDIS_OID_GEN_CO_GET_NETCARD_TIME 0x00010202
602
603/* These are connection-oriented statistics OIDs. */
604#define RNDIS_OID_GEN_CO_XMIT_PDUS_OK 0x00020101
605#define RNDIS_OID_GEN_CO_RCV_PDUS_OK 0x00020102
606#define RNDIS_OID_GEN_CO_XMIT_PDUS_ERROR 0x00020103
607#define RNDIS_OID_GEN_CO_RCV_PDUS_ERROR 0x00020104
608#define RNDIS_OID_GEN_CO_RCV_PDUS_NO_BUFFER 0x00020105
609
610
611#define RNDIS_OID_GEN_CO_RCV_CRC_ERROR 0x00020201
612#define RNDIS_OID_GEN_CO_TRANSMIT_QUEUE_LENGTH 0x00020202
613#define RNDIS_OID_GEN_CO_BYTES_XMIT 0x00020203
614#define RNDIS_OID_GEN_CO_BYTES_RCV 0x00020204
615#define RNDIS_OID_GEN_CO_BYTES_XMIT_OUTSTANDING 0x00020205
616#define RNDIS_OID_GEN_CO_NETCARD_LOAD 0x00020206
617
618/* These are objects for Connection-oriented media call-managers. */
619#define RNDIS_OID_CO_ADD_PVC 0xFF000001
620#define RNDIS_OID_CO_DELETE_PVC 0xFF000002
621#define RNDIS_OID_CO_GET_CALL_INFORMATION 0xFF000003
622#define RNDIS_OID_CO_ADD_ADDRESS 0xFF000004
623#define RNDIS_OID_CO_DELETE_ADDRESS 0xFF000005
624#define RNDIS_OID_CO_GET_ADDRESSES 0xFF000006
625#define RNDIS_OID_CO_ADDRESS_CHANGE 0xFF000007
626#define RNDIS_OID_CO_SIGNALING_ENABLED 0xFF000008
627#define RNDIS_OID_CO_SIGNALING_DISABLED 0xFF000009
628
629/* 802.3 Objects (Ethernet) */
630#define RNDIS_OID_802_3_PERMANENT_ADDRESS 0x01010101
631#define RNDIS_OID_802_3_CURRENT_ADDRESS 0x01010102
632#define RNDIS_OID_802_3_MULTICAST_LIST 0x01010103
633#define RNDIS_OID_802_3_MAXIMUM_LIST_SIZE 0x01010104
634#define RNDIS_OID_802_3_MAC_OPTIONS 0x01010105
635
636#define NDIS_802_3_MAC_OPTION_PRIORITY 0x00000001
637
638#define RNDIS_OID_802_3_RCV_ERROR_ALIGNMENT 0x01020101
639#define RNDIS_OID_802_3_XMIT_ONE_COLLISION 0x01020102
640#define RNDIS_OID_802_3_XMIT_MORE_COLLISIONS 0x01020103
641
642#define RNDIS_OID_802_3_XMIT_DEFERRED 0x01020201
643#define RNDIS_OID_802_3_XMIT_MAX_COLLISIONS 0x01020202
644#define RNDIS_OID_802_3_RCV_OVERRUN 0x01020203
645#define RNDIS_OID_802_3_XMIT_UNDERRUN 0x01020204
646#define RNDIS_OID_802_3_XMIT_HEARTBEAT_FAILURE 0x01020205
647#define RNDIS_OID_802_3_XMIT_TIMES_CRS_LOST 0x01020206
648#define RNDIS_OID_802_3_XMIT_LATE_COLLISIONS 0x01020207
649
650/* Remote NDIS message types */
651#define REMOTE_NDIS_PACKET_MSG 0x00000001
652#define REMOTE_NDIS_INITIALIZE_MSG 0x00000002
653#define REMOTE_NDIS_HALT_MSG 0x00000003
654#define REMOTE_NDIS_QUERY_MSG 0x00000004
655#define REMOTE_NDIS_SET_MSG 0x00000005
656#define REMOTE_NDIS_RESET_MSG 0x00000006
657#define REMOTE_NDIS_INDICATE_STATUS_MSG 0x00000007
658#define REMOTE_NDIS_KEEPALIVE_MSG 0x00000008
659
660#define REMOTE_CONDIS_MP_CREATE_VC_MSG 0x00008001
661#define REMOTE_CONDIS_MP_DELETE_VC_MSG 0x00008002
662#define REMOTE_CONDIS_MP_ACTIVATE_VC_MSG 0x00008005
663#define REMOTE_CONDIS_MP_DEACTIVATE_VC_MSG 0x00008006
664#define REMOTE_CONDIS_INDICATE_STATUS_MSG 0x00008007
665
666/* Remote NDIS message completion types */
667#define REMOTE_NDIS_INITIALIZE_CMPLT 0x80000002
668#define REMOTE_NDIS_QUERY_CMPLT 0x80000004
669#define REMOTE_NDIS_SET_CMPLT 0x80000005
670#define REMOTE_NDIS_RESET_CMPLT 0x80000006
671#define REMOTE_NDIS_KEEPALIVE_CMPLT 0x80000008
672
673#define REMOTE_CONDIS_MP_CREATE_VC_CMPLT 0x80008001
674#define REMOTE_CONDIS_MP_DELETE_VC_CMPLT 0x80008002
675#define REMOTE_CONDIS_MP_ACTIVATE_VC_CMPLT 0x80008005
676#define REMOTE_CONDIS_MP_DEACTIVATE_VC_CMPLT 0x80008006
677
678/*
679 * Reserved message type for private communication between lower-layer host
680 * driver and remote device, if necessary.
681 */
682#define REMOTE_NDIS_BUS_MSG 0xff000001
683
684/* Defines for DeviceFlags in struct rndis_initialize_complete */
685#define RNDIS_DF_CONNECTIONLESS 0x00000001
686#define RNDIS_DF_CONNECTION_ORIENTED 0x00000002
687#define RNDIS_DF_RAW_DATA 0x00000004
688
689/* Remote NDIS medium types. */
690#define RNDIS_MEDIUM_802_3 0x00000000
691#define RNDIS_MEDIUM_802_5 0x00000001
692#define RNDIS_MEDIUM_FDDI 0x00000002
693#define RNDIS_MEDIUM_WAN 0x00000003
694#define RNDIS_MEDIUM_LOCAL_TALK 0x00000004
695#define RNDIS_MEDIUM_ARCNET_RAW 0x00000006
696#define RNDIS_MEDIUM_ARCNET_878_2 0x00000007
697#define RNDIS_MEDIUM_ATM 0x00000008
698#define RNDIS_MEDIUM_WIRELESS_WAN 0x00000009
699#define RNDIS_MEDIUM_IRDA 0x0000000a
700#define RNDIS_MEDIUM_CO_WAN 0x0000000b
701/* Not a real medium, defined as an upper-bound */
702#define RNDIS_MEDIUM_MAX 0x0000000d
703
704
705/* Remote NDIS medium connection states. */
706#define RNDIS_MEDIA_STATE_CONNECTED 0x00000000
707#define RNDIS_MEDIA_STATE_DISCONNECTED 0x00000001
708
709/* Remote NDIS version numbers */
710#define RNDIS_MAJOR_VERSION 0x00000001
711#define RNDIS_MINOR_VERSION 0x00000000
712
713
714/* NdisInitialize message */
715struct rndis_initialize_request {
716 u32 req_id;
717 u32 major_ver;
718 u32 minor_ver;
719 u32 max_xfer_size;
720};
721
722/* Response to NdisInitialize */
723struct rndis_initialize_complete {
724 u32 req_id;
725 u32 status;
726 u32 major_ver;
727 u32 minor_ver;
728 u32 dev_flags;
729 u32 medium;
730 u32 max_pkt_per_msg;
731 u32 max_xfer_size;
732 u32 pkt_alignment_factor;
733 u32 af_list_offset;
734 u32 af_list_size;
735};
736
737/* Call manager devices only: Information about an address family */
738/* supported by the device is appended to the response to NdisInitialize. */
739struct rndis_co_address_family {
740 u32 address_family;
741 u32 major_ver;
742 u32 minor_ver;
743};
744
745/* NdisHalt message */
746struct rndis_halt_request {
747 u32 req_id;
748};
749
750/* NdisQueryRequest message */
751struct rndis_query_request {
752 u32 req_id;
753 u32 oid;
754 u32 info_buflen;
755 u32 info_buf_offset;
756 u32 dev_vc_handle;
757};
758
759/* Response to NdisQueryRequest */
760struct rndis_query_complete {
761 u32 req_id;
762 u32 status;
763 u32 info_buflen;
764 u32 info_buf_offset;
765};
766
767/* NdisSetRequest message */
768struct rndis_set_request {
769 u32 req_id;
770 u32 oid;
771 u32 info_buflen;
772 u32 info_buf_offset;
773 u32 dev_vc_handle;
774};
775
776/* Response to NdisSetRequest */
777struct rndis_set_complete {
778 u32 req_id;
779 u32 status;
780};
781
782/* NdisReset message */
783struct rndis_reset_request {
784 u32 reserved;
785};
786
787/* Response to NdisReset */
788struct rndis_reset_complete {
789 u32 status;
790 u32 addressing_reset;
791};
792
793/* NdisMIndicateStatus message */
794struct rndis_indicate_status {
795 u32 status;
796 u32 status_buflen;
797 u32 status_buf_offset;
798};
799
800/* Diagnostic information passed as the status buffer in */
801/* struct rndis_indicate_status messages signifying error conditions. */
802struct rndis_diagnostic_info {
803 u32 diag_status;
804 u32 error_offset;
805};
806
807/* NdisKeepAlive message */
808struct rndis_keepalive_request {
809 u32 req_id;
810};
811
812/* Response to NdisKeepAlive */
813struct rndis_keepalive_complete {
814 u32 req_id;
815 u32 status;
816};
817
818/*
819 * Data message. All Offset fields contain byte offsets from the beginning of
820 * struct rndis_packet. All Length fields are in bytes. VcHandle is set
821 * to 0 for connectionless data, otherwise it contains the VC handle.
822 */
823struct rndis_packet {
824 u32 data_offset;
825 u32 data_len;
826 u32 oob_data_offset;
827 u32 oob_data_len;
828 u32 num_oob_data_elements;
829 u32 per_pkt_info_offset;
830 u32 per_pkt_info_len;
831 u32 vc_handle;
832 u32 reserved;
833};
834
835/* Optional Out of Band data associated with a Data message. */
836struct rndis_oobd {
837 u32 size;
838 u32 type;
839 u32 class_info_offset;
840};
841
842/* Packet extension field contents associated with a Data message. */
843struct rndis_per_packet_info {
844 u32 size;
845 u32 type;
846 u32 per_pkt_info_offset;
847};
848
849/* Format of Information buffer passed in a SetRequest for the OID */
850/* OID_GEN_RNDIS_CONFIG_PARAMETER. */
851struct rndis_config_parameter_info {
852 u32 parameter_name_offset;
853 u32 parameter_name_length;
854 u32 parameter_type;
855 u32 parameter_value_offset;
856 u32 parameter_value_length;
857};
858
859/* Values for ParameterType in struct rndis_config_parameter_info */
860#define RNDIS_CONFIG_PARAM_TYPE_INTEGER 0
861#define RNDIS_CONFIG_PARAM_TYPE_STRING 2
862
863/* CONDIS Miniport messages for connection oriented devices */
864/* that do not implement a call manager. */
865
866/* CoNdisMiniportCreateVc message */
867struct rcondis_mp_create_vc {
868 u32 req_id;
869 u32 ndis_vc_handle;
870};
871
872/* Response to CoNdisMiniportCreateVc */
873struct rcondis_mp_create_vc_complete {
874 u32 req_id;
875 u32 dev_vc_handle;
876 u32 status;
877};
878
879/* CoNdisMiniportDeleteVc message */
880struct rcondis_mp_delete_vc {
881 u32 req_id;
882 u32 dev_vc_handle;
883};
884
885/* Response to CoNdisMiniportDeleteVc */
886struct rcondis_mp_delete_vc_complete {
887 u32 req_id;
888 u32 status;
889};
890
891/* CoNdisMiniportQueryRequest message */
892struct rcondis_mp_query_request {
893 u32 req_id;
894 u32 request_type;
895 u32 oid;
896 u32 dev_vc_handle;
897 u32 info_buflen;
898 u32 info_buf_offset;
899};
900
901/* CoNdisMiniportSetRequest message */
902struct rcondis_mp_set_request {
903 u32 req_id;
904 u32 request_type;
905 u32 oid;
906 u32 dev_vc_handle;
907 u32 info_buflen;
908 u32 info_buf_offset;
909};
910
911/* CoNdisIndicateStatus message */
912struct rcondis_indicate_status {
913 u32 ndis_vc_handle;
914 u32 status;
915 u32 status_buflen;
916 u32 status_buf_offset;
917};
918
919/* CONDIS Call/VC parameters */
920struct rcondis_specific_parameters {
921 u32 parameter_type;
922 u32 parameter_length;
923	u32 parameter_offset;
924};
925
926struct rcondis_media_parameters {
927 u32 flags;
928 u32 reserved1;
929 u32 reserved2;
930 struct rcondis_specific_parameters media_specific;
931};
932
933struct rndis_flowspec {
934 u32 token_rate;
935 u32 token_bucket_size;
936 u32 peak_bandwidth;
937 u32 latency;
938 u32 delay_variation;
939 u32 service_type;
940 u32 max_sdu_size;
941 u32 minimum_policed_size;
942};
943
944struct rcondis_call_manager_parameters {
945 struct rndis_flowspec transmit;
946 struct rndis_flowspec receive;
947 struct rcondis_specific_parameters call_mgr_specific;
948};
949
950/* CoNdisMiniportActivateVc message */
951struct rcondis_mp_activate_vc_request {
952 u32 req_id;
953 u32 flags;
954 u32 dev_vc_handle;
955 u32 media_params_offset;
956 u32 media_params_length;
957 u32 call_mgr_params_offset;
958 u32 call_mgr_params_length;
959};
960
961/* Response to CoNdisMiniportActivateVc */
962struct rcondis_mp_activate_vc_complete {
963 u32 req_id;
964 u32 status;
965};
966
967/* CoNdisMiniportDeactivateVc message */
968struct rcondis_mp_deactivate_vc_request {
969 u32 req_id;
970 u32 flags;
971 u32 dev_vc_handle;
972};
973
974/* Response to CoNdisMiniportDeactivateVc */
975struct rcondis_mp_deactivate_vc_complete {
976 u32 req_id;
977 u32 status;
978};
979
980
981/* union with all of the RNDIS messages */
982union rndis_message_container {
983 struct rndis_packet pkt;
984 struct rndis_initialize_request init_req;
985 struct rndis_halt_request halt_req;
986 struct rndis_query_request query_req;
987 struct rndis_set_request set_req;
988 struct rndis_reset_request reset_req;
989 struct rndis_keepalive_request keep_alive_req;
990 struct rndis_indicate_status indicate_status;
991 struct rndis_initialize_complete init_complete;
992 struct rndis_query_complete query_complete;
993 struct rndis_set_complete set_complete;
994 struct rndis_reset_complete reset_complete;
995 struct rndis_keepalive_complete keep_alive_complete;
996 struct rcondis_mp_create_vc co_miniport_create_vc;
997 struct rcondis_mp_delete_vc co_miniport_delete_vc;
998 struct rcondis_indicate_status co_indicate_status;
999 struct rcondis_mp_activate_vc_request co_miniport_activate_vc;
1000 struct rcondis_mp_deactivate_vc_request co_miniport_deactivate_vc;
1001 struct rcondis_mp_create_vc_complete co_miniport_create_vc_complete;
1002 struct rcondis_mp_delete_vc_complete co_miniport_delete_vc_complete;
1003 struct rcondis_mp_activate_vc_complete co_miniport_activate_vc_complete;
1004 struct rcondis_mp_deactivate_vc_complete
1005 co_miniport_deactivate_vc_complete;
1006};
1007
1008/* Remote NDIS message format */
1009struct rndis_message {
1010 u32 ndis_msg_type;
1011
1012 /* Total length of this message, from the beginning */
1013	/* of the struct rndis_message, in bytes. */
1014 u32 msg_len;
1015
1016 /* Actual message */
1017 union rndis_message_container msg;
1018};
1019
1020
1021struct rndis_filter_packet {
1022 void *completion_ctx;
1023 void (*completion)(void *context);
1024 struct rndis_message msg;
1025};
1026
1027/* Handy macros */
1028
1029/* get the size of an RNDIS message. Pass in the message type, */
1030/* struct rndis_set_request, struct rndis_packet for example */
1031#define RNDIS_MESSAGE_SIZE(msg) \
1032 (sizeof(msg) + (sizeof(struct rndis_message) - \
1033 sizeof(union rndis_message_container)))
1034
1035/* get pointer to info buffer with message pointer */
1036#define MESSAGE_TO_INFO_BUFFER(msg) \
1037 (((unsigned char *)(msg)) + msg->info_buf_offset)
1038
1039/* get pointer to status buffer with message pointer */
1040#define MESSAGE_TO_STATUS_BUFFER(msg) \
1041 (((unsigned char *)(msg)) + msg->status_buf_offset)
1042
1043/* get pointer to OOBD buffer with message pointer */
1044#define MESSAGE_TO_OOBD_BUFFER(msg) \
1045 (((unsigned char *)(msg)) + msg->oob_data_offset)
1046
1047/* get pointer to data buffer with message pointer */
1048#define MESSAGE_TO_DATA_BUFFER(msg) \
1049 (((unsigned char *)(msg)) + msg->per_pkt_info_offset)
1050
1051/* get pointer to contained message from NDIS_MESSAGE pointer */
1052#define RNDIS_MESSAGE_PTR_TO_MESSAGE_PTR(rndis_msg) \
1053 ((void *) &rndis_msg->msg)
1054
1055/* get pointer to contained message from NDIS_MESSAGE pointer */
1056#define RNDIS_MESSAGE_RAW_PTR_TO_MESSAGE_PTR(rndis_msg) \
1057 ((void *) rndis_msg)
1058
1059
1060#define __struct_bcount(x)
1061
1062
1063
1064#define RNDIS_HEADER_SIZE (sizeof(struct rndis_message) - \
1065 sizeof(union rndis_message_container))
1066
1067#define NDIS_PACKET_TYPE_DIRECTED 0x00000001
1068#define NDIS_PACKET_TYPE_MULTICAST 0x00000002
1069#define NDIS_PACKET_TYPE_ALL_MULTICAST 0x00000004
1070#define NDIS_PACKET_TYPE_BROADCAST 0x00000008
1071#define NDIS_PACKET_TYPE_SOURCE_ROUTING 0x00000010
1072#define NDIS_PACKET_TYPE_PROMISCUOUS 0x00000020
1073#define NDIS_PACKET_TYPE_SMT 0x00000040
1074#define NDIS_PACKET_TYPE_ALL_LOCAL 0x00000080
1075#define NDIS_PACKET_TYPE_GROUP 0x00000100
1076#define NDIS_PACKET_TYPE_ALL_FUNCTIONAL 0x00000200
1077#define NDIS_PACKET_TYPE_FUNCTIONAL 0x00000400
1078#define NDIS_PACKET_TYPE_MAC_FRAME 0x00000800
1079
1080
1081
1082#endif /* _HYPERV_NET_H */
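
As an aside, the following is a minimal sketch (kernel context assumed, not part of
the patch itself) of how the helper macros declared above in hyperv_net.h, namely
RNDIS_MESSAGE_SIZE() and RNDIS_MESSAGE_PTR_TO_MESSAGE_PTR(), might be used to size
and fill an RNDIS query request. The function name and the zeroed field values are
hypothetical; only the types, macros and message-type constant come from the header.

/*
 * Illustrative sketch only -- not part of the patch. Shows one way the
 * header's helpers could be used to prepare an RNDIS query message.
 */
#include "hyperv_net.h"

static u32 example_fill_rndis_query(struct rndis_message *msg,
				    u32 oid, u32 req_id)
{
	struct rndis_query_request *query;

	msg->ndis_msg_type = REMOTE_NDIS_QUERY_MSG;
	/* Body size plus the RNDIS header, excluding the unused union space */
	msg->msg_len = RNDIS_MESSAGE_SIZE(struct rndis_query_request);

	/* Point at the message body inside the container union */
	query = RNDIS_MESSAGE_PTR_TO_MESSAGE_PTR(msg);
	query->req_id = req_id;
	query->oid = oid;
	query->info_buflen = 0;
	query->info_buf_offset = sizeof(struct rndis_query_request);
	query->dev_vc_handle = 0;

	return msg->msg_len;
}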
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
new file mode 100644
index 000000000000..b6ac152a9bd0
--- /dev/null
+++ b/drivers/net/hyperv/netvsc.c
@@ -0,0 +1,949 @@
1/*
2 * Copyright (c) 2009, Microsoft Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
17 * Authors:
18 * Haiyang Zhang <haiyangz@microsoft.com>
19 * Hank Janssen <hjanssen@microsoft.com>
20 */
21#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22
23#include <linux/kernel.h>
24#include <linux/sched.h>
25#include <linux/wait.h>
26#include <linux/mm.h>
27#include <linux/delay.h>
28#include <linux/io.h>
29#include <linux/slab.h>
30#include <linux/netdevice.h>
31
32#include "hyperv_net.h"
33
34
35static struct netvsc_device *alloc_net_device(struct hv_device *device)
36{
37 struct netvsc_device *net_device;
38 struct net_device *ndev = hv_get_drvdata(device);
39
40 net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
41 if (!net_device)
42 return NULL;
43
44
45 net_device->destroy = false;
46 net_device->dev = device;
47 net_device->ndev = ndev;
48
49 hv_set_drvdata(device, net_device);
50 return net_device;
51}
52
53static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
54{
55 struct netvsc_device *net_device;
56
57 net_device = hv_get_drvdata(device);
58 if (net_device && net_device->destroy)
59 net_device = NULL;
60
61 return net_device;
62}
63
64static struct netvsc_device *get_inbound_net_device(struct hv_device *device)
65{
66 struct netvsc_device *net_device;
67
68 net_device = hv_get_drvdata(device);
69
70 if (!net_device)
71 goto get_in_err;
72
73 if (net_device->destroy &&
74 atomic_read(&net_device->num_outstanding_sends) == 0)
75 net_device = NULL;
76
77get_in_err:
78 return net_device;
79}
80
81
82static int netvsc_destroy_recv_buf(struct netvsc_device *net_device)
83{
84 struct nvsp_message *revoke_packet;
85 int ret = 0;
86 struct net_device *ndev = net_device->ndev;
87
88	/*
89	 * If we got a section count, it means we received a
90	 * SendReceiveBufferComplete msg (i.e. we sent a
91	 * NvspMessage1TypeSendReceiveBuffer msg); therefore, we need
92	 * to send a revoke msg here
93	 */
94 if (net_device->recv_section_cnt) {
95 /* Send the revoke receive buffer */
96 revoke_packet = &net_device->revoke_packet;
97 memset(revoke_packet, 0, sizeof(struct nvsp_message));
98
99 revoke_packet->hdr.msg_type =
100 NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
101 revoke_packet->msg.v1_msg.
102 revoke_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;
103
104 ret = vmbus_sendpacket(net_device->dev->channel,
105 revoke_packet,
106 sizeof(struct nvsp_message),
107 (unsigned long)revoke_packet,
108 VM_PKT_DATA_INBAND, 0);
109		/*
110		 * If we failed here, we might as well return and
111		 * have a leak rather than continue and risk a bugcheck
112		 */
113 if (ret != 0) {
114 netdev_err(ndev, "unable to send "
115 "revoke receive buffer to netvsp\n");
116 return ret;
117 }
118 }
119
120 /* Teardown the gpadl on the vsp end */
121 if (net_device->recv_buf_gpadl_handle) {
122 ret = vmbus_teardown_gpadl(net_device->dev->channel,
123 net_device->recv_buf_gpadl_handle);
124
125		/* If we failed here, we might as well return and have a leak
126		 * rather than continue and risk a bugcheck
127		 */
128 if (ret != 0) {
129 netdev_err(ndev,
130 "unable to teardown receive buffer's gpadl\n");
131 return ret;
132 }
133 net_device->recv_buf_gpadl_handle = 0;
134 }
135
136 if (net_device->recv_buf) {
137 /* Free up the receive buffer */
138 free_pages((unsigned long)net_device->recv_buf,
139 get_order(net_device->recv_buf_size));
140 net_device->recv_buf = NULL;
141 }
142
143 if (net_device->recv_section) {
144 net_device->recv_section_cnt = 0;
145 kfree(net_device->recv_section);
146 net_device->recv_section = NULL;
147 }
148
149 return ret;
150}
151
152static int netvsc_init_recv_buf(struct hv_device *device)
153{
154 int ret = 0;
155 int t;
156 struct netvsc_device *net_device;
157 struct nvsp_message *init_packet;
158 struct net_device *ndev;
159
160 net_device = get_outbound_net_device(device);
161 if (!net_device)
162 return -ENODEV;
163 ndev = net_device->ndev;
164
165 net_device->recv_buf =
166 (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
167 get_order(net_device->recv_buf_size));
168 if (!net_device->recv_buf) {
169 netdev_err(ndev, "unable to allocate receive "
170 "buffer of size %d\n", net_device->recv_buf_size);
171 ret = -ENOMEM;
172 goto cleanup;
173 }
174
175 /*
176 * Establish the gpadl handle for this buffer on this
177 * channel. Note: This call uses the vmbus connection rather
178 * than the channel to establish the gpadl handle.
179 */
180 ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
181 net_device->recv_buf_size,
182 &net_device->recv_buf_gpadl_handle);
183 if (ret != 0) {
184 netdev_err(ndev,
185 "unable to establish receive buffer's gpadl\n");
186 goto cleanup;
187 }
188
189
190 /* Notify the NetVsp of the gpadl handle */
191 init_packet = &net_device->channel_init_pkt;
192
193 memset(init_packet, 0, sizeof(struct nvsp_message));
194
195 init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
196 init_packet->msg.v1_msg.send_recv_buf.
197 gpadl_handle = net_device->recv_buf_gpadl_handle;
198 init_packet->msg.v1_msg.
199 send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;
200
201 /* Send the gpadl notification request */
202 ret = vmbus_sendpacket(device->channel, init_packet,
203 sizeof(struct nvsp_message),
204 (unsigned long)init_packet,
205 VM_PKT_DATA_INBAND,
206 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
207 if (ret != 0) {
208 netdev_err(ndev,
209 "unable to send receive buffer's gpadl to netvsp\n");
210 goto cleanup;
211 }
212
213 t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
214 BUG_ON(t == 0);
215
216
217 /* Check the response */
218 if (init_packet->msg.v1_msg.
219 send_recv_buf_complete.status != NVSP_STAT_SUCCESS) {
220 netdev_err(ndev, "Unable to complete receive buffer "
221 "initialization with NetVsp - status %d\n",
222 init_packet->msg.v1_msg.
223 send_recv_buf_complete.status);
224 ret = -EINVAL;
225 goto cleanup;
226 }
227
228 /* Parse the response */
229
230 net_device->recv_section_cnt = init_packet->msg.
231 v1_msg.send_recv_buf_complete.num_sections;
232
233 net_device->recv_section = kmemdup(
234 init_packet->msg.v1_msg.send_recv_buf_complete.sections,
235 net_device->recv_section_cnt *
236 sizeof(struct nvsp_1_receive_buffer_section),
237 GFP_KERNEL);
238 if (net_device->recv_section == NULL) {
239 ret = -EINVAL;
240 goto cleanup;
241 }
242
243 /*
244 * For 1st release, there should only be 1 section that represents the
245 * entire receive buffer
246 */
247 if (net_device->recv_section_cnt != 1 ||
248 net_device->recv_section->offset != 0) {
249 ret = -EINVAL;
250 goto cleanup;
251 }
252
253 goto exit;
254
255cleanup:
256 netvsc_destroy_recv_buf(net_device);
257
258exit:
259 return ret;
260}
261
262
263static int netvsc_connect_vsp(struct hv_device *device)
264{
265 int ret, t;
266 struct netvsc_device *net_device;
267 struct nvsp_message *init_packet;
268 int ndis_version;
269 struct net_device *ndev;
270
271 net_device = get_outbound_net_device(device);
272 if (!net_device)
273 return -ENODEV;
274 ndev = net_device->ndev;
275
276 init_packet = &net_device->channel_init_pkt;
277
278 memset(init_packet, 0, sizeof(struct nvsp_message));
279 init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
280 init_packet->msg.init_msg.init.min_protocol_ver =
281 NVSP_MIN_PROTOCOL_VERSION;
282 init_packet->msg.init_msg.init.max_protocol_ver =
283 NVSP_MAX_PROTOCOL_VERSION;
284
285 /* Send the init request */
286 ret = vmbus_sendpacket(device->channel, init_packet,
287 sizeof(struct nvsp_message),
288 (unsigned long)init_packet,
289 VM_PKT_DATA_INBAND,
290 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
291
292 if (ret != 0)
293 goto cleanup;
294
295 t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
296
297 if (t == 0) {
298 ret = -ETIMEDOUT;
299 goto cleanup;
300 }
301
302 if (init_packet->msg.init_msg.init_complete.status !=
303 NVSP_STAT_SUCCESS) {
304 ret = -EINVAL;
305 goto cleanup;
306 }
307
308 if (init_packet->msg.init_msg.init_complete.
309 negotiated_protocol_ver != NVSP_PROTOCOL_VERSION_1) {
310 ret = -EPROTO;
311 goto cleanup;
312 }
313 /* Send the ndis version */
314 memset(init_packet, 0, sizeof(struct nvsp_message));
315
316 ndis_version = 0x00050000;
317
318 init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
319 init_packet->msg.v1_msg.
320 send_ndis_ver.ndis_major_ver =
321 (ndis_version & 0xFFFF0000) >> 16;
322 init_packet->msg.v1_msg.
323 send_ndis_ver.ndis_minor_ver =
324 ndis_version & 0xFFFF;
325
326 /* Send the init request */
327 ret = vmbus_sendpacket(device->channel, init_packet,
328 sizeof(struct nvsp_message),
329 (unsigned long)init_packet,
330 VM_PKT_DATA_INBAND, 0);
331 if (ret != 0)
332 goto cleanup;
333
334 /* Post the big receive buffer to NetVSP */
335 ret = netvsc_init_recv_buf(device);
336
337cleanup:
338 return ret;
339}
340
341static void netvsc_disconnect_vsp(struct netvsc_device *net_device)
342{
343 netvsc_destroy_recv_buf(net_device);
344}
345
346/*
347 * netvsc_device_remove - Callback when the root bus device is removed
348 */
349int netvsc_device_remove(struct hv_device *device)
350{
351 struct netvsc_device *net_device;
352 struct hv_netvsc_packet *netvsc_packet, *pos;
353 unsigned long flags;
354
355 net_device = hv_get_drvdata(device);
356 spin_lock_irqsave(&device->channel->inbound_lock, flags);
357 net_device->destroy = true;
358 spin_unlock_irqrestore(&device->channel->inbound_lock, flags);
359
360 /* Wait for all send completions */
361 while (atomic_read(&net_device->num_outstanding_sends)) {
362 dev_info(&device->device,
363 "waiting for %d requests to complete...\n",
364 atomic_read(&net_device->num_outstanding_sends));
365 udelay(100);
366 }
367
368 netvsc_disconnect_vsp(net_device);
369
370 /*
371 * Since we have already drained, we don't need to busy wait
372 * as was done in final_release_stor_device()
373 * Note that we cannot set the ext pointer to NULL until
374 * we have drained - to drain the outgoing packets, we need to
375 * allow incoming packets.
376 */
377
378 spin_lock_irqsave(&device->channel->inbound_lock, flags);
379 hv_set_drvdata(device, NULL);
380 spin_unlock_irqrestore(&device->channel->inbound_lock, flags);
381
382 /*
383 * At this point, no one should be accessing net_device
384 * except in here
385 */
386 dev_notice(&device->device, "net device safe to remove\n");
387
388 /* Now, we can close the channel safely */
389 vmbus_close(device->channel);
390
391 /* Release all resources */
392 list_for_each_entry_safe(netvsc_packet, pos,
393 &net_device->recv_pkt_list, list_ent) {
394 list_del(&netvsc_packet->list_ent);
395 kfree(netvsc_packet);
396 }
397
398 kfree(net_device);
399 return 0;
400}
401
402static void netvsc_send_completion(struct hv_device *device,
403 struct vmpacket_descriptor *packet)
404{
405 struct netvsc_device *net_device;
406 struct nvsp_message *nvsp_packet;
407 struct hv_netvsc_packet *nvsc_packet;
408 struct net_device *ndev;
409
410 net_device = get_inbound_net_device(device);
411 if (!net_device)
412 return;
413 ndev = net_device->ndev;
414
415 nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
416 (packet->offset8 << 3));
417
418 if ((nvsp_packet->hdr.msg_type == NVSP_MSG_TYPE_INIT_COMPLETE) ||
419 (nvsp_packet->hdr.msg_type ==
420 NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE) ||
421 (nvsp_packet->hdr.msg_type ==
422 NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE)) {
423 /* Copy the response back */
424 memcpy(&net_device->channel_init_pkt, nvsp_packet,
425 sizeof(struct nvsp_message));
426 complete(&net_device->channel_init_wait);
427 } else if (nvsp_packet->hdr.msg_type ==
428 NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE) {
429 /* Get the send context */
430 nvsc_packet = (struct hv_netvsc_packet *)(unsigned long)
431 packet->trans_id;
432
433 /* Notify the layer above us */
434 nvsc_packet->completion.send.send_completion(
435 nvsc_packet->completion.send.send_completion_ctx);
436
437 atomic_dec(&net_device->num_outstanding_sends);
438
439 if (netif_queue_stopped(ndev))
440 netif_wake_queue(ndev);
441 } else {
442 netdev_err(ndev, "Unknown send completion packet type- "
443 "%d received!!\n", nvsp_packet->hdr.msg_type);
444 }
445
446}
447
448int netvsc_send(struct hv_device *device,
449 struct hv_netvsc_packet *packet)
450{
451 struct netvsc_device *net_device;
452 int ret = 0;
453 struct nvsp_message sendMessage;
454 struct net_device *ndev;
455
456 net_device = get_outbound_net_device(device);
457 if (!net_device)
458 return -ENODEV;
459 ndev = net_device->ndev;
460
461 sendMessage.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
462 if (packet->is_data_pkt) {
463 /* 0 is RMC_DATA; */
464 sendMessage.msg.v1_msg.send_rndis_pkt.channel_type = 0;
465 } else {
466 /* 1 is RMC_CONTROL; */
467 sendMessage.msg.v1_msg.send_rndis_pkt.channel_type = 1;
468 }
469
470 /* Not using send buffer section */
471 sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_index =
472 0xFFFFFFFF;
473 sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0;
474
475 if (packet->page_buf_cnt) {
476 ret = vmbus_sendpacket_pagebuffer(device->channel,
477 packet->page_buf,
478 packet->page_buf_cnt,
479 &sendMessage,
480 sizeof(struct nvsp_message),
481 (unsigned long)packet);
482 } else {
483 ret = vmbus_sendpacket(device->channel, &sendMessage,
484 sizeof(struct nvsp_message),
485 (unsigned long)packet,
486 VM_PKT_DATA_INBAND,
487 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
488
489 }
490
491 if (ret == 0) {
492 atomic_inc(&net_device->num_outstanding_sends);
493 } else if (ret == -EAGAIN) {
494 netif_stop_queue(ndev);
495 if (atomic_read(&net_device->num_outstanding_sends) < 1)
496 netif_wake_queue(ndev);
497 } else {
498 netdev_err(ndev, "Unable to send packet %p ret %d\n",
499 packet, ret);
500 }
501
502 return ret;
503}
504
505static void netvsc_send_recv_completion(struct hv_device *device,
506 u64 transaction_id)
507{
508 struct nvsp_message recvcompMessage;
509 int retries = 0;
510 int ret;
511 struct net_device *ndev;
512 struct netvsc_device *net_device = hv_get_drvdata(device);
513
514 ndev = net_device->ndev;
515
516 recvcompMessage.hdr.msg_type =
517 NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE;
518
519 /* FIXME: Pass in the status */
520 recvcompMessage.msg.v1_msg.send_rndis_pkt_complete.status =
521 NVSP_STAT_SUCCESS;
522
523retry_send_cmplt:
524 /* Send the completion */
525 ret = vmbus_sendpacket(device->channel, &recvcompMessage,
526 sizeof(struct nvsp_message), transaction_id,
527 VM_PKT_COMP, 0);
528 if (ret == 0) {
529 /* success */
530 /* no-op */
531 } else if (ret == -EAGAIN) {
532 /* no more room...wait a bit and attempt to retry 3 times */
533 retries++;
534 netdev_err(ndev, "unable to send receive completion pkt"
535 " (tid %llx)...retrying %d\n", transaction_id, retries);
536
537 if (retries < 4) {
538 udelay(100);
539 goto retry_send_cmplt;
540 } else {
541 netdev_err(ndev, "unable to send receive "
542 "completion pkt (tid %llx)...give up retrying\n",
543 transaction_id);
544 }
545 } else {
546 netdev_err(ndev, "unable to send receive "
547 "completion pkt - %llx\n", transaction_id);
548 }
549}
550
551/* Send a receive completion packet to RNDIS device (ie NetVsp) */
552static void netvsc_receive_completion(void *context)
553{
554 struct hv_netvsc_packet *packet = context;
555 struct hv_device *device = (struct hv_device *)packet->device;
556 struct netvsc_device *net_device;
557 u64 transaction_id = 0;
558 bool fsend_receive_comp = false;
559 unsigned long flags;
560 struct net_device *ndev;
561
562	/*
563	 * Even though it seems logical to do a GetOutboundNetDevice() here to
564	 * send out the receive completion, we are using GetInboundNetDevice()
565	 * since we may have disabled outbound traffic already.
566	 */
567 net_device = get_inbound_net_device(device);
568 if (!net_device)
569 return;
570 ndev = net_device->ndev;
571
572 /* Overloading use of the lock. */
573 spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
574
575 packet->xfer_page_pkt->count--;
576
577	/*
578	 * Last one in the line that represents 1 xfer page packet.
579	 * Return the xfer page packet itself to the freelist
580	 */
581 if (packet->xfer_page_pkt->count == 0) {
582 fsend_receive_comp = true;
583 transaction_id = packet->completion.recv.recv_completion_tid;
584 list_add_tail(&packet->xfer_page_pkt->list_ent,
585 &net_device->recv_pkt_list);
586
587 }
588
589 /* Put the packet back */
590 list_add_tail(&packet->list_ent, &net_device->recv_pkt_list);
591 spin_unlock_irqrestore(&net_device->recv_pkt_list_lock, flags);
592
593 /* Send a receive completion for the xfer page packet */
594 if (fsend_receive_comp)
595 netvsc_send_recv_completion(device, transaction_id);
596
597}
598
599static void netvsc_receive(struct hv_device *device,
600 struct vmpacket_descriptor *packet)
601{
602 struct netvsc_device *net_device;
603 struct vmtransfer_page_packet_header *vmxferpage_packet;
604 struct nvsp_message *nvsp_packet;
605 struct hv_netvsc_packet *netvsc_packet = NULL;
606 unsigned long start;
607 unsigned long end, end_virtual;
608 /* struct netvsc_driver *netvscDriver; */
609 struct xferpage_packet *xferpage_packet = NULL;
610 int i, j;
611 int count = 0, bytes_remain = 0;
612 unsigned long flags;
613 struct net_device *ndev;
614
615 LIST_HEAD(listHead);
616
617 net_device = get_inbound_net_device(device);
618 if (!net_device)
619 return;
620 ndev = net_device->ndev;
621
622	/*
623	 * All inbound packets other than send completions should be xfer page
624	 * packets
625	 */
626 if (packet->type != VM_PKT_DATA_USING_XFER_PAGES) {
627 netdev_err(ndev, "Unknown packet type received - %d\n",
628 packet->type);
629 return;
630 }
631
632 nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
633 (packet->offset8 << 3));
634
635 /* Make sure this is a valid nvsp packet */
636 if (nvsp_packet->hdr.msg_type !=
637 NVSP_MSG1_TYPE_SEND_RNDIS_PKT) {
638 netdev_err(ndev, "Unknown nvsp packet type received-"
639 " %d\n", nvsp_packet->hdr.msg_type);
640 return;
641 }
642
643 vmxferpage_packet = (struct vmtransfer_page_packet_header *)packet;
644
645 if (vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID) {
646 netdev_err(ndev, "Invalid xfer page set id - "
647 "expecting %x got %x\n", NETVSC_RECEIVE_BUFFER_ID,
648 vmxferpage_packet->xfer_pageset_id);
649 return;
650 }
651
652 /*
653 * Grab free packets (range count + 1) to represent this xfer
654 * page packet. +1 to represent the xfer page packet itself.
655 * We grab it here so that we know exactly how many we can
656 * fulfil
657 */
658 spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
659 while (!list_empty(&net_device->recv_pkt_list)) {
660 list_move_tail(net_device->recv_pkt_list.next, &listHead);
661 if (++count == vmxferpage_packet->range_cnt + 1)
662 break;
663 }
664 spin_unlock_irqrestore(&net_device->recv_pkt_list_lock, flags);
665
666 /*
667 * We need at least 2 netvsc pkts (1 to represent the xfer
668	 * page and at least 1 for the range) i.e. we can handle
669 * some of the xfer page packet ranges...
670 */
671 if (count < 2) {
672 netdev_err(ndev, "Got only %d netvsc pkt...needed "
673 "%d pkts. Dropping this xfer page packet completely!\n",
674 count, vmxferpage_packet->range_cnt + 1);
675
676 /* Return it to the freelist */
677 spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
678 for (i = count; i != 0; i--) {
679 list_move_tail(listHead.next,
680 &net_device->recv_pkt_list);
681 }
682 spin_unlock_irqrestore(&net_device->recv_pkt_list_lock,
683 flags);
684
685 netvsc_send_recv_completion(device,
686 vmxferpage_packet->d.trans_id);
687
688 return;
689 }
690
691 /* Remove the 1st packet to represent the xfer page packet itself */
692 xferpage_packet = (struct xferpage_packet *)listHead.next;
693 list_del(&xferpage_packet->list_ent);
694
695 /* This is how much we can satisfy */
696 xferpage_packet->count = count - 1;
697
698 if (xferpage_packet->count != vmxferpage_packet->range_cnt) {
699 netdev_err(ndev, "Needed %d netvsc pkts to satisfy "
700 "this xfer page...got %d\n",
701 vmxferpage_packet->range_cnt, xferpage_packet->count);
702 }
703
704 /* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
705 for (i = 0; i < (count - 1); i++) {
706 netvsc_packet = (struct hv_netvsc_packet *)listHead.next;
707 list_del(&netvsc_packet->list_ent);
708
709 /* Initialize the netvsc packet */
710 netvsc_packet->xfer_page_pkt = xferpage_packet;
711 netvsc_packet->completion.recv.recv_completion =
712 netvsc_receive_completion;
713 netvsc_packet->completion.recv.recv_completion_ctx =
714 netvsc_packet;
715 netvsc_packet->device = device;
716 /* Save this so that we can send it back */
717 netvsc_packet->completion.recv.recv_completion_tid =
718 vmxferpage_packet->d.trans_id;
719
720 netvsc_packet->total_data_buflen =
721 vmxferpage_packet->ranges[i].byte_count;
722 netvsc_packet->page_buf_cnt = 1;
723
724 netvsc_packet->page_buf[0].len =
725 vmxferpage_packet->ranges[i].byte_count;
726
727 start = virt_to_phys((void *)((unsigned long)net_device->
728 recv_buf + vmxferpage_packet->ranges[i].byte_offset));
729
730 netvsc_packet->page_buf[0].pfn = start >> PAGE_SHIFT;
731 end_virtual = (unsigned long)net_device->recv_buf
732 + vmxferpage_packet->ranges[i].byte_offset
733 + vmxferpage_packet->ranges[i].byte_count - 1;
734 end = virt_to_phys((void *)end_virtual);
735
736 /* Calculate the page relative offset */
737 netvsc_packet->page_buf[0].offset =
738 vmxferpage_packet->ranges[i].byte_offset &
739 (PAGE_SIZE - 1);
740 if ((end >> PAGE_SHIFT) != (start >> PAGE_SHIFT)) {
741 /* Handle frame across multiple pages: */
742 netvsc_packet->page_buf[0].len =
743 (netvsc_packet->page_buf[0].pfn <<
744 PAGE_SHIFT)
745 + PAGE_SIZE - start;
746 bytes_remain = netvsc_packet->total_data_buflen -
747 netvsc_packet->page_buf[0].len;
748 for (j = 1; j < NETVSC_PACKET_MAXPAGE; j++) {
749 netvsc_packet->page_buf[j].offset = 0;
750 if (bytes_remain <= PAGE_SIZE) {
751 netvsc_packet->page_buf[j].len =
752 bytes_remain;
753 bytes_remain = 0;
754 } else {
755 netvsc_packet->page_buf[j].len =
756 PAGE_SIZE;
757 bytes_remain -= PAGE_SIZE;
758 }
759 netvsc_packet->page_buf[j].pfn =
760 virt_to_phys((void *)(end_virtual -
761 bytes_remain)) >> PAGE_SHIFT;
762 netvsc_packet->page_buf_cnt++;
763 if (bytes_remain == 0)
764 break;
765 }
766 }
767
768 /* Pass it to the upper layer */
769 rndis_filter_receive(device, netvsc_packet);
770
771 netvsc_receive_completion(netvsc_packet->
772 completion.recv.recv_completion_ctx);
773 }
774
775}
776
777static void netvsc_channel_cb(void *context)
778{
779 int ret;
780 struct hv_device *device = context;
781 struct netvsc_device *net_device;
782 u32 bytes_recvd;
783 u64 request_id;
784 unsigned char *packet;
785 struct vmpacket_descriptor *desc;
786 unsigned char *buffer;
787 int bufferlen = NETVSC_PACKET_SIZE;
788 struct net_device *ndev;
789
790 packet = kzalloc(NETVSC_PACKET_SIZE * sizeof(unsigned char),
791 GFP_ATOMIC);
792 if (!packet)
793 return;
794 buffer = packet;
795
796 net_device = get_inbound_net_device(device);
797 if (!net_device)
798 goto out;
799 ndev = net_device->ndev;
800
801 do {
802 ret = vmbus_recvpacket_raw(device->channel, buffer, bufferlen,
803 &bytes_recvd, &request_id);
804 if (ret == 0) {
805 if (bytes_recvd > 0) {
806 desc = (struct vmpacket_descriptor *)buffer;
807 switch (desc->type) {
808 case VM_PKT_COMP:
809 netvsc_send_completion(device, desc);
810 break;
811
812 case VM_PKT_DATA_USING_XFER_PAGES:
813 netvsc_receive(device, desc);
814 break;
815
816 default:
817 netdev_err(ndev,
818 "unhandled packet type %d, "
819 "tid %llx len %d\n",
820 desc->type, request_id,
821 bytes_recvd);
822 break;
823 }
824
825 /* reset */
826 if (bufferlen > NETVSC_PACKET_SIZE) {
827 kfree(buffer);
828 buffer = packet;
829 bufferlen = NETVSC_PACKET_SIZE;
830 }
831 } else {
832 /* reset */
833 if (bufferlen > NETVSC_PACKET_SIZE) {
834 kfree(buffer);
835 buffer = packet;
836 bufferlen = NETVSC_PACKET_SIZE;
837 }
838
839 break;
840 }
841 } else if (ret == -ENOBUFS) {
842 /* Handle large packet */
843 buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
844 if (buffer == NULL) {
845 /* Try again next time around */
846 netdev_err(ndev,
847 "unable to allocate buffer of size "
848 "(%d)!!\n", bytes_recvd);
849 break;
850 }
851
852 bufferlen = bytes_recvd;
853 }
854 } while (1);
855
856out:
857 kfree(buffer);
858 return;
859}
860
861/*
862 * netvsc_device_add - Callback when the device belonging to this
863 * driver is added
864 */
865int netvsc_device_add(struct hv_device *device, void *additional_info)
866{
867 int ret = 0;
868 int i;
869 int ring_size =
870 ((struct netvsc_device_info *)additional_info)->ring_size;
871 struct netvsc_device *net_device;
872 struct hv_netvsc_packet *packet, *pos;
873 struct net_device *ndev;
874
875 net_device = alloc_net_device(device);
876 if (!net_device) {
877 ret = -ENOMEM;
878 goto cleanup;
879 }
880
881 /*
882 * Coming into this function, struct net_device * is
883 * registered as the driver private data.
884 * In alloc_net_device(), we register struct netvsc_device *
885 * as the driver private data and stash away struct net_device *
886 * in struct netvsc_device *.
887 */
888 ndev = net_device->ndev;
889
890 /* Initialize the NetVSC channel extension */
891 net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
892 spin_lock_init(&net_device->recv_pkt_list_lock);
893
894 INIT_LIST_HEAD(&net_device->recv_pkt_list);
895
896 for (i = 0; i < NETVSC_RECEIVE_PACKETLIST_COUNT; i++) {
897 packet = kzalloc(sizeof(struct hv_netvsc_packet) +
898 (NETVSC_RECEIVE_SG_COUNT *
899 sizeof(struct hv_page_buffer)), GFP_KERNEL);
900 if (!packet)
901 break;
902
903 list_add_tail(&packet->list_ent,
904 &net_device->recv_pkt_list);
905 }
906 init_completion(&net_device->channel_init_wait);
907
908 /* Open the channel */
909 ret = vmbus_open(device->channel, ring_size * PAGE_SIZE,
910 ring_size * PAGE_SIZE, NULL, 0,
911 netvsc_channel_cb, device);
912
913 if (ret != 0) {
914 netdev_err(ndev, "unable to open channel: %d\n", ret);
915 goto cleanup;
916 }
917
918 /* Channel is opened */
919 pr_info("hv_netvsc channel opened successfully\n");
920
921 /* Connect with the NetVsp */
922 ret = netvsc_connect_vsp(device);
923 if (ret != 0) {
924 netdev_err(ndev,
925 "unable to connect to NetVSP - %d\n", ret);
926 goto close;
927 }
928
929 return ret;
930
931close:
932 /* Now, we can close the channel safely */
933 vmbus_close(device->channel);
934
935cleanup:
936
937 if (net_device) {
938 list_for_each_entry_safe(packet, pos,
939 &net_device->recv_pkt_list,
940 list_ent) {
941 list_del(&packet->list_ent);
942 kfree(packet);
943 }
944
945 kfree(net_device);
946 }
947
948 return ret;
949}
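
The page-splitting arithmetic in netvsc_receive() above, where a received byte range is mapped onto one page_buf entry per receive-buffer page it touches, is easier to follow in isolation. The standalone userspace sketch below is an illustration only, not driver code: PAGE_SIZE is assumed to be 4096 and the ex_* names are invented, but the offset/length logic mirrors the loop above.

#include <stdio.h>

#define EX_PAGE_SIZE 4096UL

struct ex_page_buffer {
	unsigned long offset;	/* offset within this page */
	unsigned long len;	/* bytes of the range in this page */
};

/* Split (byte_offset, byte_count) within a page-aligned buffer into
 * per-page entries, the way netvsc_receive() fills packet->page_buf[]. */
static int ex_split_range(unsigned long byte_offset, unsigned long byte_count,
			  struct ex_page_buffer *buf, int max_pages)
{
	unsigned long remain = byte_count;
	int n = 1;

	buf[0].offset = byte_offset & (EX_PAGE_SIZE - 1);
	buf[0].len = EX_PAGE_SIZE - buf[0].offset;
	if (buf[0].len > byte_count)
		buf[0].len = byte_count;	/* range fits in one page */
	remain -= buf[0].len;

	while (remain && n < max_pages) {
		buf[n].offset = 0;
		buf[n].len = remain < EX_PAGE_SIZE ? remain : EX_PAGE_SIZE;
		remain -= buf[n].len;
		n++;
	}
	return n;
}

int main(void)
{
	struct ex_page_buffer buf[8];
	int i, n = ex_split_range(4000, 1514, buf, 8);

	for (i = 0; i < n; i++)
		printf("page %d: offset %lu len %lu\n", i, buf[i].offset, buf[i].len);
	return 0;
}

For the sample range (offset 4000, length 1514) this prints a 96-byte slice in the first page and a 1418-byte slice in the next, which is how page_buf[0] and page_buf[1] end up populated above.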
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
new file mode 100644
index 000000000000..7da85ebd7ac6
--- /dev/null
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -0,0 +1,474 @@
1/*
2 * Copyright (c) 2009, Microsoft Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
17 * Authors:
18 * Haiyang Zhang <haiyangz@microsoft.com>
19 * Hank Janssen <hjanssen@microsoft.com>
20 */
21#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22
23#include <linux/init.h>
24#include <linux/atomic.h>
25#include <linux/module.h>
26#include <linux/highmem.h>
27#include <linux/device.h>
28#include <linux/io.h>
29#include <linux/delay.h>
30#include <linux/netdevice.h>
31#include <linux/inetdevice.h>
32#include <linux/etherdevice.h>
33#include <linux/skbuff.h>
34#include <linux/in.h>
35#include <linux/slab.h>
36#include <net/arp.h>
37#include <net/route.h>
38#include <net/sock.h>
39#include <net/pkt_sched.h>
40
41#include "hyperv_net.h"
42
43struct net_device_context {
44 /* point back to our device context */
45 struct hv_device *device_ctx;
46 struct delayed_work dwork;
47};
48
49
50static int ring_size = 128;
51module_param(ring_size, int, S_IRUGO);
52MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
53
54struct set_multicast_work {
55 struct work_struct work;
56 struct net_device *net;
57};
58
59static void do_set_multicast(struct work_struct *w)
60{
61 struct set_multicast_work *swk =
62 container_of(w, struct set_multicast_work, work);
63 struct net_device *net = swk->net;
64
65 struct net_device_context *ndevctx = netdev_priv(net);
66 struct netvsc_device *nvdev;
67 struct rndis_device *rdev;
68
69 nvdev = hv_get_drvdata(ndevctx->device_ctx);
70 if (nvdev == NULL)
71 return;
72
73 rdev = nvdev->extension;
74 if (rdev == NULL)
75 return;
76
77 if (net->flags & IFF_PROMISC)
78 rndis_filter_set_packet_filter(rdev,
79 NDIS_PACKET_TYPE_PROMISCUOUS);
80 else
81 rndis_filter_set_packet_filter(rdev,
82 NDIS_PACKET_TYPE_BROADCAST |
83 NDIS_PACKET_TYPE_ALL_MULTICAST |
84 NDIS_PACKET_TYPE_DIRECTED);
85
86 kfree(w);
87}
88
89static void netvsc_set_multicast_list(struct net_device *net)
90{
91 struct set_multicast_work *swk =
92 kmalloc(sizeof(struct set_multicast_work), GFP_ATOMIC);
93 if (swk == NULL)
94 return;
95
96 swk->net = net;
97 INIT_WORK(&swk->work, do_set_multicast);
98 schedule_work(&swk->work);
99}
100
101static int netvsc_open(struct net_device *net)
102{
103 struct net_device_context *net_device_ctx = netdev_priv(net);
104 struct hv_device *device_obj = net_device_ctx->device_ctx;
105 int ret = 0;
106
107 /* Open up the device */
108 ret = rndis_filter_open(device_obj);
109 if (ret != 0) {
110 netdev_err(net, "unable to open device (ret %d).\n", ret);
111 return ret;
112 }
113
114 netif_start_queue(net);
115
116 return ret;
117}
118
119static int netvsc_close(struct net_device *net)
120{
121 struct net_device_context *net_device_ctx = netdev_priv(net);
122 struct hv_device *device_obj = net_device_ctx->device_ctx;
123 int ret;
124
125 netif_stop_queue(net);
126
127 ret = rndis_filter_close(device_obj);
128 if (ret != 0)
129 netdev_err(net, "unable to close device (ret %d).\n", ret);
130
131 return ret;
132}
133
134static void netvsc_xmit_completion(void *context)
135{
136 struct hv_netvsc_packet *packet = (struct hv_netvsc_packet *)context;
137 struct sk_buff *skb = (struct sk_buff *)
138 (unsigned long)packet->completion.send.send_completion_tid;
139
140 kfree(packet);
141
142 if (skb)
143 dev_kfree_skb_any(skb);
144}
145
146static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
147{
148 struct net_device_context *net_device_ctx = netdev_priv(net);
149 struct hv_netvsc_packet *packet;
150 int ret;
151 unsigned int i, num_pages;
152
153 /* Add 1 for skb->data and additional one for RNDIS */
154 num_pages = skb_shinfo(skb)->nr_frags + 1 + 1;
155
156 /* Allocate a netvsc packet based on # of frags. */
157 packet = kzalloc(sizeof(struct hv_netvsc_packet) +
158 (num_pages * sizeof(struct hv_page_buffer)) +
159 sizeof(struct rndis_filter_packet), GFP_ATOMIC);
160 if (!packet) {
161 /* out of memory, drop packet */
162 netdev_err(net, "unable to allocate hv_netvsc_packet\n");
163
164 dev_kfree_skb(skb);
165 net->stats.tx_dropped++;
166 return NETDEV_TX_BUSY;
167 }
168
169 packet->extension = (void *)(unsigned long)packet +
170 sizeof(struct hv_netvsc_packet) +
171 (num_pages * sizeof(struct hv_page_buffer));
172
173 /* Setup the rndis header */
174 packet->page_buf_cnt = num_pages;
175
176 /* Initialize it from the skb */
177 packet->total_data_buflen = skb->len;
178
179 /* Start filling in the page buffers starting after RNDIS buffer. */
180 packet->page_buf[1].pfn = virt_to_phys(skb->data) >> PAGE_SHIFT;
181 packet->page_buf[1].offset
182 = (unsigned long)skb->data & (PAGE_SIZE - 1);
183 packet->page_buf[1].len = skb_headlen(skb);
184
185 /* Additional fragments are after SKB data */
186 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
187 const skb_frag_t *f = &skb_shinfo(skb)->frags[i];
188
189 packet->page_buf[i+2].pfn = page_to_pfn(skb_frag_page(f));
190 packet->page_buf[i+2].offset = f->page_offset;
191 packet->page_buf[i+2].len = skb_frag_size(f);
192 }
193
194 /* Set the completion routine */
195 packet->completion.send.send_completion = netvsc_xmit_completion;
196 packet->completion.send.send_completion_ctx = packet;
197 packet->completion.send.send_completion_tid = (unsigned long)skb;
198
199 ret = rndis_filter_send(net_device_ctx->device_ctx,
200 packet);
201 if (ret == 0) {
202 net->stats.tx_bytes += skb->len;
203 net->stats.tx_packets++;
204 } else {
205 /* we are shutting down or bus overloaded, just drop packet */
206 net->stats.tx_dropped++;
207 kfree(packet);
208 dev_kfree_skb_any(skb);
209 }
210
211 return ret ? NETDEV_TX_BUSY : NETDEV_TX_OK;
212}
213
214/*
215 * netvsc_linkstatus_callback - Link up/down notification
216 */
217void netvsc_linkstatus_callback(struct hv_device *device_obj,
218 unsigned int status)
219{
220 struct net_device *net;
221 struct net_device_context *ndev_ctx;
222 struct netvsc_device *net_device;
223
224 net_device = hv_get_drvdata(device_obj);
225 net = net_device->ndev;
226
227 if (!net) {
228 netdev_err(net, "got link status but net device "
229 "not initialized yet\n");
230 return;
231 }
232
233 if (status == 1) {
234 netif_carrier_on(net);
235 netif_wake_queue(net);
236 ndev_ctx = netdev_priv(net);
237 schedule_delayed_work(&ndev_ctx->dwork, 0);
238 schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20));
239 } else {
240 netif_carrier_off(net);
241 netif_stop_queue(net);
242 }
243}
244
245/*
246 * netvsc_recv_callback - Callback when we receive a packet from the
247 * "wire" on the specified device.
248 */
249int netvsc_recv_callback(struct hv_device *device_obj,
250 struct hv_netvsc_packet *packet)
251{
252 struct net_device *net = dev_get_drvdata(&device_obj->device);
253 struct sk_buff *skb;
254 void *data;
255 int i;
256 unsigned long flags;
257 struct netvsc_device *net_device;
258
259 net_device = hv_get_drvdata(device_obj);
260 net = net_device->ndev;
261
262 if (!net) {
263 netdev_err(net, "got receive callback but net device"
264 " not initialized yet\n");
265 return 0;
266 }
267
268 /* Allocate a skb - TODO direct I/O to pages? */
269 skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen);
270 if (unlikely(!skb)) {
271 ++net->stats.rx_dropped;
272 return 0;
273 }
274
275 /* for kmap_atomic */
276 local_irq_save(flags);
277
278 /*
279	 * Copy to skb. This copy is needed here since the memory pointed to by
280 * hv_netvsc_packet cannot be deallocated
281 */
282 for (i = 0; i < packet->page_buf_cnt; i++) {
283 data = kmap_atomic(pfn_to_page(packet->page_buf[i].pfn),
284 KM_IRQ1);
285 data = (void *)(unsigned long)data +
286 packet->page_buf[i].offset;
287
288 memcpy(skb_put(skb, packet->page_buf[i].len), data,
289 packet->page_buf[i].len);
290
291 kunmap_atomic((void *)((unsigned long)data -
292 packet->page_buf[i].offset), KM_IRQ1);
293 }
294
295 local_irq_restore(flags);
296
297 skb->protocol = eth_type_trans(skb, net);
298 skb->ip_summed = CHECKSUM_NONE;
299
300 net->stats.rx_packets++;
301 net->stats.rx_bytes += skb->len;
302
303 /*
304 * Pass the skb back up. Network stack will deallocate the skb when it
305 * is done.
306 * TODO - use NAPI?
307 */
308 netif_rx(skb);
309
310 return 0;
311}
312
313static void netvsc_get_drvinfo(struct net_device *net,
314 struct ethtool_drvinfo *info)
315{
316 strcpy(info->driver, "hv_netvsc");
317 strcpy(info->version, HV_DRV_VERSION);
318 strcpy(info->fw_version, "N/A");
319}
320
321static const struct ethtool_ops ethtool_ops = {
322 .get_drvinfo = netvsc_get_drvinfo,
323 .get_link = ethtool_op_get_link,
324};
325
326static const struct net_device_ops device_ops = {
327 .ndo_open = netvsc_open,
328 .ndo_stop = netvsc_close,
329 .ndo_start_xmit = netvsc_start_xmit,
330 .ndo_set_rx_mode = netvsc_set_multicast_list,
331 .ndo_change_mtu = eth_change_mtu,
332 .ndo_validate_addr = eth_validate_addr,
333 .ndo_set_mac_address = eth_mac_addr,
334};
335
336/*
337 * Send GARP packet to network peers after migrations.
338 * After Quick Migration, the network is not immediately operational in the
339 * current context when the RNDIS_STATUS_MEDIA_CONNECT event is received. So,
340 * send another netif_notify_peers() from a delayed work item; otherwise the
341 * GARP packet will not be sent after quick migration, causing network disconnection.
342 */
343static void netvsc_send_garp(struct work_struct *w)
344{
345 struct net_device_context *ndev_ctx;
346 struct net_device *net;
347 struct netvsc_device *net_device;
348
349 ndev_ctx = container_of(w, struct net_device_context, dwork.work);
350 net_device = hv_get_drvdata(ndev_ctx->device_ctx);
351 net = net_device->ndev;
352 netif_notify_peers(net);
353}
354
355
356static int netvsc_probe(struct hv_device *dev,
357 const struct hv_vmbus_device_id *dev_id)
358{
359 struct net_device *net = NULL;
360 struct net_device_context *net_device_ctx;
361 struct netvsc_device_info device_info;
362 int ret;
363
364 net = alloc_etherdev(sizeof(struct net_device_context));
365 if (!net)
366 return -ENOMEM;
367
368 /* Set initial state */
369 netif_carrier_off(net);
370
371 net_device_ctx = netdev_priv(net);
372 net_device_ctx->device_ctx = dev;
373 hv_set_drvdata(dev, net);
374 INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_send_garp);
375
376 net->netdev_ops = &device_ops;
377
378 /* TODO: Add GSO and Checksum offload */
379 net->hw_features = NETIF_F_SG;
380 net->features = NETIF_F_SG;
381
382 SET_ETHTOOL_OPS(net, &ethtool_ops);
383 SET_NETDEV_DEV(net, &dev->device);
384
385 ret = register_netdev(net);
386 if (ret != 0) {
387 pr_err("Unable to register netdev.\n");
388 free_netdev(net);
389 goto out;
390 }
391
392 /* Notify the netvsc driver of the new device */
393 device_info.ring_size = ring_size;
394 ret = rndis_filter_device_add(dev, &device_info);
395 if (ret != 0) {
396 netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
397 unregister_netdev(net);
398 free_netdev(net);
399 hv_set_drvdata(dev, NULL);
400 return ret;
401 }
402 memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
403
404 netif_carrier_on(net);
405
406out:
407 return ret;
408}
409
410static int netvsc_remove(struct hv_device *dev)
411{
412 struct net_device *net;
413 struct net_device_context *ndev_ctx;
414 struct netvsc_device *net_device;
415
416 net_device = hv_get_drvdata(dev);
417 net = net_device->ndev;
418
419 if (net == NULL) {
420 dev_err(&dev->device, "No net device to remove\n");
421 return 0;
422 }
423
424 ndev_ctx = netdev_priv(net);
425 cancel_delayed_work_sync(&ndev_ctx->dwork);
426
427 /* Stop outbound asap */
428 netif_stop_queue(net);
429
430 unregister_netdev(net);
431
432 /*
433 * Call to the vsc driver to let it know that the device is being
434 * removed
435 */
436 rndis_filter_device_remove(dev);
437
438 free_netdev(net);
439 return 0;
440}
441
442static const struct hv_vmbus_device_id id_table[] = {
443 /* Network guid */
444 { VMBUS_DEVICE(0x63, 0x51, 0x61, 0xF8, 0x3E, 0xDF, 0xc5, 0x46,
445 0x91, 0x3F, 0xF2, 0xD2, 0xF9, 0x65, 0xED, 0x0E) },
446 { },
447};
448
449MODULE_DEVICE_TABLE(vmbus, id_table);
450
451/* The one and only one */
452static struct hv_driver netvsc_drv = {
453 .name = "netvsc",
454 .id_table = id_table,
455 .probe = netvsc_probe,
456 .remove = netvsc_remove,
457};
458
459static void __exit netvsc_drv_exit(void)
460{
461 vmbus_driver_unregister(&netvsc_drv);
462}
463
464static int __init netvsc_drv_init(void)
465{
466 return vmbus_driver_register(&netvsc_drv);
467}
468
469MODULE_LICENSE("GPL");
470MODULE_VERSION(HV_DRV_VERSION);
471MODULE_DESCRIPTION("Microsoft Hyper-V network driver");
472
473module_init(netvsc_drv_init);
474module_exit(netvsc_drv_exit);
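
Two small notes on the glue above. ring_size is a module parameter, so the 128-page default for the send/receive rings can be overridden when the module is loaded. And because netvsc_set_multicast_list() runs in a context that cannot block, it defers the RNDIS filter update to a dynamically allocated work item that the handler itself frees. A minimal, self-contained sketch of that second pattern follows; it is not part of the driver, and the demo_* names and the integer payload are stand-ins for struct set_multicast_work and its net_device pointer.

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/slab.h>

struct demo_work {
	struct work_struct work;
	int value;			/* stand-in for the net_device pointer */
};

static void demo_handler(struct work_struct *w)
{
	struct demo_work *dw = container_of(w, struct demo_work, work);

	pr_info("demo work ran with value %d\n", dw->value);
	kfree(dw);			/* the handler owns and frees the item */
}

static int __init demo_init(void)
{
	struct demo_work *dw = kmalloc(sizeof(*dw), GFP_ATOMIC);

	if (!dw)
		return -ENOMEM;
	dw->value = 42;
	INIT_WORK(&dw->work, demo_handler);
	schedule_work(&dw->work);	/* queue it, as netvsc_set_multicast_list() does */
	return 0;
}

static void __exit demo_exit(void)
{
	flush_scheduled_work();
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");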
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
new file mode 100644
index 000000000000..418e7aac229c
--- /dev/null
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -0,0 +1,834 @@
1/*
2 * Copyright (c) 2009, Microsoft Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
17 * Authors:
18 * Haiyang Zhang <haiyangz@microsoft.com>
19 * Hank Janssen <hjanssen@microsoft.com>
20 */
21#include <linux/kernel.h>
22#include <linux/sched.h>
23#include <linux/wait.h>
24#include <linux/highmem.h>
25#include <linux/slab.h>
26#include <linux/io.h>
27#include <linux/if_ether.h>
28#include <linux/netdevice.h>
29
30#include "hyperv_net.h"
31
32
33struct rndis_request {
34 struct list_head list_ent;
35 struct completion wait_event;
36
37 /*
38 * FIXME: We assumed a fixed size response here. If we do ever need to
39 * handle a bigger response, we can either define a max response
40 * message or add a response buffer variable above this field
41 */
42 struct rndis_message response_msg;
43
44 /* Simplify allocation by having a netvsc packet inline */
45 struct hv_netvsc_packet pkt;
46 struct hv_page_buffer buf;
47 /* FIXME: We assumed a fixed size request here. */
48 struct rndis_message request_msg;
49};
50
51static void rndis_filter_send_completion(void *ctx);
52
53static void rndis_filter_send_request_completion(void *ctx);
54
55
56
57static struct rndis_device *get_rndis_device(void)
58{
59 struct rndis_device *device;
60
61 device = kzalloc(sizeof(struct rndis_device), GFP_KERNEL);
62 if (!device)
63 return NULL;
64
65 spin_lock_init(&device->request_lock);
66
67 INIT_LIST_HEAD(&device->req_list);
68
69 device->state = RNDIS_DEV_UNINITIALIZED;
70
71 return device;
72}
73
74static struct rndis_request *get_rndis_request(struct rndis_device *dev,
75 u32 msg_type,
76 u32 msg_len)
77{
78 struct rndis_request *request;
79 struct rndis_message *rndis_msg;
80 struct rndis_set_request *set;
81 unsigned long flags;
82
83 request = kzalloc(sizeof(struct rndis_request), GFP_KERNEL);
84 if (!request)
85 return NULL;
86
87 init_completion(&request->wait_event);
88
89 rndis_msg = &request->request_msg;
90 rndis_msg->ndis_msg_type = msg_type;
91 rndis_msg->msg_len = msg_len;
92
93 /*
94 * Set the request id. This field is always after the rndis header for
95 * request/response packet types so we just used the SetRequest as a
96 * template
97 */
98 set = &rndis_msg->msg.set_req;
99 set->req_id = atomic_inc_return(&dev->new_req_id);
100
101 /* Add to the request list */
102 spin_lock_irqsave(&dev->request_lock, flags);
103 list_add_tail(&request->list_ent, &dev->req_list);
104 spin_unlock_irqrestore(&dev->request_lock, flags);
105
106 return request;
107}
108
109static void put_rndis_request(struct rndis_device *dev,
110 struct rndis_request *req)
111{
112 unsigned long flags;
113
114 spin_lock_irqsave(&dev->request_lock, flags);
115 list_del(&req->list_ent);
116 spin_unlock_irqrestore(&dev->request_lock, flags);
117
118 kfree(req);
119}
120
121static void dump_rndis_message(struct hv_device *hv_dev,
122 struct rndis_message *rndis_msg)
123{
124 struct net_device *netdev;
125 struct netvsc_device *net_device;
126
127 net_device = hv_get_drvdata(hv_dev);
128 netdev = net_device->ndev;
129
130 switch (rndis_msg->ndis_msg_type) {
131 case REMOTE_NDIS_PACKET_MSG:
132 netdev_dbg(netdev, "REMOTE_NDIS_PACKET_MSG (len %u, "
133 "data offset %u data len %u, # oob %u, "
134 "oob offset %u, oob len %u, pkt offset %u, "
135 "pkt len %u\n",
136 rndis_msg->msg_len,
137 rndis_msg->msg.pkt.data_offset,
138 rndis_msg->msg.pkt.data_len,
139 rndis_msg->msg.pkt.num_oob_data_elements,
140 rndis_msg->msg.pkt.oob_data_offset,
141 rndis_msg->msg.pkt.oob_data_len,
142 rndis_msg->msg.pkt.per_pkt_info_offset,
143 rndis_msg->msg.pkt.per_pkt_info_len);
144 break;
145
146 case REMOTE_NDIS_INITIALIZE_CMPLT:
147 netdev_dbg(netdev, "REMOTE_NDIS_INITIALIZE_CMPLT "
148 "(len %u, id 0x%x, status 0x%x, major %d, minor %d, "
149 "device flags %d, max xfer size 0x%x, max pkts %u, "
150 "pkt aligned %u)\n",
151 rndis_msg->msg_len,
152 rndis_msg->msg.init_complete.req_id,
153 rndis_msg->msg.init_complete.status,
154 rndis_msg->msg.init_complete.major_ver,
155 rndis_msg->msg.init_complete.minor_ver,
156 rndis_msg->msg.init_complete.dev_flags,
157 rndis_msg->msg.init_complete.max_xfer_size,
158 rndis_msg->msg.init_complete.
159 max_pkt_per_msg,
160 rndis_msg->msg.init_complete.
161 pkt_alignment_factor);
162 break;
163
164 case REMOTE_NDIS_QUERY_CMPLT:
165 netdev_dbg(netdev, "REMOTE_NDIS_QUERY_CMPLT "
166 "(len %u, id 0x%x, status 0x%x, buf len %u, "
167 "buf offset %u)\n",
168 rndis_msg->msg_len,
169 rndis_msg->msg.query_complete.req_id,
170 rndis_msg->msg.query_complete.status,
171 rndis_msg->msg.query_complete.
172 info_buflen,
173 rndis_msg->msg.query_complete.
174 info_buf_offset);
175 break;
176
177 case REMOTE_NDIS_SET_CMPLT:
178 netdev_dbg(netdev,
179 "REMOTE_NDIS_SET_CMPLT (len %u, id 0x%x, status 0x%x)\n",
180 rndis_msg->msg_len,
181 rndis_msg->msg.set_complete.req_id,
182 rndis_msg->msg.set_complete.status);
183 break;
184
185 case REMOTE_NDIS_INDICATE_STATUS_MSG:
186 netdev_dbg(netdev, "REMOTE_NDIS_INDICATE_STATUS_MSG "
187 "(len %u, status 0x%x, buf len %u, buf offset %u)\n",
188 rndis_msg->msg_len,
189 rndis_msg->msg.indicate_status.status,
190 rndis_msg->msg.indicate_status.status_buflen,
191 rndis_msg->msg.indicate_status.status_buf_offset);
192 break;
193
194 default:
195 netdev_dbg(netdev, "0x%x (len %u)\n",
196 rndis_msg->ndis_msg_type,
197 rndis_msg->msg_len);
198 break;
199 }
200}
201
202static int rndis_filter_send_request(struct rndis_device *dev,
203 struct rndis_request *req)
204{
205 int ret;
206 struct hv_netvsc_packet *packet;
207
208 /* Setup the packet to send it */
209 packet = &req->pkt;
210
211 packet->is_data_pkt = false;
212 packet->total_data_buflen = req->request_msg.msg_len;
213 packet->page_buf_cnt = 1;
214
215 packet->page_buf[0].pfn = virt_to_phys(&req->request_msg) >>
216 PAGE_SHIFT;
217 packet->page_buf[0].len = req->request_msg.msg_len;
218 packet->page_buf[0].offset =
219 (unsigned long)&req->request_msg & (PAGE_SIZE - 1);
220
221 packet->completion.send.send_completion_ctx = req;/* packet; */
222 packet->completion.send.send_completion =
223 rndis_filter_send_request_completion;
224 packet->completion.send.send_completion_tid = (unsigned long)dev;
225
226 ret = netvsc_send(dev->net_dev->dev, packet);
227 return ret;
228}
229
230static void rndis_filter_receive_response(struct rndis_device *dev,
231 struct rndis_message *resp)
232{
233 struct rndis_request *request = NULL;
234 bool found = false;
235 unsigned long flags;
236 struct net_device *ndev;
237
238 ndev = dev->net_dev->ndev;
239
240 spin_lock_irqsave(&dev->request_lock, flags);
241 list_for_each_entry(request, &dev->req_list, list_ent) {
242 /*
243		 * All request/response messages contain the RequestId as the 1st
244 * field
245 */
246 if (request->request_msg.msg.init_req.req_id
247 == resp->msg.init_complete.req_id) {
248 found = true;
249 break;
250 }
251 }
252 spin_unlock_irqrestore(&dev->request_lock, flags);
253
254 if (found) {
255 if (resp->msg_len <= sizeof(struct rndis_message)) {
256 memcpy(&request->response_msg, resp,
257 resp->msg_len);
258 } else {
259 netdev_err(ndev,
260 "rndis response buffer overflow "
261 "detected (size %u max %zu)\n",
262 resp->msg_len,
263 sizeof(struct rndis_filter_packet));
264
265 if (resp->ndis_msg_type ==
266 REMOTE_NDIS_RESET_CMPLT) {
267 /* does not have a request id field */
268 request->response_msg.msg.reset_complete.
269 status = STATUS_BUFFER_OVERFLOW;
270 } else {
271 request->response_msg.msg.
272 init_complete.status =
273 STATUS_BUFFER_OVERFLOW;
274 }
275 }
276
277 complete(&request->wait_event);
278 } else {
279 netdev_err(ndev,
280 "no rndis request found for this response "
281 "(id 0x%x res type 0x%x)\n",
282 resp->msg.init_complete.req_id,
283 resp->ndis_msg_type);
284 }
285}
286
287static void rndis_filter_receive_indicate_status(struct rndis_device *dev,
288 struct rndis_message *resp)
289{
290 struct rndis_indicate_status *indicate =
291 &resp->msg.indicate_status;
292
293 if (indicate->status == RNDIS_STATUS_MEDIA_CONNECT) {
294 netvsc_linkstatus_callback(
295 dev->net_dev->dev, 1);
296 } else if (indicate->status == RNDIS_STATUS_MEDIA_DISCONNECT) {
297 netvsc_linkstatus_callback(
298 dev->net_dev->dev, 0);
299 } else {
300 /*
301 * TODO:
302 */
303 }
304}
305
306static void rndis_filter_receive_data(struct rndis_device *dev,
307 struct rndis_message *msg,
308 struct hv_netvsc_packet *pkt)
309{
310 struct rndis_packet *rndis_pkt;
311 u32 data_offset;
312 int i;
313
314 rndis_pkt = &msg->msg.pkt;
315
316 /*
317	 * FIXME: Handle multiple rndis pkt msgs that may be enclosed in this
318 * netvsc packet (ie TotalDataBufferLength != MessageLength)
319 */
320
321 /* Remove the rndis header and pass it back up the stack */
322 data_offset = RNDIS_HEADER_SIZE + rndis_pkt->data_offset;
323
324 pkt->total_data_buflen -= data_offset;
325 pkt->page_buf[0].offset += data_offset;
326 pkt->page_buf[0].len -= data_offset;
327
328	 /* Drop the 0th page, if rndis data goes beyond the page boundary */
329 if (pkt->page_buf[0].offset >= PAGE_SIZE) {
330 pkt->page_buf[1].offset = pkt->page_buf[0].offset - PAGE_SIZE;
331 pkt->page_buf[1].len -= pkt->page_buf[1].offset;
332 pkt->page_buf_cnt--;
333 for (i = 0; i < pkt->page_buf_cnt; i++)
334 pkt->page_buf[i] = pkt->page_buf[i+1];
335 }
336
337 pkt->is_data_pkt = true;
338
339 netvsc_recv_callback(dev->net_dev->dev, pkt);
340}
341
342int rndis_filter_receive(struct hv_device *dev,
343 struct hv_netvsc_packet *pkt)
344{
345 struct netvsc_device *net_dev = hv_get_drvdata(dev);
346 struct rndis_device *rndis_dev;
347 struct rndis_message rndis_msg;
348 struct rndis_message *rndis_hdr;
349 struct net_device *ndev;
350
351 if (!net_dev)
352 return -EINVAL;
353
354 ndev = net_dev->ndev;
355
356 /* Make sure the rndis device state is initialized */
357 if (!net_dev->extension) {
358 netdev_err(ndev, "got rndis message but no rndis device - "
359 "dropping this message!\n");
360 return -ENODEV;
361 }
362
363 rndis_dev = (struct rndis_device *)net_dev->extension;
364 if (rndis_dev->state == RNDIS_DEV_UNINITIALIZED) {
365 netdev_err(ndev, "got rndis message but rndis device "
366 "uninitialized...dropping this message!\n");
367 return -ENODEV;
368 }
369
370 rndis_hdr = (struct rndis_message *)kmap_atomic(
371 pfn_to_page(pkt->page_buf[0].pfn), KM_IRQ0);
372
373 rndis_hdr = (void *)((unsigned long)rndis_hdr +
374 pkt->page_buf[0].offset);
375
376 /* Make sure we got a valid rndis message */
377 if ((rndis_hdr->ndis_msg_type != REMOTE_NDIS_PACKET_MSG) &&
378 (rndis_hdr->msg_len > sizeof(struct rndis_message))) {
379 netdev_err(ndev, "incoming rndis message buffer overflow "
380 "detected (got %u, max %zu)..marking it an error!\n",
381 rndis_hdr->msg_len,
382 sizeof(struct rndis_message));
383 }
384
385 memcpy(&rndis_msg, rndis_hdr,
386 (rndis_hdr->msg_len > sizeof(struct rndis_message)) ?
387 sizeof(struct rndis_message) :
388 rndis_hdr->msg_len);
389
390 kunmap_atomic(rndis_hdr - pkt->page_buf[0].offset, KM_IRQ0);
391
392 dump_rndis_message(dev, &rndis_msg);
393
394 switch (rndis_msg.ndis_msg_type) {
395 case REMOTE_NDIS_PACKET_MSG:
396 /* data msg */
397 rndis_filter_receive_data(rndis_dev, &rndis_msg, pkt);
398 break;
399
400 case REMOTE_NDIS_INITIALIZE_CMPLT:
401 case REMOTE_NDIS_QUERY_CMPLT:
402 case REMOTE_NDIS_SET_CMPLT:
403 /* completion msgs */
404 rndis_filter_receive_response(rndis_dev, &rndis_msg);
405 break;
406
407 case REMOTE_NDIS_INDICATE_STATUS_MSG:
408 /* notification msgs */
409 rndis_filter_receive_indicate_status(rndis_dev, &rndis_msg);
410 break;
411 default:
412 netdev_err(ndev,
413 "unhandled rndis message (type %u len %u)\n",
414 rndis_msg.ndis_msg_type,
415 rndis_msg.msg_len);
416 break;
417 }
418
419 return 0;
420}
421
422static int rndis_filter_query_device(struct rndis_device *dev, u32 oid,
423 void *result, u32 *result_size)
424{
425 struct rndis_request *request;
426 u32 inresult_size = *result_size;
427 struct rndis_query_request *query;
428 struct rndis_query_complete *query_complete;
429 int ret = 0;
430 int t;
431
432 if (!result)
433 return -EINVAL;
434
435 *result_size = 0;
436 request = get_rndis_request(dev, REMOTE_NDIS_QUERY_MSG,
437 RNDIS_MESSAGE_SIZE(struct rndis_query_request));
438 if (!request) {
439 ret = -ENOMEM;
440 goto cleanup;
441 }
442
443 /* Setup the rndis query */
444 query = &request->request_msg.msg.query_req;
445 query->oid = oid;
446 query->info_buf_offset = sizeof(struct rndis_query_request);
447 query->info_buflen = 0;
448 query->dev_vc_handle = 0;
449
450 ret = rndis_filter_send_request(dev, request);
451 if (ret != 0)
452 goto cleanup;
453
454 t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
455 if (t == 0) {
456 ret = -ETIMEDOUT;
457 goto cleanup;
458 }
459
460 /* Copy the response back */
461 query_complete = &request->response_msg.msg.query_complete;
462
463 if (query_complete->info_buflen > inresult_size) {
464 ret = -1;
465 goto cleanup;
466 }
467
468 memcpy(result,
469 (void *)((unsigned long)query_complete +
470 query_complete->info_buf_offset),
471 query_complete->info_buflen);
472
473 *result_size = query_complete->info_buflen;
474
475cleanup:
476 if (request)
477 put_rndis_request(dev, request);
478
479 return ret;
480}
481
482static int rndis_filter_query_device_mac(struct rndis_device *dev)
483{
484 u32 size = ETH_ALEN;
485
486 return rndis_filter_query_device(dev,
487 RNDIS_OID_802_3_PERMANENT_ADDRESS,
488 dev->hw_mac_adr, &size);
489}
490
491static int rndis_filter_query_device_link_status(struct rndis_device *dev)
492{
493 u32 size = sizeof(u32);
494 u32 link_status;
495 int ret;
496
497 ret = rndis_filter_query_device(dev,
498 RNDIS_OID_GEN_MEDIA_CONNECT_STATUS,
499 &link_status, &size);
500 dev->link_state = (link_status != 0) ? true : false;
501
502 return ret;
503}
504
505int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter)
506{
507 struct rndis_request *request;
508 struct rndis_set_request *set;
509 struct rndis_set_complete *set_complete;
510 u32 status;
511 int ret, t;
512 struct net_device *ndev;
513
514 ndev = dev->net_dev->ndev;
515
516 request = get_rndis_request(dev, REMOTE_NDIS_SET_MSG,
517 RNDIS_MESSAGE_SIZE(struct rndis_set_request) +
518 sizeof(u32));
519 if (!request) {
520 ret = -ENOMEM;
521 goto cleanup;
522 }
523
524 /* Setup the rndis set */
525 set = &request->request_msg.msg.set_req;
526 set->oid = RNDIS_OID_GEN_CURRENT_PACKET_FILTER;
527 set->info_buflen = sizeof(u32);
528 set->info_buf_offset = sizeof(struct rndis_set_request);
529
530 memcpy((void *)(unsigned long)set + sizeof(struct rndis_set_request),
531 &new_filter, sizeof(u32));
532
533 ret = rndis_filter_send_request(dev, request);
534 if (ret != 0)
535 goto cleanup;
536
537 t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
538
539 if (t == 0) {
540 netdev_err(ndev,
541 "timeout before we got a set response...\n");
542 /*
543 * We can't deallocate the request since we may still receive a
544 * send completion for it.
545 */
546 goto exit;
547 } else {
548 set_complete = &request->response_msg.msg.set_complete;
549 status = set_complete->status;
550 }
551
552cleanup:
553 if (request)
554 put_rndis_request(dev, request);
555exit:
556 return ret;
557}
558
559
560static int rndis_filter_init_device(struct rndis_device *dev)
561{
562 struct rndis_request *request;
563 struct rndis_initialize_request *init;
564 struct rndis_initialize_complete *init_complete;
565 u32 status;
566 int ret, t;
567
568 request = get_rndis_request(dev, REMOTE_NDIS_INITIALIZE_MSG,
569 RNDIS_MESSAGE_SIZE(struct rndis_initialize_request));
570 if (!request) {
571 ret = -ENOMEM;
572 goto cleanup;
573 }
574
575	 /* Setup the rndis initialize request */
576 init = &request->request_msg.msg.init_req;
577 init->major_ver = RNDIS_MAJOR_VERSION;
578 init->minor_ver = RNDIS_MINOR_VERSION;
579 /* FIXME: Use 1536 - rounded ethernet frame size */
580 init->max_xfer_size = 2048;
581
582 dev->state = RNDIS_DEV_INITIALIZING;
583
584 ret = rndis_filter_send_request(dev, request);
585 if (ret != 0) {
586 dev->state = RNDIS_DEV_UNINITIALIZED;
587 goto cleanup;
588 }
589
590
591 t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
592
593 if (t == 0) {
594 ret = -ETIMEDOUT;
595 goto cleanup;
596 }
597
598 init_complete = &request->response_msg.msg.init_complete;
599 status = init_complete->status;
600 if (status == RNDIS_STATUS_SUCCESS) {
601 dev->state = RNDIS_DEV_INITIALIZED;
602 ret = 0;
603 } else {
604 dev->state = RNDIS_DEV_UNINITIALIZED;
605 ret = -EINVAL;
606 }
607
608cleanup:
609 if (request)
610 put_rndis_request(dev, request);
611
612 return ret;
613}
614
615static void rndis_filter_halt_device(struct rndis_device *dev)
616{
617 struct rndis_request *request;
618 struct rndis_halt_request *halt;
619
620 /* Attempt to do a rndis device halt */
621 request = get_rndis_request(dev, REMOTE_NDIS_HALT_MSG,
622 RNDIS_MESSAGE_SIZE(struct rndis_halt_request));
623 if (!request)
624 goto cleanup;
625
626	 /* Setup the rndis halt request */
627 halt = &request->request_msg.msg.halt_req;
628 halt->req_id = atomic_inc_return(&dev->new_req_id);
629
630 /* Ignore return since this msg is optional. */
631 rndis_filter_send_request(dev, request);
632
633 dev->state = RNDIS_DEV_UNINITIALIZED;
634
635cleanup:
636 if (request)
637 put_rndis_request(dev, request);
638 return;
639}
640
641static int rndis_filter_open_device(struct rndis_device *dev)
642{
643 int ret;
644
645 if (dev->state != RNDIS_DEV_INITIALIZED)
646 return 0;
647
648 ret = rndis_filter_set_packet_filter(dev,
649 NDIS_PACKET_TYPE_BROADCAST |
650 NDIS_PACKET_TYPE_ALL_MULTICAST |
651 NDIS_PACKET_TYPE_DIRECTED);
652 if (ret == 0)
653 dev->state = RNDIS_DEV_DATAINITIALIZED;
654
655 return ret;
656}
657
658static int rndis_filter_close_device(struct rndis_device *dev)
659{
660 int ret;
661
662 if (dev->state != RNDIS_DEV_DATAINITIALIZED)
663 return 0;
664
665 ret = rndis_filter_set_packet_filter(dev, 0);
666 if (ret == 0)
667 dev->state = RNDIS_DEV_INITIALIZED;
668
669 return ret;
670}
671
672int rndis_filter_device_add(struct hv_device *dev,
673 void *additional_info)
674{
675 int ret;
676 struct netvsc_device *net_device;
677 struct rndis_device *rndis_device;
678 struct netvsc_device_info *device_info = additional_info;
679
680 rndis_device = get_rndis_device();
681 if (!rndis_device)
682 return -ENODEV;
683
684 /*
685 * Let the inner driver handle this first to create the netvsc channel
686 * NOTE! Once the channel is created, we may get a receive callback
687	 * (rndis_filter_receive()) before this call is completed
688 */
689 ret = netvsc_device_add(dev, additional_info);
690 if (ret != 0) {
691 kfree(rndis_device);
692 return ret;
693 }
694
695
696 /* Initialize the rndis device */
697 net_device = hv_get_drvdata(dev);
698
699 net_device->extension = rndis_device;
700 rndis_device->net_dev = net_device;
701
702 /* Send the rndis initialization message */
703 ret = rndis_filter_init_device(rndis_device);
704 if (ret != 0) {
705 /*
706 * TODO: If rndis init failed, we will need to shut down the
707 * channel
708 */
709 }
710
711 /* Get the mac address */
712 ret = rndis_filter_query_device_mac(rndis_device);
713 if (ret != 0) {
714 /*
715 * TODO: shutdown rndis device and the channel
716 */
717 }
718
719 memcpy(device_info->mac_adr, rndis_device->hw_mac_adr, ETH_ALEN);
720
721 rndis_filter_query_device_link_status(rndis_device);
722
723 device_info->link_state = rndis_device->link_state;
724
725 dev_info(&dev->device, "Device MAC %pM link state %s\n",
726 rndis_device->hw_mac_adr,
727 device_info->link_state ? "down" : "up");
728
729 return ret;
730}
731
732void rndis_filter_device_remove(struct hv_device *dev)
733{
734 struct netvsc_device *net_dev = hv_get_drvdata(dev);
735 struct rndis_device *rndis_dev = net_dev->extension;
736
737 /* Halt and release the rndis device */
738 rndis_filter_halt_device(rndis_dev);
739
740 kfree(rndis_dev);
741 net_dev->extension = NULL;
742
743 netvsc_device_remove(dev);
744}
745
746
747int rndis_filter_open(struct hv_device *dev)
748{
749 struct netvsc_device *net_device = hv_get_drvdata(dev);
750
751 if (!net_device)
752 return -EINVAL;
753
754 return rndis_filter_open_device(net_device->extension);
755}
756
757int rndis_filter_close(struct hv_device *dev)
758{
759 struct netvsc_device *netDevice = hv_get_drvdata(dev);
760
761 if (!netDevice)
762 return -EINVAL;
763
764 return rndis_filter_close_device(netDevice->extension);
765}
766
767int rndis_filter_send(struct hv_device *dev,
768 struct hv_netvsc_packet *pkt)
769{
770 int ret;
771 struct rndis_filter_packet *filterPacket;
772 struct rndis_message *rndisMessage;
773 struct rndis_packet *rndisPacket;
774 u32 rndisMessageSize;
775
776 /* Add the rndis header */
777 filterPacket = (struct rndis_filter_packet *)pkt->extension;
778
779 memset(filterPacket, 0, sizeof(struct rndis_filter_packet));
780
781 rndisMessage = &filterPacket->msg;
782 rndisMessageSize = RNDIS_MESSAGE_SIZE(struct rndis_packet);
783
784 rndisMessage->ndis_msg_type = REMOTE_NDIS_PACKET_MSG;
785 rndisMessage->msg_len = pkt->total_data_buflen +
786 rndisMessageSize;
787
788 rndisPacket = &rndisMessage->msg.pkt;
789 rndisPacket->data_offset = sizeof(struct rndis_packet);
790 rndisPacket->data_len = pkt->total_data_buflen;
791
792 pkt->is_data_pkt = true;
793 pkt->page_buf[0].pfn = virt_to_phys(rndisMessage) >> PAGE_SHIFT;
794 pkt->page_buf[0].offset =
795 (unsigned long)rndisMessage & (PAGE_SIZE-1);
796 pkt->page_buf[0].len = rndisMessageSize;
797
798 /* Save the packet send completion and context */
799 filterPacket->completion = pkt->completion.send.send_completion;
800 filterPacket->completion_ctx =
801 pkt->completion.send.send_completion_ctx;
802
803 /* Use ours */
804 pkt->completion.send.send_completion = rndis_filter_send_completion;
805 pkt->completion.send.send_completion_ctx = filterPacket;
806
807 ret = netvsc_send(dev, pkt);
808 if (ret != 0) {
809 /*
810 * Reset the completion to originals to allow retries from
811 * above
812 */
813 pkt->completion.send.send_completion =
814 filterPacket->completion;
815 pkt->completion.send.send_completion_ctx =
816 filterPacket->completion_ctx;
817 }
818
819 return ret;
820}
821
822static void rndis_filter_send_completion(void *ctx)
823{
824 struct rndis_filter_packet *filterPacket = ctx;
825
826 /* Pass it back to the original handler */
827 filterPacket->completion(filterPacket->completion_ctx);
828}
829
830
831static void rndis_filter_send_request_completion(void *ctx)
832{
833 /* Noop */
834}
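
The control path above hinges on rndis_filter_receive_response() matching every completion to the outstanding request whose req_id it carries, the id being the first field of each request/response body. The userspace sketch below illustrates just that bookkeeping; the fixed-size table and demo_* names are assumptions standing in for the driver's req_list, request_lock and wait_event completion.

#include <stdio.h>
#include <string.h>

#define DEMO_MAX_REQ 8

struct demo_request {
	unsigned int req_id;	/* analogous to set->req_id in get_rndis_request() */
	int in_use;
	int completed;
	unsigned int status;
};

static struct demo_request reqs[DEMO_MAX_REQ];
static unsigned int next_id;

static struct demo_request *demo_get_request(void)
{
	int i;

	for (i = 0; i < DEMO_MAX_REQ; i++) {
		if (!reqs[i].in_use) {
			memset(&reqs[i], 0, sizeof(reqs[i]));
			reqs[i].in_use = 1;
			reqs[i].req_id = ++next_id;	/* monotonically increasing id */
			return &reqs[i];
		}
	}
	return NULL;
}

static void demo_receive_response(unsigned int req_id, unsigned int status)
{
	int i;

	for (i = 0; i < DEMO_MAX_REQ; i++) {
		if (reqs[i].in_use && reqs[i].req_id == req_id) {
			reqs[i].status = status;
			reqs[i].completed = 1;	/* complete(&request->wait_event) in the driver */
			return;
		}
	}
	printf("no request found for response id %u\n", req_id);
}

int main(void)
{
	struct demo_request *r = demo_get_request();

	demo_receive_response(r->req_id, 0);
	printf("request %u completed %d status %u\n",
	       r->req_id, r->completed, r->status);
	return 0;
}

In the driver the same lookup walks dev->req_list under request_lock and, on a hit, copies the response into request->response_msg before signalling the waiter.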