path: root/drivers/net/hyperv
author	Haiyang Zhang <haiyangz@microsoft.com>	2011-11-28 16:35:35 -0500
committer	Greg Kroah-Hartman <gregkh@suse.de>	2011-11-29 02:12:36 -0500
commit	95fa0405c5991726e06c08ffcd8ff872f7fb4f2d (patch)
tree	b03a3a6278d9eb2baab16f45082bdb2ac1a6a183 /drivers/net/hyperv
parent	3b724ca14565747926c23af1fa1afb1848c3f448 (diff)
staging: hv: move hv_netvsc out of staging area
hv_netvsc has been reviewed on the netdev mailing list on 6/09/2011. All
recommended changes have been made. We are requesting to move it out of the
staging area.

Signed-off-by: Haiyang Zhang <haiyangz@microsoft.com>
Signed-off-by: KY Srinivasan <kys@microsoft.com>
Signed-off-by: Mike Sterling <Mike.Sterling@microsoft.com>
Acked-by: Stephen Hemminger <shemminger@vyatta.com>
Acked-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Diffstat (limited to 'drivers/net/hyperv')
-rw-r--r--	drivers/net/hyperv/Kconfig	5
-rw-r--r--	drivers/net/hyperv/Makefile	3
-rw-r--r--	drivers/net/hyperv/hyperv_net.h	1058
-rw-r--r--	drivers/net/hyperv/netvsc.c	939
-rw-r--r--	drivers/net/hyperv/netvsc_drv.c	456
-rw-r--r--	drivers/net/hyperv/rndis_filter.c	855
6 files changed, 3316 insertions, 0 deletions
diff --git a/drivers/net/hyperv/Kconfig b/drivers/net/hyperv/Kconfig
new file mode 100644
index 000000000000..936968d23559
--- /dev/null
+++ b/drivers/net/hyperv/Kconfig
@@ -0,0 +1,5 @@
1config HYPERV_NET
2 tristate "Microsoft Hyper-V virtual network driver"
3 depends on HYPERV
4 help
5 Select this option to enable the Hyper-V virtual network driver.
diff --git a/drivers/net/hyperv/Makefile b/drivers/net/hyperv/Makefile
new file mode 100644
index 000000000000..c8a66827100c
--- /dev/null
+++ b/drivers/net/hyperv/Makefile
@@ -0,0 +1,3 @@
1obj-$(CONFIG_HYPERV_NET) += hv_netvsc.o
2
3hv_netvsc-y := netvsc_drv.o netvsc.o rndis_filter.o
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
new file mode 100644
index 000000000000..ac1ec8405124
--- /dev/null
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -0,0 +1,1058 @@
1/*
2 *
3 * Copyright (c) 2011, Microsoft Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
16 * Place - Suite 330, Boston, MA 02111-1307 USA.
17 *
18 * Authors:
19 * Haiyang Zhang <haiyangz@microsoft.com>
20 * Hank Janssen <hjanssen@microsoft.com>
21 * K. Y. Srinivasan <kys@microsoft.com>
22 *
23 */
24
25#ifndef _HYPERV_NET_H
26#define _HYPERV_NET_H
27
28#include <linux/list.h>
29#include <linux/hyperv.h>
30
31/* Fwd declaration */
32struct hv_netvsc_packet;
33
34/* Represents the xfer page packet which contains 1 or more netvsc packets */
35struct xferpage_packet {
36 struct list_head list_ent;
37
38 /* # of netvsc packets this xfer packet contains */
39 u32 count;
40};
41
42/* The number of pages sufficient to cover a jumbo frame buffer. */
43#define NETVSC_PACKET_MAXPAGE 4
44
45/*
46 * Represents a netvsc packet which contains 1 RNDIS message and 1 ethernet
47 * frame within the RNDIS message
48 */
49struct hv_netvsc_packet {
50 /* Bookkeeping stuff */
51 struct list_head list_ent;
52
53 struct hv_device *device;
54 bool is_data_pkt;
55
56 /*
57 * Valid only for receives when we break a xfer page packet
58 * into multiple netvsc packets
59 */
60 struct xferpage_packet *xfer_page_pkt;
61
62 union {
63 struct {
64 u64 recv_completion_tid;
65 void *recv_completion_ctx;
66 void (*recv_completion)(void *context);
67 } recv;
68 struct {
69 u64 send_completion_tid;
70 void *send_completion_ctx;
71 void (*send_completion)(void *context);
72 } send;
73 } completion;
74
75 /* This points to the memory after page_buf */
76 void *extension;
77
78 u32 total_data_buflen;
79 /* Points to the send/receive buffer where the ethernet frame is */
80 u32 page_buf_cnt;
81 struct hv_page_buffer page_buf[NETVSC_PACKET_MAXPAGE];
82};
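/*
 * Editor's illustrative sketch (assumption, not part of this patch): the
 * 'extension' pointer above is meant to reference storage placed right
 * after the packet (i.e. after page_buf, its last member), so a sender can
 * allocate the hv_netvsc_packet and its protocol header in one chunk.
 * example_alloc_pkt() and msg_size are hypothetical; the usual slab
 * includes are assumed.
 */
static inline struct hv_netvsc_packet *example_alloc_pkt(u32 msg_size)
{
	struct hv_netvsc_packet *pkt;

	pkt = kzalloc(sizeof(*pkt) + msg_size, GFP_ATOMIC);
	if (pkt)
		pkt->extension = pkt + 1; /* memory right after page_buf */

	return pkt;
}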
83
84struct netvsc_device_info {
85 unsigned char mac_adr[6];
86 bool link_state; /* 0 - link up, 1 - link down */
87 int ring_size;
88};
89
90/* Interface */
91int netvsc_device_add(struct hv_device *device, void *additional_info);
92int netvsc_device_remove(struct hv_device *device);
93int netvsc_send(struct hv_device *device,
94 struct hv_netvsc_packet *packet);
95void netvsc_linkstatus_callback(struct hv_device *device_obj,
96 unsigned int status);
97int netvsc_recv_callback(struct hv_device *device_obj,
98 struct hv_netvsc_packet *packet);
99int rndis_filter_open(struct hv_device *dev);
100int rndis_filter_close(struct hv_device *dev);
101int rndis_filter_device_add(struct hv_device *dev,
102 void *additional_info);
103void rndis_filter_device_remove(struct hv_device *dev);
104int rndis_filter_receive(struct hv_device *dev,
105 struct hv_netvsc_packet *pkt);
106
107
108
109int rndis_filter_send(struct hv_device *dev,
110 struct hv_netvsc_packet *pkt);
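/*
 * Editor's illustrative sketch (assumption, not part of this patch): how a
 * caller of netvsc_send() above is expected to frame a single-page packet
 * and receive asynchronous completion. example_send_done(),
 * example_xmit_one_page(), frame and len are hypothetical; the usual
 * slab/mm includes are assumed.
 */
static void example_send_done(void *context)
{
	struct hv_netvsc_packet *pkt = context;

	/* The host has consumed the frame; release its backing here. */
	kfree(pkt);
}

static int example_xmit_one_page(struct hv_device *dev, void *frame, u32 len)
{
	struct hv_netvsc_packet *pkt;

	pkt = kzalloc(sizeof(*pkt), GFP_ATOMIC);
	if (!pkt)
		return -ENOMEM;

	pkt->device = dev;
	pkt->is_data_pkt = true;
	pkt->total_data_buflen = len;

	/* Describe the frame as one page-granular buffer for VMBus. */
	pkt->page_buf_cnt = 1;
	pkt->page_buf[0].pfn = virt_to_phys(frame) >> PAGE_SHIFT;
	pkt->page_buf[0].offset = (unsigned long)frame & (PAGE_SIZE - 1);
	pkt->page_buf[0].len = len;

	/* Completion is reported asynchronously through this callback. */
	pkt->completion.send.send_completion = example_send_done;
	pkt->completion.send.send_completion_ctx = pkt;

	return netvsc_send(dev, pkt);
}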
111
112#define NVSP_INVALID_PROTOCOL_VERSION ((u32)0xFFFFFFFF)
113
114#define NVSP_PROTOCOL_VERSION_1 2
115#define NVSP_MIN_PROTOCOL_VERSION NVSP_PROTOCOL_VERSION_1
116#define NVSP_MAX_PROTOCOL_VERSION NVSP_PROTOCOL_VERSION_1
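/*
 * Editor's illustrative sketch (assumption, not part of this patch): the VSC
 * offers the [NVSP_MIN_PROTOCOL_VERSION, NVSP_MAX_PROTOCOL_VERSION] window
 * during init, so a negotiated version is acceptable only when it falls
 * inside that window. example_nvsp_ver_ok() is hypothetical.
 */
static inline bool example_nvsp_ver_ok(u32 ver)
{
	return ver >= NVSP_MIN_PROTOCOL_VERSION &&
	       ver <= NVSP_MAX_PROTOCOL_VERSION;
}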
117
118enum {
119 NVSP_MSG_TYPE_NONE = 0,
120
121 /* Init Messages */
122 NVSP_MSG_TYPE_INIT = 1,
123 NVSP_MSG_TYPE_INIT_COMPLETE = 2,
124
125 NVSP_VERSION_MSG_START = 100,
126
127 /* Version 1 Messages */
128 NVSP_MSG1_TYPE_SEND_NDIS_VER = NVSP_VERSION_MSG_START,
129
130 NVSP_MSG1_TYPE_SEND_RECV_BUF,
131 NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE,
132 NVSP_MSG1_TYPE_REVOKE_RECV_BUF,
133
134 NVSP_MSG1_TYPE_SEND_SEND_BUF,
135 NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE,
136 NVSP_MSG1_TYPE_REVOKE_SEND_BUF,
137
138 NVSP_MSG1_TYPE_SEND_RNDIS_PKT,
139 NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE,
140
141 /*
142 * This should be set to the number of messages for the version with
143 * the maximum number of messages.
144 */
145 NVSP_NUM_MSG_PER_VERSION = 9,
146};
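/*
 * Editor's illustrative sketch (assumption, not part of this patch): message
 * types below NVSP_VERSION_MSG_START belong to the version-independent init
 * exchange; everything at or above it is a versioned (currently version 1)
 * message. example_is_init_msg() is hypothetical.
 */
static inline bool example_is_init_msg(u32 msg_type)
{
	return msg_type < NVSP_VERSION_MSG_START;
}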
147
148enum {
149 NVSP_STAT_NONE = 0,
150 NVSP_STAT_SUCCESS,
151 NVSP_STAT_FAIL,
152 NVSP_STAT_PROTOCOL_TOO_NEW,
153 NVSP_STAT_PROTOCOL_TOO_OLD,
154 NVSP_STAT_INVALID_RNDIS_PKT,
155 NVSP_STAT_BUSY,
156 NVSP_STAT_MAX,
157};
158
159struct nvsp_message_header {
160 u32 msg_type;
161};
162
163/* Init Messages */
164
165/*
166 * This message is used by the VSC to initialize the channel after the channel
167 * has been opened. This message should never include anything other than
168 * versioning (i.e. this message will be the same forever).
169 */
170struct nvsp_message_init {
171 u32 min_protocol_ver;
172 u32 max_protocol_ver;
173} __packed;
174
175/*
176 * This message is used by the VSP to complete the initialization of the
177 * channel. This message should never include anything other than versioning
178 * (i.e. this message will be the same forever).
179 */
180struct nvsp_message_init_complete {
181 u32 negotiated_protocol_ver;
182 u32 max_mdl_chain_len;
183 u32 status;
184} __packed;
185
186union nvsp_message_init_uber {
187 struct nvsp_message_init init;
188 struct nvsp_message_init_complete init_complete;
189} __packed;
190
191/* Version 1 Messages */
192
193/*
194 * This message is used by the VSC to send the NDIS version to the VSP. The VSP
195 * can use this information when handling OIDs sent by the VSC.
196 */
197struct nvsp_1_message_send_ndis_version {
198 u32 ndis_major_ver;
199 u32 ndis_minor_ver;
200} __packed;
201
202/*
203 * This message is used by the VSC to send a receive buffer to the VSP. The VSP
204 * can then use the receive buffer to send data to the VSC.
205 */
206struct nvsp_1_message_send_receive_buffer {
207 u32 gpadl_handle;
208 u16 id;
209} __packed;
210
211struct nvsp_1_receive_buffer_section {
212 u32 offset;
213 u32 sub_alloc_size;
214 u32 num_sub_allocs;
215 u32 end_offset;
216} __packed;
217
218/*
219 * This message is used by the VSP to acknowledge a receive buffer sent by the
220 * VSC. This message must be sent by the VSP before the VSP uses the receive
221 * buffer.
222 */
223struct nvsp_1_message_send_receive_buffer_complete {
224 u32 status;
225 u32 num_sections;
226
227 /*
228 * The receive buffer is split into two parts, a large suballocation
229 * section and a small suballocation section. These sections are then
230 * suballocated by a certain size.
231 */
232
233 /*
234 * For example, the following break up of the receive buffer has 6
235 * large suballocations and 10 small suballocations.
236 */
237
238 /*
239 * | Large Section | | Small Section |
240 * ------------------------------------------------------------
241 * | | | | | | | | | | | | | | | | | |
242 * | |
243 * LargeOffset SmallOffset
244 */
245
246 struct nvsp_1_receive_buffer_section sections[1];
247} __packed;
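/*
 * Editor's illustrative sketch (assumption, not part of this patch): each
 * section entry describes num_sub_allocs slots of sub_alloc_size bytes
 * starting at 'offset' within the receive buffer, so sub-allocation 'idx'
 * can be located as below. example_recv_sub_alloc() is hypothetical and
 * assumes 'offset' is relative to the start of the receive buffer.
 */
static inline void *example_recv_sub_alloc(void *recv_buf,
			const struct nvsp_1_receive_buffer_section *sec,
			u32 idx)
{
	if (idx >= sec->num_sub_allocs)
		return NULL;

	return (u8 *)recv_buf + sec->offset + idx * sec->sub_alloc_size;
}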
248
249/*
250 * This message is sent by the VSC to revoke the receive buffer. After the VSP
251 * completes this transaction, the VSP should never use the receive buffer
252 * again.
253 */
254struct nvsp_1_message_revoke_receive_buffer {
255 u16 id;
256};
257
258/*
259 * This message is used by the VSC to send a send buffer to the VSP. The VSC
260 * can then use the send buffer to send data to the VSP.
261 */
262struct nvsp_1_message_send_send_buffer {
263 u32 gpadl_handle;
264 u16 id;
265} __packed;
266
267/*
268 * This message is used by the VSP to acknowledge a send buffer sent by the
269 * VSC. This message must be sent by the VSP before the VSP uses the send
270 * buffer.
271 */
272struct nvsp_1_message_send_send_buffer_complete {
273 u32 status;
274
275 /*
276 * The VSC gets to choose the size of the send buffer and the VSP gets
277 * to choose the section size of the buffer. This was done to enable
278 * dynamic reconfigurations when the cost of GPA-direct buffers
279 * decreases.
280 */
281 u32 section_size;
282} __packed;
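/*
 * Editor's illustrative sketch (assumption, not part of this patch): since
 * the VSC picks the total send buffer size and the VSP replies with
 * section_size, the number of usable send sections follows directly.
 * example_send_section_cnt() and send_buf_size are hypothetical.
 */
static inline u32 example_send_section_cnt(u32 send_buf_size, u32 section_size)
{
	return section_size ? send_buf_size / section_size : 0;
}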
283
284/*
285 * This message is sent by the VSC to revoke the send buffer. After the VSP
287 * completes this transaction, the VSP should never use the send buffer again.
287 */
288struct nvsp_1_message_revoke_send_buffer {
289 u16 id;
290};
291
292/*
293 * This message is used by both the VSP and the VSC to send a RNDIS message to
294 * the opposite channel endpoint.
295 */
296struct nvsp_1_message_send_rndis_packet {
297 /*
298 * This field is specified by RNDIS. It assumes there are two different
299 * channels of communication. However, the Network VSP only has one.
300 * Therefore, the channel travels with the RNDIS packet.
301 */
302 u32 channel_type;
303
304 /*
305 * This field is used to send part or all of the data through a send
306 * buffer. This value specifies an index into the send buffer. If the
307 * index is 0xFFFFFFFF, then the send buffer is not being used and all
308 * of the data was sent through other VMBus mechanisms.
309 */
310 u32 send_buf_section_index;
311 u32 send_buf_section_size;
312} __packed;
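/*
 * Editor's illustrative sketch (assumption, not part of this patch): when no
 * send-buffer section carries the data (everything travels as VMBus page
 * buffers instead), the index is set to the reserved 0xFFFFFFFF value and
 * the size to zero, as netvsc_send() in netvsc.c does.
 * example_mark_no_send_buf() is hypothetical.
 */
static inline void
example_mark_no_send_buf(struct nvsp_1_message_send_rndis_packet *pkt)
{
	pkt->send_buf_section_index = 0xFFFFFFFF;
	pkt->send_buf_section_size = 0;
}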
313
314/*
315 * This message is used by both the VSP and the VSC to complete a RNDIS message
316 * to the opposite channel endpoint. At this point, the initiator of this
317 * message cannot use any resources associated with the original RNDIS packet.
318 */
319struct nvsp_1_message_send_rndis_packet_complete {
320 u32 status;
321};
322
323union nvsp_1_message_uber {
324 struct nvsp_1_message_send_ndis_version send_ndis_ver;
325
326 struct nvsp_1_message_send_receive_buffer send_recv_buf;
327 struct nvsp_1_message_send_receive_buffer_complete
328 send_recv_buf_complete;
329 struct nvsp_1_message_revoke_receive_buffer revoke_recv_buf;
330
331 struct nvsp_1_message_send_send_buffer send_send_buf;
332 struct nvsp_1_message_send_send_buffer_complete send_send_buf_complete;
333 struct nvsp_1_message_revoke_send_buffer revoke_send_buf;
334
335 struct nvsp_1_message_send_rndis_packet send_rndis_pkt;
336 struct nvsp_1_message_send_rndis_packet_complete
337 send_rndis_pkt_complete;
338} __packed;
339
340union nvsp_all_messages {
341 union nvsp_message_init_uber init_msg;
342 union nvsp_1_message_uber v1_msg;
343} __packed;
344
345/* ALL Messages */
346struct nvsp_message {
347 struct nvsp_message_header hdr;
348 union nvsp_all_messages msg;
349} __packed;
350
351
352
353
354/* #define NVSC_MIN_PROTOCOL_VERSION 1 */
355/* #define NVSC_MAX_PROTOCOL_VERSION 1 */
356
357#define NETVSC_RECEIVE_BUFFER_SIZE (1024*1024) /* 1MB */
358
359#define NETVSC_RECEIVE_BUFFER_ID 0xcafe
360
361#define NETVSC_RECEIVE_SG_COUNT 1
362
363/* Preallocated receive packets */
364#define NETVSC_RECEIVE_PACKETLIST_COUNT 256
365
366#define NETVSC_PACKET_SIZE 2048
367
368/* Per netvsc channel-specific */
369struct netvsc_device {
370 struct hv_device *dev;
371
372 atomic_t num_outstanding_sends;
373 bool destroy;
374 /*
375 * List of free preallocated hv_netvsc_packet to represent receive
376 * packet
377 */
378 struct list_head recv_pkt_list;
379 spinlock_t recv_pkt_list_lock;
380
381 /* Receive buffer allocated by us but managed by NetVSP */
382 void *recv_buf;
383 u32 recv_buf_size;
384 u32 recv_buf_gpadl_handle;
385 u32 recv_section_cnt;
386 struct nvsp_1_receive_buffer_section *recv_section;
387
388 /* Used for NetVSP initialization protocol */
389 struct completion channel_init_wait;
390 struct nvsp_message channel_init_pkt;
391
392 struct nvsp_message revoke_packet;
393 /* unsigned char HwMacAddr[HW_MACADDR_LEN]; */
394
395 struct net_device *ndev;
396
397 /* Holds rndis device info */
398 void *extension;
399};
400
401
402/* Status codes */
403
404
405#ifndef STATUS_SUCCESS
406#define STATUS_SUCCESS (0x00000000L)
407#endif
408
409#ifndef STATUS_UNSUCCESSFUL
410#define STATUS_UNSUCCESSFUL (0xC0000001L)
411#endif
412
413#ifndef STATUS_PENDING
414#define STATUS_PENDING (0x00000103L)
415#endif
416
417#ifndef STATUS_INSUFFICIENT_RESOURCES
418#define STATUS_INSUFFICIENT_RESOURCES (0xC000009AL)
419#endif
420
421#ifndef STATUS_BUFFER_OVERFLOW
422#define STATUS_BUFFER_OVERFLOW (0x80000005L)
423#endif
424
425#ifndef STATUS_NOT_SUPPORTED
426#define STATUS_NOT_SUPPORTED (0xC00000BBL)
427#endif
428
429#define RNDIS_STATUS_SUCCESS (STATUS_SUCCESS)
430#define RNDIS_STATUS_PENDING (STATUS_PENDING)
431#define RNDIS_STATUS_NOT_RECOGNIZED (0x00010001L)
432#define RNDIS_STATUS_NOT_COPIED (0x00010002L)
433#define RNDIS_STATUS_NOT_ACCEPTED (0x00010003L)
434#define RNDIS_STATUS_CALL_ACTIVE (0x00010007L)
435
436#define RNDIS_STATUS_ONLINE (0x40010003L)
437#define RNDIS_STATUS_RESET_START (0x40010004L)
438#define RNDIS_STATUS_RESET_END (0x40010005L)
439#define RNDIS_STATUS_RING_STATUS (0x40010006L)
440#define RNDIS_STATUS_CLOSED (0x40010007L)
441#define RNDIS_STATUS_WAN_LINE_UP (0x40010008L)
442#define RNDIS_STATUS_WAN_LINE_DOWN (0x40010009L)
443#define RNDIS_STATUS_WAN_FRAGMENT (0x4001000AL)
444#define RNDIS_STATUS_MEDIA_CONNECT (0x4001000BL)
445#define RNDIS_STATUS_MEDIA_DISCONNECT (0x4001000CL)
446#define RNDIS_STATUS_HARDWARE_LINE_UP (0x4001000DL)
447#define RNDIS_STATUS_HARDWARE_LINE_DOWN (0x4001000EL)
448#define RNDIS_STATUS_INTERFACE_UP (0x4001000FL)
449#define RNDIS_STATUS_INTERFACE_DOWN (0x40010010L)
450#define RNDIS_STATUS_MEDIA_BUSY (0x40010011L)
451#define RNDIS_STATUS_MEDIA_SPECIFIC_INDICATION (0x40010012L)
452#define RNDIS_STATUS_WW_INDICATION RNDIS_STATUS_MEDIA_SPECIFIC_INDICATION
453#define RNDIS_STATUS_LINK_SPEED_CHANGE (0x40010013L)
454
455#define RNDIS_STATUS_NOT_RESETTABLE (0x80010001L)
456#define RNDIS_STATUS_SOFT_ERRORS (0x80010003L)
457#define RNDIS_STATUS_HARD_ERRORS (0x80010004L)
458#define RNDIS_STATUS_BUFFER_OVERFLOW (STATUS_BUFFER_OVERFLOW)
459
460#define RNDIS_STATUS_FAILURE (STATUS_UNSUCCESSFUL)
461#define RNDIS_STATUS_RESOURCES (STATUS_INSUFFICIENT_RESOURCES)
462#define RNDIS_STATUS_CLOSING (0xC0010002L)
463#define RNDIS_STATUS_BAD_VERSION (0xC0010004L)
464#define RNDIS_STATUS_BAD_CHARACTERISTICS (0xC0010005L)
465#define RNDIS_STATUS_ADAPTER_NOT_FOUND (0xC0010006L)
466#define RNDIS_STATUS_OPEN_FAILED (0xC0010007L)
467#define RNDIS_STATUS_DEVICE_FAILED (0xC0010008L)
468#define RNDIS_STATUS_MULTICAST_FULL (0xC0010009L)
469#define RNDIS_STATUS_MULTICAST_EXISTS (0xC001000AL)
470#define RNDIS_STATUS_MULTICAST_NOT_FOUND (0xC001000BL)
471#define RNDIS_STATUS_REQUEST_ABORTED (0xC001000CL)
472#define RNDIS_STATUS_RESET_IN_PROGRESS (0xC001000DL)
473#define RNDIS_STATUS_CLOSING_INDICATING (0xC001000EL)
474#define RNDIS_STATUS_NOT_SUPPORTED (STATUS_NOT_SUPPORTED)
475#define RNDIS_STATUS_INVALID_PACKET (0xC001000FL)
476#define RNDIS_STATUS_OPEN_LIST_FULL (0xC0010010L)
477#define RNDIS_STATUS_ADAPTER_NOT_READY (0xC0010011L)
478#define RNDIS_STATUS_ADAPTER_NOT_OPEN (0xC0010012L)
479#define RNDIS_STATUS_NOT_INDICATING (0xC0010013L)
480#define RNDIS_STATUS_INVALID_LENGTH (0xC0010014L)
481#define RNDIS_STATUS_INVALID_DATA (0xC0010015L)
482#define RNDIS_STATUS_BUFFER_TOO_SHORT (0xC0010016L)
483#define RNDIS_STATUS_INVALID_OID (0xC0010017L)
484#define RNDIS_STATUS_ADAPTER_REMOVED (0xC0010018L)
485#define RNDIS_STATUS_UNSUPPORTED_MEDIA (0xC0010019L)
486#define RNDIS_STATUS_GROUP_ADDRESS_IN_USE (0xC001001AL)
487#define RNDIS_STATUS_FILE_NOT_FOUND (0xC001001BL)
488#define RNDIS_STATUS_ERROR_READING_FILE (0xC001001CL)
489#define RNDIS_STATUS_ALREADY_MAPPED (0xC001001DL)
490#define RNDIS_STATUS_RESOURCE_CONFLICT (0xC001001EL)
491#define RNDIS_STATUS_NO_CABLE (0xC001001FL)
492
493#define RNDIS_STATUS_INVALID_SAP (0xC0010020L)
494#define RNDIS_STATUS_SAP_IN_USE (0xC0010021L)
495#define RNDIS_STATUS_INVALID_ADDRESS (0xC0010022L)
496#define RNDIS_STATUS_VC_NOT_ACTIVATED (0xC0010023L)
497#define RNDIS_STATUS_DEST_OUT_OF_ORDER (0xC0010024L)
498#define RNDIS_STATUS_VC_NOT_AVAILABLE (0xC0010025L)
499#define RNDIS_STATUS_CELLRATE_NOT_AVAILABLE (0xC0010026L)
500#define RNDIS_STATUS_INCOMPATABLE_QOS (0xC0010027L)
501#define RNDIS_STATUS_AAL_PARAMS_UNSUPPORTED (0xC0010028L)
502#define RNDIS_STATUS_NO_ROUTE_TO_DESTINATION (0xC0010029L)
503
504#define RNDIS_STATUS_TOKEN_RING_OPEN_ERROR (0xC0011000L)
505
506/* Object Identifiers used by NdisRequest Query/Set Information */
507/* General Objects */
508#define RNDIS_OID_GEN_SUPPORTED_LIST 0x00010101
509#define RNDIS_OID_GEN_HARDWARE_STATUS 0x00010102
510#define RNDIS_OID_GEN_MEDIA_SUPPORTED 0x00010103
511#define RNDIS_OID_GEN_MEDIA_IN_USE 0x00010104
512#define RNDIS_OID_GEN_MAXIMUM_LOOKAHEAD 0x00010105
513#define RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE 0x00010106
514#define RNDIS_OID_GEN_LINK_SPEED 0x00010107
515#define RNDIS_OID_GEN_TRANSMIT_BUFFER_SPACE 0x00010108
516#define RNDIS_OID_GEN_RECEIVE_BUFFER_SPACE 0x00010109
517#define RNDIS_OID_GEN_TRANSMIT_BLOCK_SIZE 0x0001010A
518#define RNDIS_OID_GEN_RECEIVE_BLOCK_SIZE 0x0001010B
519#define RNDIS_OID_GEN_VENDOR_ID 0x0001010C
520#define RNDIS_OID_GEN_VENDOR_DESCRIPTION 0x0001010D
521#define RNDIS_OID_GEN_CURRENT_PACKET_FILTER 0x0001010E
522#define RNDIS_OID_GEN_CURRENT_LOOKAHEAD 0x0001010F
523#define RNDIS_OID_GEN_DRIVER_VERSION 0x00010110
524#define RNDIS_OID_GEN_MAXIMUM_TOTAL_SIZE 0x00010111
525#define RNDIS_OID_GEN_PROTOCOL_OPTIONS 0x00010112
526#define RNDIS_OID_GEN_MAC_OPTIONS 0x00010113
527#define RNDIS_OID_GEN_MEDIA_CONNECT_STATUS 0x00010114
528#define RNDIS_OID_GEN_MAXIMUM_SEND_PACKETS 0x00010115
529#define RNDIS_OID_GEN_VENDOR_DRIVER_VERSION 0x00010116
530#define RNDIS_OID_GEN_NETWORK_LAYER_ADDRESSES 0x00010118
531#define RNDIS_OID_GEN_TRANSPORT_HEADER_OFFSET 0x00010119
532#define RNDIS_OID_GEN_MACHINE_NAME 0x0001021A
533#define RNDIS_OID_GEN_RNDIS_CONFIG_PARAMETER 0x0001021B
534
535#define RNDIS_OID_GEN_XMIT_OK 0x00020101
536#define RNDIS_OID_GEN_RCV_OK 0x00020102
537#define RNDIS_OID_GEN_XMIT_ERROR 0x00020103
538#define RNDIS_OID_GEN_RCV_ERROR 0x00020104
539#define RNDIS_OID_GEN_RCV_NO_BUFFER 0x00020105
540
541#define RNDIS_OID_GEN_DIRECTED_BYTES_XMIT 0x00020201
542#define RNDIS_OID_GEN_DIRECTED_FRAMES_XMIT 0x00020202
543#define RNDIS_OID_GEN_MULTICAST_BYTES_XMIT 0x00020203
544#define RNDIS_OID_GEN_MULTICAST_FRAMES_XMIT 0x00020204
545#define RNDIS_OID_GEN_BROADCAST_BYTES_XMIT 0x00020205
546#define RNDIS_OID_GEN_BROADCAST_FRAMES_XMIT 0x00020206
547#define RNDIS_OID_GEN_DIRECTED_BYTES_RCV 0x00020207
548#define RNDIS_OID_GEN_DIRECTED_FRAMES_RCV 0x00020208
549#define RNDIS_OID_GEN_MULTICAST_BYTES_RCV 0x00020209
550#define RNDIS_OID_GEN_MULTICAST_FRAMES_RCV 0x0002020A
551#define RNDIS_OID_GEN_BROADCAST_BYTES_RCV 0x0002020B
552#define RNDIS_OID_GEN_BROADCAST_FRAMES_RCV 0x0002020C
553
554#define RNDIS_OID_GEN_RCV_CRC_ERROR 0x0002020D
555#define RNDIS_OID_GEN_TRANSMIT_QUEUE_LENGTH 0x0002020E
556
557#define RNDIS_OID_GEN_GET_TIME_CAPS 0x0002020F
558#define RNDIS_OID_GEN_GET_NETCARD_TIME 0x00020210
559
560/* These are connection-oriented general OIDs. */
561/* These replace the above OIDs for connection-oriented media. */
562#define RNDIS_OID_GEN_CO_SUPPORTED_LIST 0x00010101
563#define RNDIS_OID_GEN_CO_HARDWARE_STATUS 0x00010102
564#define RNDIS_OID_GEN_CO_MEDIA_SUPPORTED 0x00010103
565#define RNDIS_OID_GEN_CO_MEDIA_IN_USE 0x00010104
566#define RNDIS_OID_GEN_CO_LINK_SPEED 0x00010105
567#define RNDIS_OID_GEN_CO_VENDOR_ID 0x00010106
568#define RNDIS_OID_GEN_CO_VENDOR_DESCRIPTION 0x00010107
569#define RNDIS_OID_GEN_CO_DRIVER_VERSION 0x00010108
570#define RNDIS_OID_GEN_CO_PROTOCOL_OPTIONS 0x00010109
571#define RNDIS_OID_GEN_CO_MAC_OPTIONS 0x0001010A
572#define RNDIS_OID_GEN_CO_MEDIA_CONNECT_STATUS 0x0001010B
573#define RNDIS_OID_GEN_CO_VENDOR_DRIVER_VERSION 0x0001010C
574#define RNDIS_OID_GEN_CO_MINIMUM_LINK_SPEED 0x0001010D
575
576#define RNDIS_OID_GEN_CO_GET_TIME_CAPS 0x00010201
577#define RNDIS_OID_GEN_CO_GET_NETCARD_TIME 0x00010202
578
579/* These are connection-oriented statistics OIDs. */
580#define RNDIS_OID_GEN_CO_XMIT_PDUS_OK 0x00020101
581#define RNDIS_OID_GEN_CO_RCV_PDUS_OK 0x00020102
582#define RNDIS_OID_GEN_CO_XMIT_PDUS_ERROR 0x00020103
583#define RNDIS_OID_GEN_CO_RCV_PDUS_ERROR 0x00020104
584#define RNDIS_OID_GEN_CO_RCV_PDUS_NO_BUFFER 0x00020105
585
586
587#define RNDIS_OID_GEN_CO_RCV_CRC_ERROR 0x00020201
588#define RNDIS_OID_GEN_CO_TRANSMIT_QUEUE_LENGTH 0x00020202
589#define RNDIS_OID_GEN_CO_BYTES_XMIT 0x00020203
590#define RNDIS_OID_GEN_CO_BYTES_RCV 0x00020204
591#define RNDIS_OID_GEN_CO_BYTES_XMIT_OUTSTANDING 0x00020205
592#define RNDIS_OID_GEN_CO_NETCARD_LOAD 0x00020206
593
594/* These are objects for Connection-oriented media call-managers. */
595#define RNDIS_OID_CO_ADD_PVC 0xFF000001
596#define RNDIS_OID_CO_DELETE_PVC 0xFF000002
597#define RNDIS_OID_CO_GET_CALL_INFORMATION 0xFF000003
598#define RNDIS_OID_CO_ADD_ADDRESS 0xFF000004
599#define RNDIS_OID_CO_DELETE_ADDRESS 0xFF000005
600#define RNDIS_OID_CO_GET_ADDRESSES 0xFF000006
601#define RNDIS_OID_CO_ADDRESS_CHANGE 0xFF000007
602#define RNDIS_OID_CO_SIGNALING_ENABLED 0xFF000008
603#define RNDIS_OID_CO_SIGNALING_DISABLED 0xFF000009
604
605/* 802.3 Objects (Ethernet) */
606#define RNDIS_OID_802_3_PERMANENT_ADDRESS 0x01010101
607#define RNDIS_OID_802_3_CURRENT_ADDRESS 0x01010102
608#define RNDIS_OID_802_3_MULTICAST_LIST 0x01010103
609#define RNDIS_OID_802_3_MAXIMUM_LIST_SIZE 0x01010104
610#define RNDIS_OID_802_3_MAC_OPTIONS 0x01010105
611
612#define NDIS_802_3_MAC_OPTION_PRIORITY 0x00000001
613
614#define RNDIS_OID_802_3_RCV_ERROR_ALIGNMENT 0x01020101
615#define RNDIS_OID_802_3_XMIT_ONE_COLLISION 0x01020102
616#define RNDIS_OID_802_3_XMIT_MORE_COLLISIONS 0x01020103
617
618#define RNDIS_OID_802_3_XMIT_DEFERRED 0x01020201
619#define RNDIS_OID_802_3_XMIT_MAX_COLLISIONS 0x01020202
620#define RNDIS_OID_802_3_RCV_OVERRUN 0x01020203
621#define RNDIS_OID_802_3_XMIT_UNDERRUN 0x01020204
622#define RNDIS_OID_802_3_XMIT_HEARTBEAT_FAILURE 0x01020205
623#define RNDIS_OID_802_3_XMIT_TIMES_CRS_LOST 0x01020206
624#define RNDIS_OID_802_3_XMIT_LATE_COLLISIONS 0x01020207
625
626/* Remote NDIS message types */
627#define REMOTE_NDIS_PACKET_MSG 0x00000001
628#define REMOTE_NDIS_INITIALIZE_MSG 0x00000002
629#define REMOTE_NDIS_HALT_MSG 0x00000003
630#define REMOTE_NDIS_QUERY_MSG 0x00000004
631#define REMOTE_NDIS_SET_MSG 0x00000005
632#define REMOTE_NDIS_RESET_MSG 0x00000006
633#define REMOTE_NDIS_INDICATE_STATUS_MSG 0x00000007
634#define REMOTE_NDIS_KEEPALIVE_MSG 0x00000008
635
636#define REMOTE_CONDIS_MP_CREATE_VC_MSG 0x00008001
637#define REMOTE_CONDIS_MP_DELETE_VC_MSG 0x00008002
638#define REMOTE_CONDIS_MP_ACTIVATE_VC_MSG 0x00008005
639#define REMOTE_CONDIS_MP_DEACTIVATE_VC_MSG 0x00008006
640#define REMOTE_CONDIS_INDICATE_STATUS_MSG 0x00008007
641
642/* Remote NDIS message completion types */
643#define REMOTE_NDIS_INITIALIZE_CMPLT 0x80000002
644#define REMOTE_NDIS_QUERY_CMPLT 0x80000004
645#define REMOTE_NDIS_SET_CMPLT 0x80000005
646#define REMOTE_NDIS_RESET_CMPLT 0x80000006
647#define REMOTE_NDIS_KEEPALIVE_CMPLT 0x80000008
648
649#define REMOTE_CONDIS_MP_CREATE_VC_CMPLT 0x80008001
650#define REMOTE_CONDIS_MP_DELETE_VC_CMPLT 0x80008002
651#define REMOTE_CONDIS_MP_ACTIVATE_VC_CMPLT 0x80008005
652#define REMOTE_CONDIS_MP_DEACTIVATE_VC_CMPLT 0x80008006
653
654/*
655 * Reserved message type for private communication between lower-layer host
656 * driver and remote device, if necessary.
657 */
658#define REMOTE_NDIS_BUS_MSG 0xff000001
659
660/* Defines for DeviceFlags in struct rndis_initialize_complete */
661#define RNDIS_DF_CONNECTIONLESS 0x00000001
662#define RNDIS_DF_CONNECTION_ORIENTED 0x00000002
663#define RNDIS_DF_RAW_DATA 0x00000004
664
665/* Remote NDIS medium types. */
666#define RNDIS_MEDIUM_802_3 0x00000000
667#define RNDIS_MEDIUM_802_5 0x00000001
668#define RNDIS_MEDIUM_FDDI 0x00000002
669#define RNDIS_MEDIUM_WAN 0x00000003
670#define RNDIS_MEDIUM_LOCAL_TALK 0x00000004
671#define RNDIS_MEDIUM_ARCNET_RAW 0x00000006
672#define RNDIS_MEDIUM_ARCNET_878_2 0x00000007
673#define RNDIS_MEDIUM_ATM 0x00000008
674#define RNDIS_MEDIUM_WIRELESS_WAN 0x00000009
675#define RNDIS_MEDIUM_IRDA 0x0000000a
676#define RNDIS_MEDIUM_CO_WAN 0x0000000b
677/* Not a real medium, defined as an upper-bound */
678#define RNDIS_MEDIUM_MAX 0x0000000d
679
680
681/* Remote NDIS medium connection states. */
682#define RNDIS_MEDIA_STATE_CONNECTED 0x00000000
683#define RNDIS_MEDIA_STATE_DISCONNECTED 0x00000001
684
685/* Remote NDIS version numbers */
686#define RNDIS_MAJOR_VERSION 0x00000001
687#define RNDIS_MINOR_VERSION 0x00000000
688
689
690/* NdisInitialize message */
691struct rndis_initialize_request {
692 u32 req_id;
693 u32 major_ver;
694 u32 minor_ver;
695 u32 max_xfer_size;
696};
697
698/* Response to NdisInitialize */
699struct rndis_initialize_complete {
700 u32 req_id;
701 u32 status;
702 u32 major_ver;
703 u32 minor_ver;
704 u32 dev_flags;
705 u32 medium;
706 u32 max_pkt_per_msg;
707 u32 max_xfer_size;
708 u32 pkt_alignment_factor;
709 u32 af_list_offset;
710 u32 af_list_size;
711};
712
713/* Call manager devices only: Information about an address family */
714/* supported by the device is appended to the response to NdisInitialize. */
715struct rndis_co_address_family {
716 u32 address_family;
717 u32 major_ver;
718 u32 minor_ver;
719};
720
721/* NdisHalt message */
722struct rndis_halt_request {
723 u32 req_id;
724};
725
726/* NdisQueryRequest message */
727struct rndis_query_request {
728 u32 req_id;
729 u32 oid;
730 u32 info_buflen;
731 u32 info_buf_offset;
732 u32 dev_vc_handle;
733};
734
735/* Response to NdisQueryRequest */
736struct rndis_query_complete {
737 u32 req_id;
738 u32 status;
739 u32 info_buflen;
740 u32 info_buf_offset;
741};
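/*
 * Editor's illustrative sketch (assumption, not part of this patch):
 * info_buf_offset counts bytes from the start of this completion structure
 * (the same convention the MESSAGE_TO_INFO_BUFFER() helper further down
 * encodes), so the returned OID data is located as below.
 * example_query_info_buf() is hypothetical.
 */
static inline void *example_query_info_buf(struct rndis_query_complete *cmpl)
{
	return (u8 *)cmpl + cmpl->info_buf_offset;
}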
742
743/* NdisSetRequest message */
744struct rndis_set_request {
745 u32 req_id;
746 u32 oid;
747 u32 info_buflen;
748 u32 info_buf_offset;
749 u32 dev_vc_handle;
750};
751
752/* Response to NdisSetRequest */
753struct rndis_set_complete {
754 u32 req_id;
755 u32 status;
756};
757
758/* NdisReset message */
759struct rndis_reset_request {
760 u32 reserved;
761};
762
763/* Response to NdisReset */
764struct rndis_reset_complete {
765 u32 status;
766 u32 addressing_reset;
767};
768
769/* NdisMIndicateStatus message */
770struct rndis_indicate_status {
771 u32 status;
772 u32 status_buflen;
773 u32 status_buf_offset;
774};
775
776/* Diagnostic information passed as the status buffer in */
777/* struct rndis_indicate_status messages signifying error conditions. */
778struct rndis_diagnostic_info {
779 u32 diag_status;
780 u32 error_offset;
781};
782
783/* NdisKeepAlive message */
784struct rndis_keepalive_request {
785 u32 req_id;
786};
787
788/* Response to NdisKeepAlive */
789struct rndis_keepalive_complete {
790 u32 req_id;
791 u32 status;
792};
793
794/*
795 * Data message. All Offset fields contain byte offsets from the beginning of
796 * struct rndis_packet. All Length fields are in bytes. VcHandle is set
797 * to 0 for connectionless data, otherwise it contains the VC handle.
798 */
799struct rndis_packet {
800 u32 data_offset;
801 u32 data_len;
802 u32 oob_data_offset;
803 u32 oob_data_len;
804 u32 num_oob_data_elements;
805 u32 per_pkt_info_offset;
806 u32 per_pkt_info_len;
807 u32 vc_handle;
808 u32 reserved;
809};
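/*
 * Editor's illustrative sketch (assumption, not part of this patch): all
 * offsets in struct rndis_packet are relative to the start of the structure
 * itself, so the Ethernet frame carried by a data message is found as below.
 * example_rndis_pkt_data() is hypothetical.
 */
static inline void *example_rndis_pkt_data(struct rndis_packet *pkt)
{
	return pkt->data_len ? (u8 *)pkt + pkt->data_offset : NULL;
}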
810
811/* Optional Out of Band data associated with a Data message. */
812struct rndis_oobd {
813 u32 size;
814 u32 type;
815 u32 class_info_offset;
816};
817
818/* Packet extension field contents associated with a Data message. */
819struct rndis_per_packet_info {
820 u32 size;
821 u32 type;
822 u32 per_pkt_info_offset;
823};
824
825/* Format of Information buffer passed in a SetRequest for the OID */
826/* OID_GEN_RNDIS_CONFIG_PARAMETER. */
827struct rndis_config_parameter_info {
828 u32 parameter_name_offset;
829 u32 parameter_name_length;
830 u32 parameter_type;
831 u32 parameter_value_offset;
832 u32 parameter_value_length;
833};
834
835/* Values for ParameterType in struct rndis_config_parameter_info */
836#define RNDIS_CONFIG_PARAM_TYPE_INTEGER 0
837#define RNDIS_CONFIG_PARAM_TYPE_STRING 2
838
839/* CONDIS Miniport messages for connection oriented devices */
840/* that do not implement a call manager. */
841
842/* CoNdisMiniportCreateVc message */
843struct rcondis_mp_create_vc {
844 u32 req_id;
845 u32 ndis_vc_handle;
846};
847
848/* Response to CoNdisMiniportCreateVc */
849struct rcondis_mp_create_vc_complete {
850 u32 req_id;
851 u32 dev_vc_handle;
852 u32 status;
853};
854
855/* CoNdisMiniportDeleteVc message */
856struct rcondis_mp_delete_vc {
857 u32 req_id;
858 u32 dev_vc_handle;
859};
860
861/* Response to CoNdisMiniportDeleteVc */
862struct rcondis_mp_delete_vc_complete {
863 u32 req_id;
864 u32 status;
865};
866
867/* CoNdisMiniportQueryRequest message */
868struct rcondis_mp_query_request {
869 u32 req_id;
870 u32 request_type;
871 u32 oid;
872 u32 dev_vc_handle;
873 u32 info_buflen;
874 u32 info_buf_offset;
875};
876
877/* CoNdisMiniportSetRequest message */
878struct rcondis_mp_set_request {
879 u32 req_id;
880 u32 request_type;
881 u32 oid;
882 u32 dev_vc_handle;
883 u32 info_buflen;
884 u32 info_buf_offset;
885};
886
887/* CoNdisIndicateStatus message */
888struct rcondis_indicate_status {
889 u32 ndis_vc_handle;
890 u32 status;
891 u32 status_buflen;
892 u32 status_buf_offset;
893};
894
895/* CONDIS Call/VC parameters */
896struct rcondis_specific_parameters {
897 u32 parameter_type;
898 u32 parameter_length;
899 u32 parameter_offset;
900};
901
902struct rcondis_media_parameters {
903 u32 flags;
904 u32 reserved1;
905 u32 reserved2;
906 struct rcondis_specific_parameters media_specific;
907};
908
909struct rndis_flowspec {
910 u32 token_rate;
911 u32 token_bucket_size;
912 u32 peak_bandwidth;
913 u32 latency;
914 u32 delay_variation;
915 u32 service_type;
916 u32 max_sdu_size;
917 u32 minimum_policed_size;
918};
919
920struct rcondis_call_manager_parameters {
921 struct rndis_flowspec transmit;
922 struct rndis_flowspec receive;
923 struct rcondis_specific_parameters call_mgr_specific;
924};
925
926/* CoNdisMiniportActivateVc message */
927struct rcondis_mp_activate_vc_request {
928 u32 req_id;
929 u32 flags;
930 u32 dev_vc_handle;
931 u32 media_params_offset;
932 u32 media_params_length;
933 u32 call_mgr_params_offset;
934 u32 call_mgr_params_length;
935};
936
937/* Response to CoNdisMiniportActivateVc */
938struct rcondis_mp_activate_vc_complete {
939 u32 req_id;
940 u32 status;
941};
942
943/* CoNdisMiniportDeactivateVc message */
944struct rcondis_mp_deactivate_vc_request {
945 u32 req_id;
946 u32 flags;
947 u32 dev_vc_handle;
948};
949
950/* Response to CoNdisMiniportDeactivateVc */
951struct rcondis_mp_deactivate_vc_complete {
952 u32 req_id;
953 u32 status;
954};
955
956
957/* union with all of the RNDIS messages */
958union rndis_message_container {
959 struct rndis_packet pkt;
960 struct rndis_initialize_request init_req;
961 struct rndis_halt_request halt_req;
962 struct rndis_query_request query_req;
963 struct rndis_set_request set_req;
964 struct rndis_reset_request reset_req;
965 struct rndis_keepalive_request keep_alive_req;
966 struct rndis_indicate_status indicate_status;
967 struct rndis_initialize_complete init_complete;
968 struct rndis_query_complete query_complete;
969 struct rndis_set_complete set_complete;
970 struct rndis_reset_complete reset_complete;
971 struct rndis_keepalive_complete keep_alive_complete;
972 struct rcondis_mp_create_vc co_miniport_create_vc;
973 struct rcondis_mp_delete_vc co_miniport_delete_vc;
974 struct rcondis_indicate_status co_indicate_status;
975 struct rcondis_mp_activate_vc_request co_miniport_activate_vc;
976 struct rcondis_mp_deactivate_vc_request co_miniport_deactivate_vc;
977 struct rcondis_mp_create_vc_complete co_miniport_create_vc_complete;
978 struct rcondis_mp_delete_vc_complete co_miniport_delete_vc_complete;
979 struct rcondis_mp_activate_vc_complete co_miniport_activate_vc_complete;
980 struct rcondis_mp_deactivate_vc_complete
981 co_miniport_deactivate_vc_complete;
982};
983
984/* Remote NDIS message format */
985struct rndis_message {
986 u32 ndis_msg_type;
987
988 /* Total length of this message, from the beginning */
989 * of the struct rndis_message, in bytes. */
990 u32 msg_len;
991
992 /* Actual message */
993 union rndis_message_container msg;
994};
995
996
997struct rndis_filter_packet {
998 void *completion_ctx;
999 void (*completion)(void *context);
1000 struct rndis_message msg;
1001};
1002
1003/* Handy macros */
1004
1005/* get the size of an RNDIS message. Pass in the message type, */
1006/* struct rndis_set_request, struct rndis_packet for example */
1007#define RNDIS_MESSAGE_SIZE(msg) \
1008 (sizeof(msg) + (sizeof(struct rndis_message) - \
1009 sizeof(union rndis_message_container)))
1010
1011/* get pointer to info buffer with message pointer */
1012#define MESSAGE_TO_INFO_BUFFER(msg) \
1013 (((unsigned char *)(msg)) + msg->info_buf_offset)
1014
1015/* get pointer to status buffer with message pointer */
1016#define MESSAGE_TO_STATUS_BUFFER(msg) \
1017 (((unsigned char *)(msg)) + msg->status_buf_offset)
1018
1019/* get pointer to OOBD buffer with message pointer */
1020#define MESSAGE_TO_OOBD_BUFFER(msg) \
1021 (((unsigned char *)(msg)) + msg->oob_data_offset)
1022
1023/* get pointer to data buffer with message pointer */
1024#define MESSAGE_TO_DATA_BUFFER(msg) \
1025 (((unsigned char *)(msg)) + msg->per_pkt_info_offset)
1026
1027/* get pointer to contained message from NDIS_MESSAGE pointer */
1028#define RNDIS_MESSAGE_PTR_TO_MESSAGE_PTR(rndis_msg) \
1029 ((void *) &rndis_msg->msg)
1030
1031/* get pointer to contained message from NDIS_MESSAGE pointer */
1032#define RNDIS_MESSAGE_RAW_PTR_TO_MESSAGE_PTR(rndis_msg) \
1033 ((void *) rndis_msg)
1034
1035
1036#define __struct_bcount(x)
1037
1038
1039
1040#define RNDIS_HEADER_SIZE (sizeof(struct rndis_message) - \
1041 sizeof(union rndis_message_container))
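/*
 * Editor's illustrative sketch (assumption, not part of this patch):
 * RNDIS_MESSAGE_SIZE() above yields the bytes needed for an rndis_message
 * carrying one specific body instead of the whole union, i.e.
 * sizeof(body) + RNDIS_HEADER_SIZE. example_halt_msg_size() is hypothetical.
 */
static inline u32 example_halt_msg_size(void)
{
	/* == sizeof(struct rndis_halt_request) + RNDIS_HEADER_SIZE */
	return RNDIS_MESSAGE_SIZE(struct rndis_halt_request);
}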
1042
1043#define NDIS_PACKET_TYPE_DIRECTED 0x00000001
1044#define NDIS_PACKET_TYPE_MULTICAST 0x00000002
1045#define NDIS_PACKET_TYPE_ALL_MULTICAST 0x00000004
1046#define NDIS_PACKET_TYPE_BROADCAST 0x00000008
1047#define NDIS_PACKET_TYPE_SOURCE_ROUTING 0x00000010
1048#define NDIS_PACKET_TYPE_PROMISCUOUS 0x00000020
1049#define NDIS_PACKET_TYPE_SMT 0x00000040
1050#define NDIS_PACKET_TYPE_ALL_LOCAL 0x00000080
1051#define NDIS_PACKET_TYPE_GROUP 0x00000100
1052#define NDIS_PACKET_TYPE_ALL_FUNCTIONAL 0x00000200
1053#define NDIS_PACKET_TYPE_FUNCTIONAL 0x00000400
1054#define NDIS_PACKET_TYPE_MAC_FRAME 0x00000800
1055
1056
1057
1058#endif /* _HYPERV_NET_H */
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
new file mode 100644
index 000000000000..28e69a6c74a1
--- /dev/null
+++ b/drivers/net/hyperv/netvsc.c
@@ -0,0 +1,939 @@
1/*
2 * Copyright (c) 2009, Microsoft Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
17 * Authors:
18 * Haiyang Zhang <haiyangz@microsoft.com>
19 * Hank Janssen <hjanssen@microsoft.com>
20 */
21#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22
23#include <linux/kernel.h>
24#include <linux/sched.h>
25#include <linux/wait.h>
26#include <linux/mm.h>
27#include <linux/delay.h>
28#include <linux/io.h>
29#include <linux/slab.h>
30#include <linux/netdevice.h>
31
32#include "hyperv_net.h"
33
34
35static struct netvsc_device *alloc_net_device(struct hv_device *device)
36{
37 struct netvsc_device *net_device;
38 struct net_device *ndev = hv_get_drvdata(device);
39
40 net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
41 if (!net_device)
42 return NULL;
43
44
45 net_device->destroy = false;
46 net_device->dev = device;
47 net_device->ndev = ndev;
48
49 hv_set_drvdata(device, net_device);
50 return net_device;
51}
52
53static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
54{
55 struct netvsc_device *net_device;
56
57 net_device = hv_get_drvdata(device);
58 if (net_device && net_device->destroy)
59 net_device = NULL;
60
61 return net_device;
62}
63
64static struct netvsc_device *get_inbound_net_device(struct hv_device *device)
65{
66 struct netvsc_device *net_device;
67
68 net_device = hv_get_drvdata(device);
69
70 if (!net_device)
71 goto get_in_err;
72
73 if (net_device->destroy &&
74 atomic_read(&net_device->num_outstanding_sends) == 0)
75 net_device = NULL;
76
77get_in_err:
78 return net_device;
79}
80
81
82static int netvsc_destroy_recv_buf(struct netvsc_device *net_device)
83{
84 struct nvsp_message *revoke_packet;
85 int ret = 0;
86 struct net_device *ndev = net_device->ndev;
87
88 /*
89 * If we got a section count, it means we received a
90 * SendReceiveBufferComplete msg (i.e. we sent a
91 * NvspMessage1TypeSendReceiveBuffer msg); therefore, we need
92 * to send a revoke msg here
93 */
94 if (net_device->recv_section_cnt) {
95 /* Send the revoke receive buffer */
96 revoke_packet = &net_device->revoke_packet;
97 memset(revoke_packet, 0, sizeof(struct nvsp_message));
98
99 revoke_packet->hdr.msg_type =
100 NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
101 revoke_packet->msg.v1_msg.
102 revoke_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;
103
104 ret = vmbus_sendpacket(net_device->dev->channel,
105 revoke_packet,
106 sizeof(struct nvsp_message),
107 (unsigned long)revoke_packet,
108 VM_PKT_DATA_INBAND, 0);
109 /*
110 * If we failed here, we might as well return and
111 * have a leak rather than continue and risk a bugcheck
112 */
113 if (ret != 0) {
114 netdev_err(ndev, "unable to send "
115 "revoke receive buffer to netvsp\n");
116 return ret;
117 }
118 }
119
120 /* Teardown the gpadl on the vsp end */
121 if (net_device->recv_buf_gpadl_handle) {
122 ret = vmbus_teardown_gpadl(net_device->dev->channel,
123 net_device->recv_buf_gpadl_handle);
124
125 /* If we failed here, we might as well return and have a leak
126 * rather than continue and risk a bugcheck
127 */
128 if (ret != 0) {
129 netdev_err(ndev,
130 "unable to teardown receive buffer's gpadl\n");
131 return ret;
132 }
133 net_device->recv_buf_gpadl_handle = 0;
134 }
135
136 if (net_device->recv_buf) {
137 /* Free up the receive buffer */
138 free_pages((unsigned long)net_device->recv_buf,
139 get_order(net_device->recv_buf_size));
140 net_device->recv_buf = NULL;
141 }
142
143 if (net_device->recv_section) {
144 net_device->recv_section_cnt = 0;
145 kfree(net_device->recv_section);
146 net_device->recv_section = NULL;
147 }
148
149 return ret;
150}
151
152static int netvsc_init_recv_buf(struct hv_device *device)
153{
154 int ret = 0;
155 int t;
156 struct netvsc_device *net_device;
157 struct nvsp_message *init_packet;
158 struct net_device *ndev;
159
160 net_device = get_outbound_net_device(device);
161 if (!net_device)
162 return -ENODEV;
163 ndev = net_device->ndev;
164
165 net_device->recv_buf =
166 (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
167 get_order(net_device->recv_buf_size));
168 if (!net_device->recv_buf) {
169 netdev_err(ndev, "unable to allocate receive "
170 "buffer of size %d\n", net_device->recv_buf_size);
171 ret = -ENOMEM;
172 goto cleanup;
173 }
174
175 /*
176 * Establish the gpadl handle for this buffer on this
177 * channel. Note: This call uses the vmbus connection rather
178 * than the channel to establish the gpadl handle.
179 */
180 ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
181 net_device->recv_buf_size,
182 &net_device->recv_buf_gpadl_handle);
183 if (ret != 0) {
184 netdev_err(ndev,
185 "unable to establish receive buffer's gpadl\n");
186 goto cleanup;
187 }
188
189
190 /* Notify the NetVsp of the gpadl handle */
191 init_packet = &net_device->channel_init_pkt;
192
193 memset(init_packet, 0, sizeof(struct nvsp_message));
194
195 init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
196 init_packet->msg.v1_msg.send_recv_buf.
197 gpadl_handle = net_device->recv_buf_gpadl_handle;
198 init_packet->msg.v1_msg.
199 send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;
200
201 /* Send the gpadl notification request */
202 ret = vmbus_sendpacket(device->channel, init_packet,
203 sizeof(struct nvsp_message),
204 (unsigned long)init_packet,
205 VM_PKT_DATA_INBAND,
206 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
207 if (ret != 0) {
208 netdev_err(ndev,
209 "unable to send receive buffer's gpadl to netvsp\n");
210 goto cleanup;
211 }
212
213 t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
214 BUG_ON(t == 0);
215
216
217 /* Check the response */
218 if (init_packet->msg.v1_msg.
219 send_recv_buf_complete.status != NVSP_STAT_SUCCESS) {
220 netdev_err(ndev, "Unable to complete receive buffer "
221 "initialization with NetVsp - status %d\n",
222 init_packet->msg.v1_msg.
223 send_recv_buf_complete.status);
224 ret = -EINVAL;
225 goto cleanup;
226 }
227
228 /* Parse the response */
229
230 net_device->recv_section_cnt = init_packet->msg.
231 v1_msg.send_recv_buf_complete.num_sections;
232
233 net_device->recv_section = kmemdup(init_packet->msg.v1_msg.send_recv_buf_complete.sections,
234 net_device->recv_section_cnt * sizeof(struct nvsp_1_receive_buffer_section),
235 GFP_KERNEL);
236 if (net_device->recv_section == NULL) {
237 ret = -EINVAL;
238 goto cleanup;
239 }
240
241 /*
242 * For 1st release, there should only be 1 section that represents the
243 * entire receive buffer
244 */
245 if (net_device->recv_section_cnt != 1 ||
246 net_device->recv_section->offset != 0) {
247 ret = -EINVAL;
248 goto cleanup;
249 }
250
251 goto exit;
252
253cleanup:
254 netvsc_destroy_recv_buf(net_device);
255
256exit:
257 return ret;
258}
259
260
261static int netvsc_connect_vsp(struct hv_device *device)
262{
263 int ret, t;
264 struct netvsc_device *net_device;
265 struct nvsp_message *init_packet;
266 int ndis_version;
267 struct net_device *ndev;
268
269 net_device = get_outbound_net_device(device);
270 if (!net_device)
271 return -ENODEV;
272 ndev = net_device->ndev;
273
274 init_packet = &net_device->channel_init_pkt;
275
276 memset(init_packet, 0, sizeof(struct nvsp_message));
277 init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
278 init_packet->msg.init_msg.init.min_protocol_ver =
279 NVSP_MIN_PROTOCOL_VERSION;
280 init_packet->msg.init_msg.init.max_protocol_ver =
281 NVSP_MAX_PROTOCOL_VERSION;
282
283 /* Send the init request */
284 ret = vmbus_sendpacket(device->channel, init_packet,
285 sizeof(struct nvsp_message),
286 (unsigned long)init_packet,
287 VM_PKT_DATA_INBAND,
288 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
289
290 if (ret != 0)
291 goto cleanup;
292
293 t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
294
295 if (t == 0) {
296 ret = -ETIMEDOUT;
297 goto cleanup;
298 }
299
300 if (init_packet->msg.init_msg.init_complete.status !=
301 NVSP_STAT_SUCCESS) {
302 ret = -EINVAL;
303 goto cleanup;
304 }
305
306 if (init_packet->msg.init_msg.init_complete.
307 negotiated_protocol_ver != NVSP_PROTOCOL_VERSION_1) {
308 ret = -EPROTO;
309 goto cleanup;
310 }
311 /* Send the ndis version */
312 memset(init_packet, 0, sizeof(struct nvsp_message));
313
314 ndis_version = 0x00050000;
315
316 init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
317 init_packet->msg.v1_msg.
318 send_ndis_ver.ndis_major_ver =
319 (ndis_version & 0xFFFF0000) >> 16;
320 init_packet->msg.v1_msg.
321 send_ndis_ver.ndis_minor_ver =
322 ndis_version & 0xFFFF;
323
324 /* Send the init request */
325 ret = vmbus_sendpacket(device->channel, init_packet,
326 sizeof(struct nvsp_message),
327 (unsigned long)init_packet,
328 VM_PKT_DATA_INBAND, 0);
329 if (ret != 0)
330 goto cleanup;
331
332 /* Post the big receive buffer to NetVSP */
333 ret = netvsc_init_recv_buf(device);
334
335cleanup:
336 return ret;
337}
338
339static void netvsc_disconnect_vsp(struct netvsc_device *net_device)
340{
341 netvsc_destroy_recv_buf(net_device);
342}
343
344/*
345 * netvsc_device_remove - Callback when the root bus device is removed
346 */
347int netvsc_device_remove(struct hv_device *device)
348{
349 struct netvsc_device *net_device;
350 struct hv_netvsc_packet *netvsc_packet, *pos;
351 unsigned long flags;
352
353 net_device = hv_get_drvdata(device);
354 spin_lock_irqsave(&device->channel->inbound_lock, flags);
355 net_device->destroy = true;
356 spin_unlock_irqrestore(&device->channel->inbound_lock, flags);
357
358 /* Wait for all send completions */
359 while (atomic_read(&net_device->num_outstanding_sends)) {
360 dev_info(&device->device,
361 "waiting for %d requests to complete...\n",
362 atomic_read(&net_device->num_outstanding_sends));
363 udelay(100);
364 }
365
366 netvsc_disconnect_vsp(net_device);
367
368 /*
369 * Since we have already drained, we don't need to busy wait
370 * as was done in final_release_stor_device()
371 * Note that we cannot set the ext pointer to NULL until
372 * we have drained - to drain the outgoing packets, we need to
373 * allow incoming packets.
374 */
375
376 spin_lock_irqsave(&device->channel->inbound_lock, flags);
377 hv_set_drvdata(device, NULL);
378 spin_unlock_irqrestore(&device->channel->inbound_lock, flags);
379
380 /*
381 * At this point, no one should be accessing net_device
382 * except in here
383 */
384 dev_notice(&device->device, "net device safe to remove\n");
385
386 /* Now, we can close the channel safely */
387 vmbus_close(device->channel);
388
389 /* Release all resources */
390 list_for_each_entry_safe(netvsc_packet, pos,
391 &net_device->recv_pkt_list, list_ent) {
392 list_del(&netvsc_packet->list_ent);
393 kfree(netvsc_packet);
394 }
395
396 kfree(net_device);
397 return 0;
398}
399
400static void netvsc_send_completion(struct hv_device *device,
401 struct vmpacket_descriptor *packet)
402{
403 struct netvsc_device *net_device;
404 struct nvsp_message *nvsp_packet;
405 struct hv_netvsc_packet *nvsc_packet;
406 struct net_device *ndev;
407
408 net_device = get_inbound_net_device(device);
409 if (!net_device)
410 return;
411 ndev = net_device->ndev;
412
413 nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
414 (packet->offset8 << 3));
415
416 if ((nvsp_packet->hdr.msg_type == NVSP_MSG_TYPE_INIT_COMPLETE) ||
417 (nvsp_packet->hdr.msg_type ==
418 NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE) ||
419 (nvsp_packet->hdr.msg_type ==
420 NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE)) {
421 /* Copy the response back */
422 memcpy(&net_device->channel_init_pkt, nvsp_packet,
423 sizeof(struct nvsp_message));
424 complete(&net_device->channel_init_wait);
425 } else if (nvsp_packet->hdr.msg_type ==
426 NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE) {
427 /* Get the send context */
428 nvsc_packet = (struct hv_netvsc_packet *)(unsigned long)
429 packet->trans_id;
430
431 /* Notify the layer above us */
432 nvsc_packet->completion.send.send_completion(
433 nvsc_packet->completion.send.send_completion_ctx);
434
435 atomic_dec(&net_device->num_outstanding_sends);
436 } else {
437 netdev_err(ndev, "Unknown send completion packet type- "
438 "%d received!!\n", nvsp_packet->hdr.msg_type);
439 }
440
441}
442
443int netvsc_send(struct hv_device *device,
444 struct hv_netvsc_packet *packet)
445{
446 struct netvsc_device *net_device;
447 int ret = 0;
448 struct nvsp_message sendMessage;
449 struct net_device *ndev;
450
451 net_device = get_outbound_net_device(device);
452 if (!net_device)
453 return -ENODEV;
454 ndev = net_device->ndev;
455
456 sendMessage.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
457 if (packet->is_data_pkt) {
458 /* 0 is RMC_DATA; */
459 sendMessage.msg.v1_msg.send_rndis_pkt.channel_type = 0;
460 } else {
461 /* 1 is RMC_CONTROL; */
462 sendMessage.msg.v1_msg.send_rndis_pkt.channel_type = 1;
463 }
464
465 /* Not using send buffer section */
466 sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_index =
467 0xFFFFFFFF;
468 sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0;
469
470 if (packet->page_buf_cnt) {
471 ret = vmbus_sendpacket_pagebuffer(device->channel,
472 packet->page_buf,
473 packet->page_buf_cnt,
474 &sendMessage,
475 sizeof(struct nvsp_message),
476 (unsigned long)packet);
477 } else {
478 ret = vmbus_sendpacket(device->channel, &sendMessage,
479 sizeof(struct nvsp_message),
480 (unsigned long)packet,
481 VM_PKT_DATA_INBAND,
482 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
483
484 }
485
486 if (ret != 0)
487 netdev_err(ndev, "Unable to send packet %p ret %d\n",
488 packet, ret);
489 else
490 atomic_inc(&net_device->num_outstanding_sends);
491
492 return ret;
493}
494
495static void netvsc_send_recv_completion(struct hv_device *device,
496 u64 transaction_id)
497{
498 struct nvsp_message recvcompMessage;
499 int retries = 0;
500 int ret;
501 struct net_device *ndev;
502 struct netvsc_device *net_device = hv_get_drvdata(device);
503
504 ndev = net_device->ndev;
505
506 recvcompMessage.hdr.msg_type =
507 NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE;
508
509 /* FIXME: Pass in the status */
510 recvcompMessage.msg.v1_msg.send_rndis_pkt_complete.status =
511 NVSP_STAT_SUCCESS;
512
513retry_send_cmplt:
514 /* Send the completion */
515 ret = vmbus_sendpacket(device->channel, &recvcompMessage,
516 sizeof(struct nvsp_message), transaction_id,
517 VM_PKT_COMP, 0);
518 if (ret == 0) {
519 /* success */
520 /* no-op */
521 } else if (ret == -EAGAIN) {
522 /* no more room...wait a bit and attempt to retry 3 times */
523 retries++;
524 netdev_err(ndev, "unable to send receive completion pkt"
525 " (tid %llx)...retrying %d\n", transaction_id, retries);
526
527 if (retries < 4) {
528 udelay(100);
529 goto retry_send_cmplt;
530 } else {
531 netdev_err(ndev, "unable to send receive "
532 "completion pkt (tid %llx)...give up retrying\n",
533 transaction_id);
534 }
535 } else {
536 netdev_err(ndev, "unable to send receive "
537 "completion pkt - %llx\n", transaction_id);
538 }
539}
540
541/* Send a receive completion packet to RNDIS device (ie NetVsp) */
542static void netvsc_receive_completion(void *context)
543{
544 struct hv_netvsc_packet *packet = context;
545 struct hv_device *device = (struct hv_device *)packet->device;
546 struct netvsc_device *net_device;
547 u64 transaction_id = 0;
548 bool fsend_receive_comp = false;
549 unsigned long flags;
550 struct net_device *ndev;
551
552 /*
553 * Even though it seems logical to do a GetOutboundNetDevice() here to
554 * send out receive completion, we are using GetInboundNetDevice()
555 * since we may have disabled outbound traffic already.
556 */
557 net_device = get_inbound_net_device(device);
558 if (!net_device)
559 return;
560 ndev = net_device->ndev;
561
562 /* Overloading use of the lock. */
563 spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
564
565 packet->xfer_page_pkt->count--;
566
567 /*
568 * Last one in the line that represents 1 xfer page packet.
569 * Return the xfer page packet itself to the freelist
570 */
571 if (packet->xfer_page_pkt->count == 0) {
572 fsend_receive_comp = true;
573 transaction_id = packet->completion.recv.recv_completion_tid;
574 list_add_tail(&packet->xfer_page_pkt->list_ent,
575 &net_device->recv_pkt_list);
576
577 }
578
579 /* Put the packet back */
580 list_add_tail(&packet->list_ent, &net_device->recv_pkt_list);
581 spin_unlock_irqrestore(&net_device->recv_pkt_list_lock, flags);
582
583 /* Send a receive completion for the xfer page packet */
584 if (fsend_receive_comp)
585 netvsc_send_recv_completion(device, transaction_id);
586
587}
588
589static void netvsc_receive(struct hv_device *device,
590 struct vmpacket_descriptor *packet)
591{
592 struct netvsc_device *net_device;
593 struct vmtransfer_page_packet_header *vmxferpage_packet;
594 struct nvsp_message *nvsp_packet;
595 struct hv_netvsc_packet *netvsc_packet = NULL;
596 unsigned long start;
597 unsigned long end, end_virtual;
598 /* struct netvsc_driver *netvscDriver; */
599 struct xferpage_packet *xferpage_packet = NULL;
600 int i, j;
601 int count = 0, bytes_remain = 0;
602 unsigned long flags;
603 struct net_device *ndev;
604
605 LIST_HEAD(listHead);
606
607 net_device = get_inbound_net_device(device);
608 if (!net_device)
609 return;
610 ndev = net_device->ndev;
611
612 /*
613 * All inbound packets other than send completion should be xfer page
614 * packet
615 */
616 if (packet->type != VM_PKT_DATA_USING_XFER_PAGES) {
617 netdev_err(ndev, "Unknown packet type received - %d\n",
618 packet->type);
619 return;
620 }
621
622 nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
623 (packet->offset8 << 3));
624
625 /* Make sure this is a valid nvsp packet */
626 if (nvsp_packet->hdr.msg_type !=
627 NVSP_MSG1_TYPE_SEND_RNDIS_PKT) {
628 netdev_err(ndev, "Unknown nvsp packet type received-"
629 " %d\n", nvsp_packet->hdr.msg_type);
630 return;
631 }
632
633 vmxferpage_packet = (struct vmtransfer_page_packet_header *)packet;
634
635 if (vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID) {
636 netdev_err(ndev, "Invalid xfer page set id - "
637 "expecting %x got %x\n", NETVSC_RECEIVE_BUFFER_ID,
638 vmxferpage_packet->xfer_pageset_id);
639 return;
640 }
641
642 /*
643 * Grab free packets (range count + 1) to represent this xfer
644 * page packet. +1 to represent the xfer page packet itself.
645 * We grab it here so that we know exactly how many we can
646 * fulfil
647 */
648 spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
649 while (!list_empty(&net_device->recv_pkt_list)) {
650 list_move_tail(net_device->recv_pkt_list.next, &listHead);
651 if (++count == vmxferpage_packet->range_cnt + 1)
652 break;
653 }
654 spin_unlock_irqrestore(&net_device->recv_pkt_list_lock, flags);
655
656 /*
657 * We need at least 2 netvsc pkts (1 to represent the xfer
658 * page and at least 1 for the range), i.e. we can handle
659 * some of the xfer page packet ranges...
660 */
661 if (count < 2) {
662 netdev_err(ndev, "Got only %d netvsc pkt...needed "
663 "%d pkts. Dropping this xfer page packet completely!\n",
664 count, vmxferpage_packet->range_cnt + 1);
665
666 /* Return it to the freelist */
667 spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
668 for (i = count; i != 0; i--) {
669 list_move_tail(listHead.next,
670 &net_device->recv_pkt_list);
671 }
672 spin_unlock_irqrestore(&net_device->recv_pkt_list_lock,
673 flags);
674
675 netvsc_send_recv_completion(device,
676 vmxferpage_packet->d.trans_id);
677
678 return;
679 }
680
681 /* Remove the 1st packet to represent the xfer page packet itself */
682 xferpage_packet = (struct xferpage_packet *)listHead.next;
683 list_del(&xferpage_packet->list_ent);
684
685 /* This is how much we can satisfy */
686 xferpage_packet->count = count - 1;
687
688 if (xferpage_packet->count != vmxferpage_packet->range_cnt) {
689 netdev_err(ndev, "Needed %d netvsc pkts to satisfy "
690 "this xfer page...got %d\n",
691 vmxferpage_packet->range_cnt, xferpage_packet->count);
692 }
693
694 /* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
695 for (i = 0; i < (count - 1); i++) {
696 netvsc_packet = (struct hv_netvsc_packet *)listHead.next;
697 list_del(&netvsc_packet->list_ent);
698
699 /* Initialize the netvsc packet */
700 netvsc_packet->xfer_page_pkt = xferpage_packet;
701 netvsc_packet->completion.recv.recv_completion =
702 netvsc_receive_completion;
703 netvsc_packet->completion.recv.recv_completion_ctx =
704 netvsc_packet;
705 netvsc_packet->device = device;
706 /* Save this so that we can send it back */
707 netvsc_packet->completion.recv.recv_completion_tid =
708 vmxferpage_packet->d.trans_id;
709
710 netvsc_packet->total_data_buflen =
711 vmxferpage_packet->ranges[i].byte_count;
712 netvsc_packet->page_buf_cnt = 1;
713
714 netvsc_packet->page_buf[0].len =
715 vmxferpage_packet->ranges[i].byte_count;
716
717 start = virt_to_phys((void *)((unsigned long)net_device->
718 recv_buf + vmxferpage_packet->ranges[i].byte_offset));
719
720 netvsc_packet->page_buf[0].pfn = start >> PAGE_SHIFT;
721 end_virtual = (unsigned long)net_device->recv_buf
722 + vmxferpage_packet->ranges[i].byte_offset
723 + vmxferpage_packet->ranges[i].byte_count - 1;
724 end = virt_to_phys((void *)end_virtual);
725
726 /* Calculate the page relative offset */
727 netvsc_packet->page_buf[0].offset =
728 vmxferpage_packet->ranges[i].byte_offset &
729 (PAGE_SIZE - 1);
730 if ((end >> PAGE_SHIFT) != (start >> PAGE_SHIFT)) {
731 /* Handle frame across multiple pages: */
732 netvsc_packet->page_buf[0].len =
733 (netvsc_packet->page_buf[0].pfn <<
734 PAGE_SHIFT)
735 + PAGE_SIZE - start;
736 bytes_remain = netvsc_packet->total_data_buflen -
737 netvsc_packet->page_buf[0].len;
738 for (j = 1; j < NETVSC_PACKET_MAXPAGE; j++) {
739 netvsc_packet->page_buf[j].offset = 0;
740 if (bytes_remain <= PAGE_SIZE) {
741 netvsc_packet->page_buf[j].len =
742 bytes_remain;
743 bytes_remain = 0;
744 } else {
745 netvsc_packet->page_buf[j].len =
746 PAGE_SIZE;
747 bytes_remain -= PAGE_SIZE;
748 }
749 netvsc_packet->page_buf[j].pfn =
750 virt_to_phys((void *)(end_virtual -
751 bytes_remain)) >> PAGE_SHIFT;
752 netvsc_packet->page_buf_cnt++;
753 if (bytes_remain == 0)
754 break;
755 }
756 }
757
758 /* Pass it to the upper layer */
759 rndis_filter_receive(device, netvsc_packet);
760
761 netvsc_receive_completion(netvsc_packet->
762 completion.recv.recv_completion_ctx);
763 }
764
765}
766
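
The loop in netvsc_receive() above is the subtle part: a receive range that crosses a page boundary is split so that page_buf[0] runs from the range's starting offset to the end of that page, full pages follow, and the tail page takes whatever is left. The following user-space sketch reproduces the same arithmetic with PAGE_SIZE fixed at 4096 and plain page indices standing in for virt_to_phys()/PFNs; struct pb and range_to_pagebuf() are names invented for the illustration, not part of the driver.

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define MAXPAGE   4	/* mirrors NETVSC_PACKET_MAXPAGE */

struct pb {			/* simplified stand-in for hv_page_buffer */
	unsigned long page;	/* page index instead of a real PFN */
	unsigned long offset;
	unsigned long len;
};

/*
 * Split [byte_offset, byte_offset + byte_count) of a receive buffer into
 * per-page (offset, len) entries, the way netvsc_receive() fills page_buf[].
 * Returns the number of entries used.
 */
static int range_to_pagebuf(unsigned long byte_offset, unsigned long byte_count,
			    struct pb *buf)
{
	unsigned long end = byte_offset + byte_count - 1;
	unsigned long bytes_remain;
	int i, cnt = 1;

	buf[0].page   = byte_offset / PAGE_SIZE;
	buf[0].offset = byte_offset & (PAGE_SIZE - 1);
	buf[0].len    = byte_count;

	if (end / PAGE_SIZE != byte_offset / PAGE_SIZE) {
		/* Frame crosses a page boundary: the first entry runs to the
		 * end of its page, the rest is handed out page by page. */
		buf[0].len = PAGE_SIZE - buf[0].offset;
		bytes_remain = byte_count - buf[0].len;
		for (i = 1; i < MAXPAGE && bytes_remain; i++) {
			buf[i].offset = 0;
			buf[i].len = bytes_remain < PAGE_SIZE ?
					bytes_remain : PAGE_SIZE;
			bytes_remain -= buf[i].len;
			/* page of the last byte this entry covers */
			buf[i].page = (end - bytes_remain) / PAGE_SIZE;
			cnt++;
		}
	}
	return cnt;
}

int main(void)
{
	struct pb buf[MAXPAGE];
	int i, n = range_to_pagebuf(4000, 1514, buf); /* frame straddles a page */

	for (i = 0; i < n; i++)
		printf("buf[%d]: page %lu offset %lu len %lu\n",
		       i, buf[i].page, buf[i].offset, buf[i].len);
	return 0;
}
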
767static void netvsc_channel_cb(void *context)
768{
769 int ret;
770 struct hv_device *device = context;
771 struct netvsc_device *net_device;
772 u32 bytes_recvd;
773 u64 request_id;
774 unsigned char *packet;
775 struct vmpacket_descriptor *desc;
776 unsigned char *buffer;
777 int bufferlen = NETVSC_PACKET_SIZE;
778 struct net_device *ndev;
779
780 packet = kzalloc(NETVSC_PACKET_SIZE * sizeof(unsigned char),
781 GFP_ATOMIC);
782 if (!packet)
783 return;
784 buffer = packet;
785
786 net_device = get_inbound_net_device(device);
787 if (!net_device)
788 goto out;
789 ndev = net_device->ndev;
790
791 do {
792 ret = vmbus_recvpacket_raw(device->channel, buffer, bufferlen,
793 &bytes_recvd, &request_id);
794 if (ret == 0) {
795 if (bytes_recvd > 0) {
796 desc = (struct vmpacket_descriptor *)buffer;
797 switch (desc->type) {
798 case VM_PKT_COMP:
799 netvsc_send_completion(device, desc);
800 break;
801
802 case VM_PKT_DATA_USING_XFER_PAGES:
803 netvsc_receive(device, desc);
804 break;
805
806 default:
807 netdev_err(ndev,
808 "unhandled packet type %d, "
809 "tid %llx len %d\n",
810 desc->type, request_id,
811 bytes_recvd);
812 break;
813 }
814
815 /* reset */
816 if (bufferlen > NETVSC_PACKET_SIZE) {
817 kfree(buffer);
818 buffer = packet;
819 bufferlen = NETVSC_PACKET_SIZE;
820 }
821 } else {
822 /* reset */
823 if (bufferlen > NETVSC_PACKET_SIZE) {
824 kfree(buffer);
825 buffer = packet;
826 bufferlen = NETVSC_PACKET_SIZE;
827 }
828
829 break;
830 }
831 } else if (ret == -ENOBUFS) {
832 /* Handle large packet */
833 buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
834 if (buffer == NULL) {
835 /* Try again next time around */
836 netdev_err(ndev,
837 "unable to allocate buffer of size "
838 "(%d)!!\n", bytes_recvd);
839 break;
840 }
841
842 bufferlen = bytes_recvd;
843 }
844 } while (1);
845
846out:
847 kfree(buffer);
848 return;
849}
850
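
netvsc_channel_cb() above drains the channel with a fixed scratch buffer and only allocates a larger one when vmbus_recvpacket_raw() returns -ENOBUFS, dropping back to the scratch buffer once the oversized packet has been consumed. Below is a rough user-space sketch of that grow-and-shrink loop, with a stubbed recv_raw() standing in for the VMBus call; the packet sizes, buffer size and names are invented for the illustration.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

#define SCRATCH_SIZE 2048	/* plays the role of NETVSC_PACKET_SIZE */

/*
 * Stand-in for vmbus_recvpacket_raw(): "receives" packets of the sizes
 * listed below, returns -ENOBUFS when the caller's buffer is too small,
 * and reports 0 bytes once the ring is empty.
 */
static const size_t pkt_sizes[] = { 512, 4096, 1024 };
static int recv_raw(void *buf, size_t buflen, size_t *recvd, int *cursor)
{
	if (*cursor >= (int)(sizeof(pkt_sizes) / sizeof(pkt_sizes[0]))) {
		*recvd = 0;
		return 0;		/* nothing left */
	}
	*recvd = pkt_sizes[*cursor];
	if (*recvd > buflen)
		return -ENOBUFS;	/* tell the caller how much it needs */
	memset(buf, 0, *recvd);
	(*cursor)++;
	return 0;
}

int main(void)
{
	unsigned char *scratch = malloc(SCRATCH_SIZE);
	unsigned char *buffer = scratch;
	size_t buflen = SCRATCH_SIZE, recvd;
	int cursor = 0, ret;

	if (!scratch)
		return 1;

	for (;;) {
		ret = recv_raw(buffer, buflen, &recvd, &cursor);
		if (ret == 0) {
			if (recvd == 0)
				break;			/* ring is empty */
			printf("handled a %zu-byte packet in a %zu-byte buffer\n",
			       recvd, buflen);
			if (buflen > SCRATCH_SIZE) {
				/* oversized packet done: fall back to scratch */
				free(buffer);
				buffer = scratch;
				buflen = SCRATCH_SIZE;
			}
		} else if (ret == -ENOBUFS) {
			/* grow to exactly the size that was reported */
			if (buffer != scratch)
				free(buffer);
			buffer = malloc(recvd);
			if (!buffer) {
				buffer = scratch;
				buflen = SCRATCH_SIZE;
				break;		/* try again next time around */
			}
			buflen = recvd;
		}
	}

	if (buffer != scratch)
		free(buffer);
	free(scratch);
	return 0;
}
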
851/*
852 * netvsc_device_add - Callback when the device belonging to this
853 * driver is added
854 */
855int netvsc_device_add(struct hv_device *device, void *additional_info)
856{
857 int ret = 0;
858 int i;
859 int ring_size =
860 ((struct netvsc_device_info *)additional_info)->ring_size;
861 struct netvsc_device *net_device;
862 struct hv_netvsc_packet *packet, *pos;
863 struct net_device *ndev;
864
865 net_device = alloc_net_device(device);
866 if (!net_device) {
867 ret = -ENOMEM;
868 goto cleanup;
869 }
870
871 /*
872 * Coming into this function, struct net_device * is
873 * registered as the driver private data.
874 * In alloc_net_device(), we register struct netvsc_device *
875 * as the driver private data and stash away struct net_device *
876 * in struct netvsc_device *.
877 */
878 ndev = net_device->ndev;
879
880 /* Initialize the NetVSC channel extension */
881 net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
882 spin_lock_init(&net_device->recv_pkt_list_lock);
883
884 INIT_LIST_HEAD(&net_device->recv_pkt_list);
885
886 for (i = 0; i < NETVSC_RECEIVE_PACKETLIST_COUNT; i++) {
887 packet = kzalloc(sizeof(struct hv_netvsc_packet) +
888 (NETVSC_RECEIVE_SG_COUNT *
889 sizeof(struct hv_page_buffer)), GFP_KERNEL);
890 if (!packet)
891 break;
892
893 list_add_tail(&packet->list_ent,
894 &net_device->recv_pkt_list);
895 }
896 init_completion(&net_device->channel_init_wait);
897
898 /* Open the channel */
899 ret = vmbus_open(device->channel, ring_size * PAGE_SIZE,
900 ring_size * PAGE_SIZE, NULL, 0,
901 netvsc_channel_cb, device);
902
903 if (ret != 0) {
904 netdev_err(ndev, "unable to open channel: %d\n", ret);
905 goto cleanup;
906 }
907
908 /* Channel is opened */
909 pr_info("hv_netvsc channel opened successfully\n");
910
911 /* Connect with the NetVsp */
912 ret = netvsc_connect_vsp(device);
913 if (ret != 0) {
914 netdev_err(ndev,
915 "unable to connect to NetVSP - %d\n", ret);
916 goto close;
917 }
918
919 return ret;
920
921close:
922 /* Now, we can close the channel safely */
923 vmbus_close(device->channel);
924
925cleanup:
926
927 if (net_device) {
928 list_for_each_entry_safe(packet, pos,
929 &net_device->recv_pkt_list,
930 list_ent) {
931 list_del(&packet->list_ent);
932 kfree(packet);
933 }
934
935 kfree(net_device);
936 }
937
938 return ret;
939}
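
netvsc_device_add() above pre-allocates a pool of receive packets on recv_pkt_list so that the receive path never has to allocate memory at interrupt time, and the error path walks the list to free whatever was allocated. A condensed user-space sketch of the same pre-allocate/tear-down pattern, using a plain singly linked list in place of list_head (struct rx_pkt, pool_fill() and pool_drain() are invented names, and the sizes are arbitrary):

#include <stdlib.h>
#include <stdio.h>

#define POOL_COUNT 64			/* role of NETVSC_RECEIVE_PACKETLIST_COUNT */

struct rx_pkt {
	struct rx_pkt *next;
	unsigned char data[256];	/* payload area, size is arbitrary here */
};

/*
 * Pre-allocate POOL_COUNT packets onto *pool; partial success is fine,
 * just like the driver, which simply stops when kzalloc() fails.
 */
static int pool_fill(struct rx_pkt **pool)
{
	int i, n = 0;

	for (i = 0; i < POOL_COUNT; i++) {
		struct rx_pkt *p = calloc(1, sizeof(*p));
		if (!p)
			break;
		p->next = *pool;
		*pool = p;
		n++;
	}
	return n;
}

/* Error/teardown path: free whatever made it onto the list. */
static void pool_drain(struct rx_pkt **pool)
{
	while (*pool) {
		struct rx_pkt *p = *pool;
		*pool = p->next;
		free(p);
	}
}

int main(void)
{
	struct rx_pkt *pool = NULL;
	int n = pool_fill(&pool);

	printf("pre-allocated %d receive packets\n", n);
	pool_drain(&pool);
	return 0;
}
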
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
new file mode 100644
index 000000000000..93b0e91cbf98
--- /dev/null
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -0,0 +1,456 @@
1/*
2 * Copyright (c) 2009, Microsoft Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
17 * Authors:
18 * Haiyang Zhang <haiyangz@microsoft.com>
19 * Hank Janssen <hjanssen@microsoft.com>
20 */
21#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22
23#include <linux/init.h>
24#include <linux/atomic.h>
25#include <linux/module.h>
26#include <linux/highmem.h>
27#include <linux/device.h>
28#include <linux/io.h>
29#include <linux/delay.h>
30#include <linux/netdevice.h>
31#include <linux/inetdevice.h>
32#include <linux/etherdevice.h>
33#include <linux/skbuff.h>
34#include <linux/in.h>
35#include <linux/slab.h>
36#include <net/arp.h>
37#include <net/route.h>
38#include <net/sock.h>
39#include <net/pkt_sched.h>
40
41#include "hyperv_net.h"
42
43struct net_device_context {
44 /* point back to our device context */
45 struct hv_device *device_ctx;
46 atomic_t avail;
47 struct delayed_work dwork;
48};
49
50
51#define PACKET_PAGES_LOWATER 8
52/* Need this many pages to handle worst case fragmented packet */
53#define PACKET_PAGES_HIWATER (MAX_SKB_FRAGS + 2)
54
55static int ring_size = 128;
56module_param(ring_size, int, S_IRUGO);
57MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
58
59/* no-op so the netdev core doesn't return -EINVAL when modifying the
60 * multicast address list in SIOCADDMULTI. Hyper-V is set up to get all
61 * multicast when rndis_filter_open() is called. */
62static void netvsc_set_multicast_list(struct net_device *net)
63{
64}
65
66static int netvsc_open(struct net_device *net)
67{
68 struct net_device_context *net_device_ctx = netdev_priv(net);
69 struct hv_device *device_obj = net_device_ctx->device_ctx;
70 int ret = 0;
71
72 /* Open up the device */
73 ret = rndis_filter_open(device_obj);
74 if (ret != 0) {
75 netdev_err(net, "unable to open device (ret %d).\n", ret);
76 return ret;
77 }
78
79 netif_start_queue(net);
80
81 return ret;
82}
83
84static int netvsc_close(struct net_device *net)
85{
86 struct net_device_context *net_device_ctx = netdev_priv(net);
87 struct hv_device *device_obj = net_device_ctx->device_ctx;
88 int ret;
89
90 netif_stop_queue(net);
91
92 ret = rndis_filter_close(device_obj);
93 if (ret != 0)
94 netdev_err(net, "unable to close device (ret %d).\n", ret);
95
96 return ret;
97}
98
99static void netvsc_xmit_completion(void *context)
100{
101 struct hv_netvsc_packet *packet = (struct hv_netvsc_packet *)context;
102 struct sk_buff *skb = (struct sk_buff *)
103 (unsigned long)packet->completion.send.send_completion_tid;
104
105 kfree(packet);
106
107 if (skb) {
108 struct net_device *net = skb->dev;
109 struct net_device_context *net_device_ctx = netdev_priv(net);
110 unsigned int num_pages = skb_shinfo(skb)->nr_frags + 2;
111
112 dev_kfree_skb_any(skb);
113
114 atomic_add(num_pages, &net_device_ctx->avail);
115 if (atomic_read(&net_device_ctx->avail) >=
116 PACKET_PAGES_HIWATER)
117 netif_wake_queue(net);
118 }
119}
120
121static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
122{
123 struct net_device_context *net_device_ctx = netdev_priv(net);
124 struct hv_netvsc_packet *packet;
125 int ret;
126 unsigned int i, num_pages;
127
128 /* Add 1 for skb->data and one more for the RNDIS header */
129 num_pages = skb_shinfo(skb)->nr_frags + 1 + 1;
130 if (num_pages > atomic_read(&net_device_ctx->avail))
131 return NETDEV_TX_BUSY;
132
133 /* Allocate a netvsc packet based on # of frags. */
134 packet = kzalloc(sizeof(struct hv_netvsc_packet) +
135 (num_pages * sizeof(struct hv_page_buffer)) +
136 sizeof(struct rndis_filter_packet), GFP_ATOMIC);
137 if (!packet) {
138 /* out of memory, drop packet */
139 netdev_err(net, "unable to allocate hv_netvsc_packet\n");
140
141 dev_kfree_skb(skb);
142 net->stats.tx_dropped++;
143 return NETDEV_TX_BUSY;
144 }
145
146 packet->extension = (void *)(unsigned long)packet +
147 sizeof(struct hv_netvsc_packet) +
148 (num_pages * sizeof(struct hv_page_buffer));
149
150 /* Set the page buffer count; page_buf[0] is reserved for the RNDIS header */
151 packet->page_buf_cnt = num_pages;
152
153 /* Initialize it from the skb */
154 packet->total_data_buflen = skb->len;
155
156 /* Start filling in the page buffers after the RNDIS header buffer. */
157 packet->page_buf[1].pfn = virt_to_phys(skb->data) >> PAGE_SHIFT;
158 packet->page_buf[1].offset
159 = (unsigned long)skb->data & (PAGE_SIZE - 1);
160 packet->page_buf[1].len = skb_headlen(skb);
161
162 /* Additional fragments are after SKB data */
163 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
164 const skb_frag_t *f = &skb_shinfo(skb)->frags[i];
165
166 packet->page_buf[i+2].pfn = page_to_pfn(skb_frag_page(f));
167 packet->page_buf[i+2].offset = f->page_offset;
168 packet->page_buf[i+2].len = skb_frag_size(f);
169 }
170
171 /* Set the completion routine */
172 packet->completion.send.send_completion = netvsc_xmit_completion;
173 packet->completion.send.send_completion_ctx = packet;
174 packet->completion.send.send_completion_tid = (unsigned long)skb;
175
176 ret = rndis_filter_send(net_device_ctx->device_ctx,
177 packet);
178 if (ret == 0) {
179 net->stats.tx_bytes += skb->len;
180 net->stats.tx_packets++;
181
182 atomic_sub(num_pages, &net_device_ctx->avail);
183 if (atomic_read(&net_device_ctx->avail) < PACKET_PAGES_LOWATER)
184 netif_stop_queue(net);
185 } else {
186 /* we are shutting down or the bus is overloaded, just drop the packet */
187 net->stats.tx_dropped++;
188 kfree(packet);
189 dev_kfree_skb_any(skb);
190 }
191
192 return ret ? NETDEV_TX_BUSY : NETDEV_TX_OK;
193}
194
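
netvsc_start_xmit() and netvsc_xmit_completion() above throttle transmission with a single page counter: every skb costs nr_frags + 2 pages (the linear data plus the RNDIS header page), the queue is stopped when the counter falls below PACKET_PAGES_LOWATER, and the completion path wakes it once the counter is back at or above PACKET_PAGES_HIWATER. A minimal single-threaded sketch of that accounting, with a plain int and printf() standing in for atomic_t and netif_{stop,wake}_queue(); the watermark values and the initial budget of 40 are picked only so the example trips both thresholds.

#include <stdio.h>
#include <stdbool.h>

#define LOWATER 8		/* plays the role of PACKET_PAGES_LOWATER */
#define HIWATER 18		/* plays the role of PACKET_PAGES_HIWATER (MAX_SKB_FRAGS + 2) */

static int avail = 40;		/* small budget, chosen to hit the watermarks */
static bool queue_stopped;

/* Try to queue a frame that needs nfrags fragments; false means "busy". */
static bool xmit(int nfrags)
{
	int pages = nfrags + 2;	/* +1 linear data, +1 RNDIS header */

	if (pages > avail)
		return false;	/* NETDEV_TX_BUSY */

	avail -= pages;
	if (avail < LOWATER && !queue_stopped) {
		queue_stopped = true;
		printf("stop queue (avail=%d)\n", avail);
	}
	return true;
}

/* Send-completion: give the pages back and possibly wake the queue. */
static void xmit_done(int nfrags)
{
	avail += nfrags + 2;
	if (avail >= HIWATER && queue_stopped) {
		queue_stopped = false;
		printf("wake queue (avail=%d)\n", avail);
	}
}

int main(void)
{
	int i;

	for (i = 0; i < 10; i++)	/* queue ten 15-fragment frames */
		if (!xmit(15))
			printf("frame %d: busy\n", i);
	for (i = 0; i < 10; i++)
		xmit_done(15);
	return 0;
}
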
195/*
196 * netvsc_linkstatus_callback - Link up/down notification
197 */
198void netvsc_linkstatus_callback(struct hv_device *device_obj,
199 unsigned int status)
200{
201 struct net_device *net;
202 struct net_device_context *ndev_ctx;
203 struct netvsc_device *net_device;
204
205 net_device = hv_get_drvdata(device_obj);
206 net = net_device->ndev;
207
208 if (!net) {
209 netdev_err(net, "got link status but net device "
210 "not initialized yet\n");
211 return;
212 }
213
214 if (status == 1) {
215 netif_carrier_on(net);
216 netif_wake_queue(net);
217 ndev_ctx = netdev_priv(net);
218 schedule_delayed_work(&ndev_ctx->dwork, 0);
219 schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20));
220 } else {
221 netif_carrier_off(net);
222 netif_stop_queue(net);
223 }
224}
225
226/*
227 * netvsc_recv_callback - Callback when we receive a packet from the
228 * "wire" on the specified device.
229 */
230int netvsc_recv_callback(struct hv_device *device_obj,
231 struct hv_netvsc_packet *packet)
232{
233 struct net_device *net = dev_get_drvdata(&device_obj->device);
234 struct sk_buff *skb;
235 void *data;
236 int i;
237 unsigned long flags;
238 struct netvsc_device *net_device;
239
240 net_device = hv_get_drvdata(device_obj);
241 net = net_device->ndev;
242
243 if (!net) {
244 netdev_err(net, "got receive callback but net device"
245 " not initialized yet\n");
246 return 0;
247 }
248
249 /* Allocate a skb - TODO direct I/O to pages? */
250 skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen);
251 if (unlikely(!skb)) {
252 ++net->stats.rx_dropped;
253 return 0;
254 }
255
256 /* for kmap_atomic */
257 local_irq_save(flags);
258
259 /*
260 * Copy into the skb. The copy is needed here since the memory pointed
261 * to by hv_netvsc_packet (the host receive buffer) cannot be deallocated.
262 */
263 for (i = 0; i < packet->page_buf_cnt; i++) {
264 data = kmap_atomic(pfn_to_page(packet->page_buf[i].pfn),
265 KM_IRQ1);
266 data = (void *)(unsigned long)data +
267 packet->page_buf[i].offset;
268
269 memcpy(skb_put(skb, packet->page_buf[i].len), data,
270 packet->page_buf[i].len);
271
272 kunmap_atomic((void *)((unsigned long)data -
273 packet->page_buf[i].offset), KM_IRQ1);
274 }
275
276 local_irq_restore(flags);
277
278 skb->protocol = eth_type_trans(skb, net);
279 skb->ip_summed = CHECKSUM_NONE;
280
281 net->stats.rx_packets++;
282 net->stats.rx_bytes += skb->len;
283
284 /*
285 * Pass the skb up the stack. The network stack will free the skb
286 * when it is done with it.
287 * TODO - use NAPI?
288 */
289 netif_rx(skb);
290
291 return 0;
292}
293
294static void netvsc_get_drvinfo(struct net_device *net,
295 struct ethtool_drvinfo *info)
296{
297 strcpy(info->driver, "hv_netvsc");
298 strcpy(info->version, HV_DRV_VERSION);
299 strcpy(info->fw_version, "N/A");
300}
301
302static const struct ethtool_ops ethtool_ops = {
303 .get_drvinfo = netvsc_get_drvinfo,
304 .get_link = ethtool_op_get_link,
305};
306
307static const struct net_device_ops device_ops = {
308 .ndo_open = netvsc_open,
309 .ndo_stop = netvsc_close,
310 .ndo_start_xmit = netvsc_start_xmit,
311 .ndo_set_rx_mode = netvsc_set_multicast_list,
312 .ndo_change_mtu = eth_change_mtu,
313 .ndo_validate_addr = eth_validate_addr,
314 .ndo_set_mac_address = eth_mac_addr,
315};
316
317/*
318 * Send a GARP packet to network peers after migrations.
319 * After a Quick Migration the network is not immediately operational in
320 * the current context when the RNDIS_STATUS_MEDIA_CONNECT event arrives,
321 * so netif_notify_peers() is also run from a delayed work; otherwise no
322 * GARP packet is sent after the migration and the guest appears disconnected.
323 */
324static void netvsc_send_garp(struct work_struct *w)
325{
326 struct net_device_context *ndev_ctx;
327 struct net_device *net;
328 struct netvsc_device *net_device;
329
330 ndev_ctx = container_of(w, struct net_device_context, dwork.work);
331 net_device = hv_get_drvdata(ndev_ctx->device_ctx);
332 net = net_device->ndev;
333 netif_notify_peers(net);
334}
335
336
337static int netvsc_probe(struct hv_device *dev,
338 const struct hv_vmbus_device_id *dev_id)
339{
340 struct net_device *net = NULL;
341 struct net_device_context *net_device_ctx;
342 struct netvsc_device_info device_info;
343 int ret;
344
345 net = alloc_etherdev(sizeof(struct net_device_context));
346 if (!net)
347 return -ENOMEM;
348
349 /* Set initial state */
350 netif_carrier_off(net);
351
352 net_device_ctx = netdev_priv(net);
353 net_device_ctx->device_ctx = dev;
354 atomic_set(&net_device_ctx->avail, ring_size);
355 hv_set_drvdata(dev, net);
356 INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_send_garp);
357
358 net->netdev_ops = &device_ops;
359
360 /* TODO: Add GSO and Checksum offload */
361 net->hw_features = NETIF_F_SG;
362 net->features = NETIF_F_SG;
363
364 SET_ETHTOOL_OPS(net, &ethtool_ops);
365 SET_NETDEV_DEV(net, &dev->device);
366
367 ret = register_netdev(net);
368 if (ret != 0) {
369 pr_err("Unable to register netdev.\n");
370 free_netdev(net);
371 goto out;
372 }
373
374 /* Notify the netvsc driver of the new device */
375 device_info.ring_size = ring_size;
376 ret = rndis_filter_device_add(dev, &device_info);
377 if (ret != 0) {
378 netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
379 unregister_netdev(net);
380 free_netdev(net);
381 hv_set_drvdata(dev, NULL);
382 return ret;
383 }
384 memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
385
386 netif_carrier_on(net);
387
388out:
389 return ret;
390}
391
392static int netvsc_remove(struct hv_device *dev)
393{
394 struct net_device *net;
395 struct net_device_context *ndev_ctx;
396 struct netvsc_device *net_device;
397
398 net_device = hv_get_drvdata(dev);
399 net = net_device->ndev;
400
401 if (net == NULL) {
402 dev_err(&dev->device, "No net device to remove\n");
403 return 0;
404 }
405
406 ndev_ctx = netdev_priv(net);
407 cancel_delayed_work_sync(&ndev_ctx->dwork);
408
409 /* Stop outbound asap */
410 netif_stop_queue(net);
411
412 unregister_netdev(net);
413
414 /*
415 * Call to the vsc driver to let it know that the device is being
416 * removed
417 */
418 rndis_filter_device_remove(dev);
419
420 free_netdev(net);
421 return 0;
422}
423
424static const struct hv_vmbus_device_id id_table[] = {
425 /* Network guid */
426 { VMBUS_DEVICE(0x63, 0x51, 0x61, 0xF8, 0x3E, 0xDF, 0xc5, 0x46,
427 0x91, 0x3F, 0xF2, 0xD2, 0xF9, 0x65, 0xED, 0x0E) },
428 { },
429};
430
431MODULE_DEVICE_TABLE(vmbus, id_table);
432
433/* The one and only one */
434static struct hv_driver netvsc_drv = {
435 .name = "netvsc",
436 .id_table = id_table,
437 .probe = netvsc_probe,
438 .remove = netvsc_remove,
439};
440
441static void __exit netvsc_drv_exit(void)
442{
443 vmbus_driver_unregister(&netvsc_drv);
444}
445
446static int __init netvsc_drv_init(void)
447{
448 return vmbus_driver_register(&netvsc_drv);
449}
450
451MODULE_LICENSE("GPL");
452MODULE_VERSION(HV_DRV_VERSION);
453MODULE_DESCRIPTION("Microsoft Hyper-V network driver");
454
455module_init(netvsc_drv_init);
456module_exit(netvsc_drv_exit);
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
new file mode 100644
index 000000000000..bafccb360041
--- /dev/null
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -0,0 +1,855 @@
1/*
2 * Copyright (c) 2009, Microsoft Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
17 * Authors:
18 * Haiyang Zhang <haiyangz@microsoft.com>
19 * Hank Janssen <hjanssen@microsoft.com>
20 */
21#include <linux/kernel.h>
22#include <linux/sched.h>
23#include <linux/wait.h>
24#include <linux/highmem.h>
25#include <linux/slab.h>
26#include <linux/io.h>
27#include <linux/if_ether.h>
28#include <linux/netdevice.h>
29
30#include "hyperv_net.h"
31
32
33enum rndis_device_state {
34 RNDIS_DEV_UNINITIALIZED = 0,
35 RNDIS_DEV_INITIALIZING,
36 RNDIS_DEV_INITIALIZED,
37 RNDIS_DEV_DATAINITIALIZED,
38};
39
40struct rndis_device {
41 struct netvsc_device *net_dev;
42
43 enum rndis_device_state state;
44 bool link_state;
45 atomic_t new_req_id;
46
47 spinlock_t request_lock;
48 struct list_head req_list;
49
50 unsigned char hw_mac_adr[ETH_ALEN];
51};
52
53struct rndis_request {
54 struct list_head list_ent;
55 struct completion wait_event;
56
57 /*
58 * FIXME: We assumed a fixed size response here. If we do ever need to
59 * handle a bigger response, we can either define a max response
60 * message or add a response buffer variable above this field
61 */
62 struct rndis_message response_msg;
63
64 /* Simplify allocation by having a netvsc packet inline */
65 struct hv_netvsc_packet pkt;
66 struct hv_page_buffer buf;
67 /* FIXME: We assumed a fixed size request here. */
68 struct rndis_message request_msg;
69};
70
71static void rndis_filter_send_completion(void *ctx);
72
73static void rndis_filter_send_request_completion(void *ctx);
74
75
76
77static struct rndis_device *get_rndis_device(void)
78{
79 struct rndis_device *device;
80
81 device = kzalloc(sizeof(struct rndis_device), GFP_KERNEL);
82 if (!device)
83 return NULL;
84
85 spin_lock_init(&device->request_lock);
86
87 INIT_LIST_HEAD(&device->req_list);
88
89 device->state = RNDIS_DEV_UNINITIALIZED;
90
91 return device;
92}
93
94static struct rndis_request *get_rndis_request(struct rndis_device *dev,
95 u32 msg_type,
96 u32 msg_len)
97{
98 struct rndis_request *request;
99 struct rndis_message *rndis_msg;
100 struct rndis_set_request *set;
101 unsigned long flags;
102
103 request = kzalloc(sizeof(struct rndis_request), GFP_KERNEL);
104 if (!request)
105 return NULL;
106
107 init_completion(&request->wait_event);
108
109 rndis_msg = &request->request_msg;
110 rndis_msg->ndis_msg_type = msg_type;
111 rndis_msg->msg_len = msg_len;
112
113 /*
114 * Set the request id. This field is always right after the rndis
115 * header for request/response packet types, so we just use the
116 * set_req layout as a template.
117 */
118 set = &rndis_msg->msg.set_req;
119 set->req_id = atomic_inc_return(&dev->new_req_id);
120
121 /* Add to the request list */
122 spin_lock_irqsave(&dev->request_lock, flags);
123 list_add_tail(&request->list_ent, &dev->req_list);
124 spin_unlock_irqrestore(&dev->request_lock, flags);
125
126 return request;
127}
128
129static void put_rndis_request(struct rndis_device *dev,
130 struct rndis_request *req)
131{
132 unsigned long flags;
133
134 spin_lock_irqsave(&dev->request_lock, flags);
135 list_del(&req->list_ent);
136 spin_unlock_irqrestore(&dev->request_lock, flags);
137
138 kfree(req);
139}
140
141static void dump_rndis_message(struct hv_device *hv_dev,
142 struct rndis_message *rndis_msg)
143{
144 struct net_device *netdev;
145 struct netvsc_device *net_device;
146
147 net_device = hv_get_drvdata(hv_dev);
148 netdev = net_device->ndev;
149
150 switch (rndis_msg->ndis_msg_type) {
151 case REMOTE_NDIS_PACKET_MSG:
152 netdev_dbg(netdev, "REMOTE_NDIS_PACKET_MSG (len %u, "
153 "data offset %u data len %u, # oob %u, "
154 "oob offset %u, oob len %u, pkt offset %u, "
155 "pkt len %u\n",
156 rndis_msg->msg_len,
157 rndis_msg->msg.pkt.data_offset,
158 rndis_msg->msg.pkt.data_len,
159 rndis_msg->msg.pkt.num_oob_data_elements,
160 rndis_msg->msg.pkt.oob_data_offset,
161 rndis_msg->msg.pkt.oob_data_len,
162 rndis_msg->msg.pkt.per_pkt_info_offset,
163 rndis_msg->msg.pkt.per_pkt_info_len);
164 break;
165
166 case REMOTE_NDIS_INITIALIZE_CMPLT:
167 netdev_dbg(netdev, "REMOTE_NDIS_INITIALIZE_CMPLT "
168 "(len %u, id 0x%x, status 0x%x, major %d, minor %d, "
169 "device flags %d, max xfer size 0x%x, max pkts %u, "
170 "pkt aligned %u)\n",
171 rndis_msg->msg_len,
172 rndis_msg->msg.init_complete.req_id,
173 rndis_msg->msg.init_complete.status,
174 rndis_msg->msg.init_complete.major_ver,
175 rndis_msg->msg.init_complete.minor_ver,
176 rndis_msg->msg.init_complete.dev_flags,
177 rndis_msg->msg.init_complete.max_xfer_size,
178 rndis_msg->msg.init_complete.
179 max_pkt_per_msg,
180 rndis_msg->msg.init_complete.
181 pkt_alignment_factor);
182 break;
183
184 case REMOTE_NDIS_QUERY_CMPLT:
185 netdev_dbg(netdev, "REMOTE_NDIS_QUERY_CMPLT "
186 "(len %u, id 0x%x, status 0x%x, buf len %u, "
187 "buf offset %u)\n",
188 rndis_msg->msg_len,
189 rndis_msg->msg.query_complete.req_id,
190 rndis_msg->msg.query_complete.status,
191 rndis_msg->msg.query_complete.
192 info_buflen,
193 rndis_msg->msg.query_complete.
194 info_buf_offset);
195 break;
196
197 case REMOTE_NDIS_SET_CMPLT:
198 netdev_dbg(netdev,
199 "REMOTE_NDIS_SET_CMPLT (len %u, id 0x%x, status 0x%x)\n",
200 rndis_msg->msg_len,
201 rndis_msg->msg.set_complete.req_id,
202 rndis_msg->msg.set_complete.status);
203 break;
204
205 case REMOTE_NDIS_INDICATE_STATUS_MSG:
206 netdev_dbg(netdev, "REMOTE_NDIS_INDICATE_STATUS_MSG "
207 "(len %u, status 0x%x, buf len %u, buf offset %u)\n",
208 rndis_msg->msg_len,
209 rndis_msg->msg.indicate_status.status,
210 rndis_msg->msg.indicate_status.status_buflen,
211 rndis_msg->msg.indicate_status.status_buf_offset);
212 break;
213
214 default:
215 netdev_dbg(netdev, "0x%x (len %u)\n",
216 rndis_msg->ndis_msg_type,
217 rndis_msg->msg_len);
218 break;
219 }
220}
221
222static int rndis_filter_send_request(struct rndis_device *dev,
223 struct rndis_request *req)
224{
225 int ret;
226 struct hv_netvsc_packet *packet;
227
228 /* Setup the packet to send it */
229 packet = &req->pkt;
230
231 packet->is_data_pkt = false;
232 packet->total_data_buflen = req->request_msg.msg_len;
233 packet->page_buf_cnt = 1;
234
235 packet->page_buf[0].pfn = virt_to_phys(&req->request_msg) >>
236 PAGE_SHIFT;
237 packet->page_buf[0].len = req->request_msg.msg_len;
238 packet->page_buf[0].offset =
239 (unsigned long)&req->request_msg & (PAGE_SIZE - 1);
240
241 packet->completion.send.send_completion_ctx = req;/* packet; */
242 packet->completion.send.send_completion =
243 rndis_filter_send_request_completion;
244 packet->completion.send.send_completion_tid = (unsigned long)dev;
245
246 ret = netvsc_send(dev->net_dev->dev, packet);
247 return ret;
248}
249
250static void rndis_filter_receive_response(struct rndis_device *dev,
251 struct rndis_message *resp)
252{
253 struct rndis_request *request = NULL;
254 bool found = false;
255 unsigned long flags;
256 struct net_device *ndev;
257
258 ndev = dev->net_dev->ndev;
259
260 spin_lock_irqsave(&dev->request_lock, flags);
261 list_for_each_entry(request, &dev->req_list, list_ent) {
262 /*
263 * All request/response messages carry the RequestId as the
264 * first field
265 */
266 if (request->request_msg.msg.init_req.req_id
267 == resp->msg.init_complete.req_id) {
268 found = true;
269 break;
270 }
271 }
272 spin_unlock_irqrestore(&dev->request_lock, flags);
273
274 if (found) {
275 if (resp->msg_len <= sizeof(struct rndis_message)) {
276 memcpy(&request->response_msg, resp,
277 resp->msg_len);
278 } else {
279 netdev_err(ndev,
280 "rndis response buffer overflow "
281 "detected (size %u max %zu)\n",
282 resp->msg_len,
283 sizeof(struct rndis_message));
284
285 if (resp->ndis_msg_type ==
286 REMOTE_NDIS_RESET_CMPLT) {
287 /* does not have a request id field */
288 request->response_msg.msg.reset_complete.
289 status = STATUS_BUFFER_OVERFLOW;
290 } else {
291 request->response_msg.msg.
292 init_complete.status =
293 STATUS_BUFFER_OVERFLOW;
294 }
295 }
296
297 complete(&request->wait_event);
298 } else {
299 netdev_err(ndev,
300 "no rndis request found for this response "
301 "(id 0x%x res type 0x%x)\n",
302 resp->msg.init_complete.req_id,
303 resp->ndis_msg_type);
304 }
305}
306
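
The RNDIS control path above depends on every request/response message carrying its request id immediately after the common header: get_rndis_request() stamps each request with atomic_inc_return(&new_req_id) and parks it on req_list, and rndis_filter_receive_response() scans that list for the matching waiter to complete. Below is a stripped-down, single-threaded sketch of the same id matching, with a fixed slot array instead of the locked list and without completions; all names are invented for the illustration.

#include <stdio.h>

#define MAX_PENDING 8

struct pending_req {
	unsigned int req_id;	/* copied into the outgoing message */
	int in_use;
	char response[64];	/* where the matched response is copied */
};

static struct pending_req pending[MAX_PENDING];
static unsigned int next_req_id;

/* Issue a request: take a slot and stamp it with a fresh id. */
static struct pending_req *issue_request(void)
{
	int i;

	for (i = 0; i < MAX_PENDING; i++) {
		if (!pending[i].in_use) {
			pending[i].in_use = 1;
			pending[i].req_id = ++next_req_id;
			return &pending[i];
		}
	}
	return NULL;
}

/*
 * A response arrived carrying req_id: find the matching request,
 * copy the payload and "complete" it (here: just mark and print).
 */
static void receive_response(unsigned int req_id, const char *payload)
{
	int i;

	for (i = 0; i < MAX_PENDING; i++) {
		if (pending[i].in_use && pending[i].req_id == req_id) {
			snprintf(pending[i].response,
				 sizeof(pending[i].response), "%s", payload);
			pending[i].in_use = 0;
			printf("request %u completed: %s\n", req_id,
			       pending[i].response);
			return;
		}
	}
	printf("no request found for response id %u\n", req_id);
}

int main(void)
{
	struct pending_req *a = issue_request();
	struct pending_req *b = issue_request();

	receive_response(b->req_id, "link status");	/* out of order is fine */
	receive_response(a->req_id, "permanent MAC");
	receive_response(99, "stray response");		/* unmatched case */
	return 0;
}
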
307static void rndis_filter_receive_indicate_status(struct rndis_device *dev,
308 struct rndis_message *resp)
309{
310 struct rndis_indicate_status *indicate =
311 &resp->msg.indicate_status;
312
313 if (indicate->status == RNDIS_STATUS_MEDIA_CONNECT) {
314 netvsc_linkstatus_callback(
315 dev->net_dev->dev, 1);
316 } else if (indicate->status == RNDIS_STATUS_MEDIA_DISCONNECT) {
317 netvsc_linkstatus_callback(
318 dev->net_dev->dev, 0);
319 } else {
320 /*
321 * TODO:
322 */
323 }
324}
325
326static void rndis_filter_receive_data(struct rndis_device *dev,
327 struct rndis_message *msg,
328 struct hv_netvsc_packet *pkt)
329{
330 struct rndis_packet *rndis_pkt;
331 u32 data_offset;
332 int i;
333
334 rndis_pkt = &msg->msg.pkt;
335
336 /*
337 * FIXME: Handle multiple rndis pkt msgs that may be enclosed in this
338 * netvsc packet (i.e. TotalDataBufferLength != MessageLength)
339 */
340
341 /* Remove the rndis header and pass it back up the stack */
342 data_offset = RNDIS_HEADER_SIZE + rndis_pkt->data_offset;
343
344 pkt->total_data_buflen -= data_offset;
345 pkt->page_buf[0].offset += data_offset;
346 pkt->page_buf[0].len -= data_offset;
347
348 /* Drop the 0th page if the rndis data goes beyond the page boundary */
349 if (pkt->page_buf[0].offset >= PAGE_SIZE) {
350 pkt->page_buf[1].offset = pkt->page_buf[0].offset - PAGE_SIZE;
351 pkt->page_buf[1].len -= pkt->page_buf[1].offset;
352 pkt->page_buf_cnt--;
353 for (i = 0; i < pkt->page_buf_cnt; i++)
354 pkt->page_buf[i] = pkt->page_buf[i+1];
355 }
356
357 pkt->is_data_pkt = true;
358
359 netvsc_recv_callback(dev->net_dev->dev, pkt);
360}
361
362int rndis_filter_receive(struct hv_device *dev,
363 struct hv_netvsc_packet *pkt)
364{
365 struct netvsc_device *net_dev = hv_get_drvdata(dev);
366 struct rndis_device *rndis_dev;
367 struct rndis_message rndis_msg;
368 struct rndis_message *rndis_hdr;
369 struct net_device *ndev;
370
371 if (!net_dev)
372 return -EINVAL;
373
374 ndev = net_dev->ndev;
375
376 /* Make sure the rndis device state is initialized */
377 if (!net_dev->extension) {
378 netdev_err(ndev, "got rndis message but no rndis device - "
379 "dropping this message!\n");
380 return -ENODEV;
381 }
382
383 rndis_dev = (struct rndis_device *)net_dev->extension;
384 if (rndis_dev->state == RNDIS_DEV_UNINITIALIZED) {
385 netdev_err(ndev, "got rndis message but rndis device "
386 "uninitialized...dropping this message!\n");
387 return -ENODEV;
388 }
389
390 rndis_hdr = (struct rndis_message *)kmap_atomic(
391 pfn_to_page(pkt->page_buf[0].pfn), KM_IRQ0);
392
393 rndis_hdr = (void *)((unsigned long)rndis_hdr +
394 pkt->page_buf[0].offset);
395
396 /* Make sure we got a valid rndis message */
397 if ((rndis_hdr->ndis_msg_type != REMOTE_NDIS_PACKET_MSG) &&
398 (rndis_hdr->msg_len > sizeof(struct rndis_message))) {
399 netdev_err(ndev, "incoming rndis message buffer overflow "
400 "detected (got %u, max %zu)..marking it an error!\n",
401 rndis_hdr->msg_len,
402 sizeof(struct rndis_message));
403 }
404
405 memcpy(&rndis_msg, rndis_hdr,
406 (rndis_hdr->msg_len > sizeof(struct rndis_message)) ?
407 sizeof(struct rndis_message) :
408 rndis_hdr->msg_len);
409
410 kunmap_atomic(rndis_hdr - pkt->page_buf[0].offset, KM_IRQ0);
411
412 dump_rndis_message(dev, &rndis_msg);
413
414 switch (rndis_msg.ndis_msg_type) {
415 case REMOTE_NDIS_PACKET_MSG:
416 /* data msg */
417 rndis_filter_receive_data(rndis_dev, &rndis_msg, pkt);
418 break;
419
420 case REMOTE_NDIS_INITIALIZE_CMPLT:
421 case REMOTE_NDIS_QUERY_CMPLT:
422 case REMOTE_NDIS_SET_CMPLT:
423 /* completion msgs */
424 rndis_filter_receive_response(rndis_dev, &rndis_msg);
425 break;
426
427 case REMOTE_NDIS_INDICATE_STATUS_MSG:
428 /* notification msgs */
429 rndis_filter_receive_indicate_status(rndis_dev, &rndis_msg);
430 break;
431 default:
432 netdev_err(ndev,
433 "unhandled rndis message (type %u len %u)\n",
434 rndis_msg.ndis_msg_type,
435 rndis_msg.msg_len);
436 break;
437 }
438
439 return 0;
440}
441
442static int rndis_filter_query_device(struct rndis_device *dev, u32 oid,
443 void *result, u32 *result_size)
444{
445 struct rndis_request *request;
446 u32 inresult_size = *result_size;
447 struct rndis_query_request *query;
448 struct rndis_query_complete *query_complete;
449 int ret = 0;
450 int t;
451
452 if (!result)
453 return -EINVAL;
454
455 *result_size = 0;
456 request = get_rndis_request(dev, REMOTE_NDIS_QUERY_MSG,
457 RNDIS_MESSAGE_SIZE(struct rndis_query_request));
458 if (!request) {
459 ret = -ENOMEM;
460 goto cleanup;
461 }
462
463 /* Setup the rndis query */
464 query = &request->request_msg.msg.query_req;
465 query->oid = oid;
466 query->info_buf_offset = sizeof(struct rndis_query_request);
467 query->info_buflen = 0;
468 query->dev_vc_handle = 0;
469
470 ret = rndis_filter_send_request(dev, request);
471 if (ret != 0)
472 goto cleanup;
473
474 t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
475 if (t == 0) {
476 ret = -ETIMEDOUT;
477 goto cleanup;
478 }
479
480 /* Copy the response back */
481 query_complete = &request->response_msg.msg.query_complete;
482
483 if (query_complete->info_buflen > inresult_size) {
484 ret = -1;
485 goto cleanup;
486 }
487
488 memcpy(result,
489 (void *)((unsigned long)query_complete +
490 query_complete->info_buf_offset),
491 query_complete->info_buflen);
492
493 *result_size = query_complete->info_buflen;
494
495cleanup:
496 if (request)
497 put_rndis_request(dev, request);
498
499 return ret;
500}
501
502static int rndis_filter_query_device_mac(struct rndis_device *dev)
503{
504 u32 size = ETH_ALEN;
505
506 return rndis_filter_query_device(dev,
507 RNDIS_OID_802_3_PERMANENT_ADDRESS,
508 dev->hw_mac_adr, &size);
509}
510
511static int rndis_filter_query_device_link_status(struct rndis_device *dev)
512{
513 u32 size = sizeof(u32);
514 u32 link_status;
515 int ret;
516
517 ret = rndis_filter_query_device(dev,
518 RNDIS_OID_GEN_MEDIA_CONNECT_STATUS,
519 &link_status, &size);
520 dev->link_state = (link_status != 0);
521
522 return ret;
523}
524
525static int rndis_filter_set_packet_filter(struct rndis_device *dev,
526 u32 new_filter)
527{
528 struct rndis_request *request;
529 struct rndis_set_request *set;
530 struct rndis_set_complete *set_complete;
531 u32 status;
532 int ret, t;
533 struct net_device *ndev;
534
535 ndev = dev->net_dev->ndev;
536
537 request = get_rndis_request(dev, REMOTE_NDIS_SET_MSG,
538 RNDIS_MESSAGE_SIZE(struct rndis_set_request) +
539 sizeof(u32));
540 if (!request) {
541 ret = -ENOMEM;
542 goto cleanup;
543 }
544
545 /* Setup the rndis set */
546 set = &request->request_msg.msg.set_req;
547 set->oid = RNDIS_OID_GEN_CURRENT_PACKET_FILTER;
548 set->info_buflen = sizeof(u32);
549 set->info_buf_offset = sizeof(struct rndis_set_request);
550
551 memcpy((void *)(unsigned long)set + sizeof(struct rndis_set_request),
552 &new_filter, sizeof(u32));
553
554 ret = rndis_filter_send_request(dev, request);
555 if (ret != 0)
556 goto cleanup;
557
558 t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
559
560 if (t == 0) {
561 netdev_err(ndev,
562 "timeout before we got a set response...\n");
563 /*
564 * We can't deallocate the request since we may still receive a
565 * send completion for it.
566 */
567 goto exit;
568 } else {
569 set_complete = &request->response_msg.msg.set_complete;
570 status = set_complete->status;
571 }
572
573cleanup:
574 if (request)
575 put_rndis_request(dev, request);
576exit:
577 return ret;
578}
579
580
581static int rndis_filter_init_device(struct rndis_device *dev)
582{
583 struct rndis_request *request;
584 struct rndis_initialize_request *init;
585 struct rndis_initialize_complete *init_complete;
586 u32 status;
587 int ret, t;
588
589 request = get_rndis_request(dev, REMOTE_NDIS_INITIALIZE_MSG,
590 RNDIS_MESSAGE_SIZE(struct rndis_initialize_request));
591 if (!request) {
592 ret = -ENOMEM;
593 goto cleanup;
594 }
595
596 /* Set up the rndis initialize request */
597 init = &request->request_msg.msg.init_req;
598 init->major_ver = RNDIS_MAJOR_VERSION;
599 init->minor_ver = RNDIS_MINOR_VERSION;
600 /* FIXME: Use 1536 - rounded ethernet frame size */
601 init->max_xfer_size = 2048;
602
603 dev->state = RNDIS_DEV_INITIALIZING;
604
605 ret = rndis_filter_send_request(dev, request);
606 if (ret != 0) {
607 dev->state = RNDIS_DEV_UNINITIALIZED;
608 goto cleanup;
609 }
610
611
612 t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
613
614 if (t == 0) {
615 ret = -ETIMEDOUT;
616 goto cleanup;
617 }
618
619 init_complete = &request->response_msg.msg.init_complete;
620 status = init_complete->status;
621 if (status == RNDIS_STATUS_SUCCESS) {
622 dev->state = RNDIS_DEV_INITIALIZED;
623 ret = 0;
624 } else {
625 dev->state = RNDIS_DEV_UNINITIALIZED;
626 ret = -EINVAL;
627 }
628
629cleanup:
630 if (request)
631 put_rndis_request(dev, request);
632
633 return ret;
634}
635
636static void rndis_filter_halt_device(struct rndis_device *dev)
637{
638 struct rndis_request *request;
639 struct rndis_halt_request *halt;
640
641 /* Attempt to do a rndis device halt */
642 request = get_rndis_request(dev, REMOTE_NDIS_HALT_MSG,
643 RNDIS_MESSAGE_SIZE(struct rndis_halt_request));
644 if (!request)
645 goto cleanup;
646
647 /* Set up the rndis halt request */
648 halt = &request->request_msg.msg.halt_req;
649 halt->req_id = atomic_inc_return(&dev->new_req_id);
650
651 /* Ignore return since this msg is optional. */
652 rndis_filter_send_request(dev, request);
653
654 dev->state = RNDIS_DEV_UNINITIALIZED;
655
656cleanup:
657 if (request)
658 put_rndis_request(dev, request);
659 return;
660}
661
662static int rndis_filter_open_device(struct rndis_device *dev)
663{
664 int ret;
665
666 if (dev->state != RNDIS_DEV_INITIALIZED)
667 return 0;
668
669 ret = rndis_filter_set_packet_filter(dev,
670 NDIS_PACKET_TYPE_BROADCAST |
671 NDIS_PACKET_TYPE_ALL_MULTICAST |
672 NDIS_PACKET_TYPE_DIRECTED);
673 if (ret == 0)
674 dev->state = RNDIS_DEV_DATAINITIALIZED;
675
676 return ret;
677}
678
679static int rndis_filter_close_device(struct rndis_device *dev)
680{
681 int ret;
682
683 if (dev->state != RNDIS_DEV_DATAINITIALIZED)
684 return 0;
685
686 ret = rndis_filter_set_packet_filter(dev, 0);
687 if (ret == 0)
688 dev->state = RNDIS_DEV_INITIALIZED;
689
690 return ret;
691}
692
693int rndis_filter_device_add(struct hv_device *dev,
694 void *additional_info)
695{
696 int ret;
697 struct netvsc_device *net_device;
698 struct rndis_device *rndis_device;
699 struct netvsc_device_info *device_info = additional_info;
700
701 rndis_device = get_rndis_device();
702 if (!rndis_device)
703 return -ENODEV;
704
705 /*
706 * Let the inner driver handle this first to create the netvsc channel.
707 * NOTE! Once the channel is created, we may get a receive callback
708 * (rndis_filter_receive()) before this call has completed.
709 */
710 ret = netvsc_device_add(dev, additional_info);
711 if (ret != 0) {
712 kfree(rndis_device);
713 return ret;
714 }
715
716
717 /* Initialize the rndis device */
718 net_device = hv_get_drvdata(dev);
719
720 net_device->extension = rndis_device;
721 rndis_device->net_dev = net_device;
722
723 /* Send the rndis initialization message */
724 ret = rndis_filter_init_device(rndis_device);
725 if (ret != 0) {
726 /*
727 * TODO: If rndis init failed, we will need to shut down the
728 * channel
729 */
730 }
731
732 /* Get the mac address */
733 ret = rndis_filter_query_device_mac(rndis_device);
734 if (ret != 0) {
735 /*
736 * TODO: shutdown rndis device and the channel
737 */
738 }
739
740 memcpy(device_info->mac_adr, rndis_device->hw_mac_adr, ETH_ALEN);
741
742 rndis_filter_query_device_link_status(rndis_device);
743
744 device_info->link_state = rndis_device->link_state;
745
746 dev_info(&dev->device, "Device MAC %pM link state %s\n",
747 rndis_device->hw_mac_adr,
748 device_info->link_state ? "down" : "up");
749
750 return ret;
751}
752
753void rndis_filter_device_remove(struct hv_device *dev)
754{
755 struct netvsc_device *net_dev = hv_get_drvdata(dev);
756 struct rndis_device *rndis_dev = net_dev->extension;
757
758 /* Halt and release the rndis device */
759 rndis_filter_halt_device(rndis_dev);
760
761 kfree(rndis_dev);
762 net_dev->extension = NULL;
763
764 netvsc_device_remove(dev);
765}
766
767
768int rndis_filter_open(struct hv_device *dev)
769{
770 struct netvsc_device *net_device = hv_get_drvdata(dev);
771
772 if (!net_device)
773 return -EINVAL;
774
775 return rndis_filter_open_device(net_device->extension);
776}
777
778int rndis_filter_close(struct hv_device *dev)
779{
780 struct netvsc_device *netDevice = hv_get_drvdata(dev);
781
782 if (!netDevice)
783 return -EINVAL;
784
785 return rndis_filter_close_device(netDevice->extension);
786}
787
788int rndis_filter_send(struct hv_device *dev,
789 struct hv_netvsc_packet *pkt)
790{
791 int ret;
792 struct rndis_filter_packet *filterPacket;
793 struct rndis_message *rndisMessage;
794 struct rndis_packet *rndisPacket;
795 u32 rndisMessageSize;
796
797 /* Add the rndis header */
798 filterPacket = (struct rndis_filter_packet *)pkt->extension;
799
800 memset(filterPacket, 0, sizeof(struct rndis_filter_packet));
801
802 rndisMessage = &filterPacket->msg;
803 rndisMessageSize = RNDIS_MESSAGE_SIZE(struct rndis_packet);
804
805 rndisMessage->ndis_msg_type = REMOTE_NDIS_PACKET_MSG;
806 rndisMessage->msg_len = pkt->total_data_buflen +
807 rndisMessageSize;
808
809 rndisPacket = &rndisMessage->msg.pkt;
810 rndisPacket->data_offset = sizeof(struct rndis_packet);
811 rndisPacket->data_len = pkt->total_data_buflen;
812
813 pkt->is_data_pkt = true;
814 pkt->page_buf[0].pfn = virt_to_phys(rndisMessage) >> PAGE_SHIFT;
815 pkt->page_buf[0].offset =
816 (unsigned long)rndisMessage & (PAGE_SIZE-1);
817 pkt->page_buf[0].len = rndisMessageSize;
818
819 /* Save the packet send completion and context */
820 filterPacket->completion = pkt->completion.send.send_completion;
821 filterPacket->completion_ctx =
822 pkt->completion.send.send_completion_ctx;
823
824 /* Use ours */
825 pkt->completion.send.send_completion = rndis_filter_send_completion;
826 pkt->completion.send.send_completion_ctx = filterPacket;
827
828 ret = netvsc_send(dev, pkt);
829 if (ret != 0) {
830 /*
831 * Reset the completion to originals to allow retries from
832 * above
833 */
834 pkt->completion.send.send_completion =
835 filterPacket->completion;
836 pkt->completion.send.send_completion_ctx =
837 filterPacket->completion_ctx;
838 }
839
840 return ret;
841}
842
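
rndis_filter_send() above wraps each outgoing frame in a REMOTE_NDIS_PACKET_MSG whose msg_len covers the header plus the payload and whose data_offset is measured from the end of the common RNDIS header; rndis_filter_receive_data() strips RNDIS_HEADER_SIZE + data_offset on the way in to recover the Ethernet frame. The framing arithmetic, reduced to a compilable toy with simplified stand-in structs (toy_rndis_msg below is not the real rndis_message layout):

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-ins; the real layouts live in hyperv_net.h. */
struct toy_rndis_pkt {
	uint32_t data_offset;	/* from the end of the common header */
	uint32_t data_len;
};

struct toy_rndis_msg {
	uint32_t msg_type;	/* REMOTE_NDIS_PACKET_MSG */
	uint32_t msg_len;	/* header + payload */
	struct toy_rndis_pkt pkt;
};

#define TOY_HEADER_SIZE		(2 * sizeof(uint32_t))	/* msg_type + msg_len */
#define TOY_MESSAGE_SIZE	sizeof(struct toy_rndis_msg)

int main(void)
{
	uint32_t payload_len = 1514;		/* an Ethernet frame, say */
	struct toy_rndis_msg msg;
	uint32_t strip, recovered;

	/* Send side, as in rndis_filter_send(): */
	msg.msg_type = 1;			/* pretend packet message */
	msg.msg_len = payload_len + TOY_MESSAGE_SIZE;
	msg.pkt.data_offset = sizeof(struct toy_rndis_pkt);
	msg.pkt.data_len = payload_len;

	/* Receive side, as in rndis_filter_receive_data(): skip the common
	 * header plus data_offset to land on the Ethernet frame. */
	strip = TOY_HEADER_SIZE + msg.pkt.data_offset;
	recovered = msg.msg_len - strip;

	printf("total msg %u bytes, strip %u, payload back out %u\n",
	       (unsigned)msg.msg_len, (unsigned)strip, (unsigned)recovered);
	return 0;
}
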
843static void rndis_filter_send_completion(void *ctx)
844{
845 struct rndis_filter_packet *filterPacket = ctx;
846
847 /* Pass it back to the original handler */
848 filterPacket->completion(filterPacket->completion_ctx);
849}
850
851
852static void rndis_filter_send_request_completion(void *ctx)
853{
854 /* Noop */
855}