Diffstat (limited to 'drivers/net/hyperv')
-rw-r--r--  drivers/net/hyperv/hyperv_net.h   | 207
-rw-r--r--  drivers/net/hyperv/netvsc.c       |  93
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c   | 337
-rw-r--r--  drivers/net/hyperv/rndis_filter.c | 187
4 files changed, 607 insertions(+), 217 deletions(-)
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 7b594ce3f21d..13010b4dae5b 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -30,6 +30,7 @@
 
 /* Fwd declaration */
 struct hv_netvsc_packet;
+struct ndis_tcp_ip_checksum_info;
 
 /* Represent the xfer page packet which contains 1 or more netvsc packet */
 struct xferpage_packet {
@@ -73,7 +74,7 @@ struct hv_netvsc_packet {
 	} completion;
 
 	/* This points to the memory after page_buf */
-	void *extension;
+	struct rndis_message *rndis_msg;
 
 	u32 total_data_buflen;
 	/* Points to the send/receive buffer where the ethernet frame is */
@@ -117,7 +118,8 @@ int netvsc_send(struct hv_device *device,
 void netvsc_linkstatus_callback(struct hv_device *device_obj,
 				unsigned int status);
 int netvsc_recv_callback(struct hv_device *device_obj,
-			struct hv_netvsc_packet *packet);
+			struct hv_netvsc_packet *packet,
+			struct ndis_tcp_ip_checksum_info *csum_info);
 int rndis_filter_open(struct hv_device *dev);
 int rndis_filter_close(struct hv_device *dev);
 int rndis_filter_device_add(struct hv_device *dev,
@@ -126,11 +128,6 @@ void rndis_filter_device_remove(struct hv_device *dev);
 int rndis_filter_receive(struct hv_device *dev,
 			struct hv_netvsc_packet *pkt);
 
-
-
-int rndis_filter_send(struct hv_device *dev,
-			struct hv_netvsc_packet *pkt);
-
 int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter);
 int rndis_filter_set_device_mac(struct hv_device *hdev, char *mac);
 
@@ -139,6 +136,8 @@ int rndis_filter_set_device_mac(struct hv_device *hdev, char *mac);
 
 #define NVSP_PROTOCOL_VERSION_1 2
 #define NVSP_PROTOCOL_VERSION_2 0x30002
+#define NVSP_PROTOCOL_VERSION_4 0x40000
+#define NVSP_PROTOCOL_VERSION_5 0x50000
 
 enum {
 	NVSP_MSG_TYPE_NONE = 0,
@@ -193,6 +192,23 @@ enum {
 
 	NVSP_MSG2_TYPE_ALLOC_CHIMNEY_HANDLE,
 	NVSP_MSG2_TYPE_ALLOC_CHIMNEY_HANDLE_COMP,
+
+	NVSP_MSG2_MAX = NVSP_MSG2_TYPE_ALLOC_CHIMNEY_HANDLE_COMP,
+
+	/* Version 4 messages */
+	NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION,
+	NVSP_MSG4_TYPE_SWITCH_DATA_PATH,
+	NVSP_MSG4_TYPE_UPLINK_CONNECT_STATE_DEPRECATED,
+
+	NVSP_MSG4_MAX = NVSP_MSG4_TYPE_UPLINK_CONNECT_STATE_DEPRECATED,
+
+	/* Version 5 messages */
+	NVSP_MSG5_TYPE_OID_QUERY_EX,
+	NVSP_MSG5_TYPE_OID_QUERY_EX_COMP,
+	NVSP_MSG5_TYPE_SUBCHANNEL,
+	NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE,
+
+	NVSP_MSG5_MAX = NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE,
 };
 
 enum {
@@ -447,10 +463,44 @@ union nvsp_2_message_uber {
 	struct nvsp_2_free_rxbuf free_rxbuf;
 } __packed;
 
+enum nvsp_subchannel_operation {
+	NVSP_SUBCHANNEL_NONE = 0,
+	NVSP_SUBCHANNEL_ALLOCATE,
+	NVSP_SUBCHANNEL_MAX
+};
+
+struct nvsp_5_subchannel_request {
+	u32 op;
+	u32 num_subchannels;
+} __packed;
+
+struct nvsp_5_subchannel_complete {
+	u32 status;
+	u32 num_subchannels; /* Actual number of subchannels allocated */
+} __packed;
+
+struct nvsp_5_send_indirect_table {
+	/* The number of entries in the send indirection table */
+	u32 count;
+
+	/* The offset of the send indirection table from the top of this
+	 * struct. The send indirection table tells which channel to put
+	 * the send traffic on. Each entry is a channel number.
+	 */
+	u32 offset;
+} __packed;
+
+union nvsp_5_message_uber {
+	struct nvsp_5_subchannel_request subchn_req;
+	struct nvsp_5_subchannel_complete subchn_comp;
+	struct nvsp_5_send_indirect_table send_table;
+} __packed;
+
 union nvsp_all_messages {
 	union nvsp_message_init_uber init_msg;
 	union nvsp_1_message_uber v1_msg;
 	union nvsp_2_message_uber v2_msg;
+	union nvsp_5_message_uber v5_msg;
 } __packed;
 
 /* ALL Messages */
@@ -463,6 +513,7 @@ struct nvsp_message {
 #define NETVSC_MTU 65536
 
 #define NETVSC_RECEIVE_BUFFER_SIZE (1024*1024*16) /* 16MB */
+#define NETVSC_RECEIVE_BUFFER_SIZE_LEGACY (1024*1024*15) /* 15MB */
 
 #define NETVSC_RECEIVE_BUFFER_ID 0xcafe
 
@@ -506,6 +557,8 @@ struct netvsc_device {
 
 	/* Holds rndis device info */
 	void *extension;
+	/* The receive buffer for this device */
+	unsigned char cb_buffer[NETVSC_PACKET_SIZE];
 };
 
 /* NdisInitialize message */
@@ -671,9 +724,133 @@ struct ndis_pkt_8021q_info {
 	};
 };
 
+struct ndis_oject_header {
+	u8 type;
+	u8 revision;
+	u16 size;
+};
+
+#define NDIS_OBJECT_TYPE_DEFAULT 0x80
+#define NDIS_OFFLOAD_PARAMETERS_REVISION_3 3
+#define NDIS_OFFLOAD_PARAMETERS_NO_CHANGE 0
+#define NDIS_OFFLOAD_PARAMETERS_LSOV2_DISABLED 1
+#define NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED 2
+#define NDIS_OFFLOAD_PARAMETERS_LSOV1_ENABLED 2
+#define NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED 1
+#define NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED 2
+#define NDIS_OFFLOAD_PARAMETERS_TX_RX_DISABLED 1
+#define NDIS_OFFLOAD_PARAMETERS_TX_ENABLED_RX_DISABLED 2
+#define NDIS_OFFLOAD_PARAMETERS_RX_ENABLED_TX_DISABLED 3
+#define NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED 4
+
+#define NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE 1
+#define NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4 0
+#define NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6 1
+
+/*
+ * New offload OIDs for NDIS 6
+ */
+#define OID_TCP_OFFLOAD_CURRENT_CONFIG 0xFC01020B /* query only */
+#define OID_TCP_OFFLOAD_PARAMETERS 0xFC01020C /* set only */
+#define OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES 0xFC01020D /* query only */
+#define OID_TCP_CONNECTION_OFFLOAD_CURRENT_CONFIG 0xFC01020E /* query only */
+#define OID_TCP_CONNECTION_OFFLOAD_HARDWARE_CAPABILITIES 0xFC01020F /* query */
+#define OID_OFFLOAD_ENCAPSULATION 0x0101010A /* set/query */
+
+struct ndis_offload_params {
+	struct ndis_oject_header header;
+	u8 ip_v4_csum;
+	u8 tcp_ip_v4_csum;
+	u8 udp_ip_v4_csum;
+	u8 tcp_ip_v6_csum;
+	u8 udp_ip_v6_csum;
+	u8 lso_v1;
+	u8 ip_sec_v1;
+	u8 lso_v2_ipv4;
+	u8 lso_v2_ipv6;
+	u8 tcp_connection_ip_v4;
+	u8 tcp_connection_ip_v6;
+	u32 flags;
+	u8 ip_sec_v2;
+	u8 ip_sec_v2_ip_v4;
+	struct {
+		u8 rsc_ip_v4;
+		u8 rsc_ip_v6;
+	};
+	struct {
+		u8 encapsulated_packet_task_offload;
+		u8 encapsulation_types;
+	};
+};
+
+struct ndis_tcp_ip_checksum_info {
+	union {
+		struct {
+			u32 is_ipv4:1;
+			u32 is_ipv6:1;
+			u32 tcp_checksum:1;
+			u32 udp_checksum:1;
+			u32 ip_header_checksum:1;
+			u32 reserved:11;
+			u32 tcp_header_offset:10;
+		} transmit;
+		struct {
+			u32 tcp_checksum_failed:1;
+			u32 udp_checksum_failed:1;
+			u32 ip_checksum_failed:1;
+			u32 tcp_checksum_succeeded:1;
+			u32 udp_checksum_succeeded:1;
+			u32 ip_checksum_succeeded:1;
+			u32 loopback:1;
+			u32 tcp_checksum_value_invalid:1;
+			u32 ip_checksum_value_invalid:1;
+		} receive;
+		u32 value;
+	};
+};
+
+struct ndis_tcp_lso_info {
+	union {
+		struct {
+			u32 unused:30;
+			u32 type:1;
+			u32 reserved2:1;
+		} transmit;
+		struct {
+			u32 mss:20;
+			u32 tcp_header_offset:10;
+			u32 type:1;
+			u32 reserved2:1;
+		} lso_v1_transmit;
+		struct {
+			u32 tcp_payload:30;
+			u32 type:1;
+			u32 reserved2:1;
+		} lso_v1_transmit_complete;
+		struct {
+			u32 mss:20;
+			u32 tcp_header_offset:10;
+			u32 type:1;
+			u32 ip_version:1;
+		} lso_v2_transmit;
+		struct {
+			u32 reserved:30;
+			u32 type:1;
+			u32 reserved2:1;
+		} lso_v2_transmit_complete;
+		u32 value;
+	};
+};
+
 #define NDIS_VLAN_PPI_SIZE (sizeof(struct rndis_per_packet_info) + \
 		sizeof(struct ndis_pkt_8021q_info))
 
+#define NDIS_CSUM_PPI_SIZE (sizeof(struct rndis_per_packet_info) + \
+		sizeof(struct ndis_tcp_ip_checksum_info))
+
+#define NDIS_LSO_PPI_SIZE (sizeof(struct rndis_per_packet_info) + \
+		sizeof(struct ndis_tcp_lso_info))
+
 /* Format of Information buffer passed in a SetRequest for the OID */
 /* OID_GEN_RNDIS_CONFIG_PARAMETER. */
 struct rndis_config_parameter_info {
@@ -846,12 +1023,6 @@ struct rndis_message {
 };
 
 
-struct rndis_filter_packet {
-	void *completion_ctx;
-	void (*completion)(void *context);
-	struct rndis_message msg;
-};
-
 /* Handy macros */
 
 /* get the size of an RNDIS message. Pass in the message type, */
@@ -905,6 +1076,16 @@ struct rndis_filter_packet {
 #define NDIS_PACKET_TYPE_FUNCTIONAL 0x00000400
 #define NDIS_PACKET_TYPE_MAC_FRAME 0x00000800
 
+#define INFO_IPV4 2
+#define INFO_IPV6 4
+#define INFO_TCP 2
+#define INFO_UDP 4
+
+#define TRANSPORT_INFO_NOT_IP 0
+#define TRANSPORT_INFO_IPV4_TCP ((INFO_IPV4 << 16) | INFO_TCP)
+#define TRANSPORT_INFO_IPV4_UDP ((INFO_IPV4 << 16) | INFO_UDP)
+#define TRANSPORT_INFO_IPV6_TCP ((INFO_IPV6 << 16) | INFO_TCP)
+#define TRANSPORT_INFO_IPV6_UDP ((INFO_IPV6 << 16) | INFO_UDP)
 
 
 #endif /* _HYPERV_NET_H */
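
The TRANSPORT_INFO_* values added above pack the network-layer type into the upper 16 bits and the transport protocol into the lower 16 bits, which is what netvsc_drv.c later tests with expressions like net_trans_info & (INFO_IPV4 << 16). A minimal standalone sketch of that packing (plain userspace C, not part of the patch):

/* Standalone illustration of the TRANSPORT_INFO_* bit packing used above. */
#include <stdio.h>

#define INFO_IPV4 2
#define INFO_IPV6 4
#define INFO_TCP  2
#define INFO_UDP  4

#define TRANSPORT_INFO_IPV4_TCP ((INFO_IPV4 << 16) | INFO_TCP)
#define TRANSPORT_INFO_IPV6_UDP ((INFO_IPV6 << 16) | INFO_UDP)

int main(void)
{
	unsigned int info = TRANSPORT_INFO_IPV4_TCP;	/* 0x00020002 */

	/* Upper half selects the network layer, lower half the transport. */
	printf("ipv4? %d  tcp? %d\n",
	       !!(info & (INFO_IPV4 << 16)), !!(info & INFO_TCP));

	info = TRANSPORT_INFO_IPV6_UDP;			/* 0x00040004 */
	printf("ipv6? %d  udp? %d\n",
	       !!(info & (INFO_IPV6 << 16)), !!(info & INFO_UDP));
	return 0;
}
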
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 03a2c6e17158..daddea2654ce 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -290,7 +290,7 @@ static int negotiate_nvsp_ver(struct hv_device *device,
 	    NVSP_STAT_SUCCESS)
 		return -EINVAL;
 
-	if (nvsp_ver != NVSP_PROTOCOL_VERSION_2)
+	if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
 		return 0;
 
 	/* NVSPv2 only: Send NDIS config */
@@ -314,6 +314,9 @@ static int netvsc_connect_vsp(struct hv_device *device)
 	struct nvsp_message *init_packet;
 	int ndis_version;
 	struct net_device *ndev;
+	u32 ver_list[] = { NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
+		NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5 };
+	int i, num_ver = 4; /* number of different NVSP versions */
 
 	net_device = get_outbound_net_device(device);
 	if (!net_device)
@@ -323,13 +326,14 @@ static int netvsc_connect_vsp(struct hv_device *device)
 	init_packet = &net_device->channel_init_pkt;
 
 	/* Negotiate the latest NVSP protocol supported */
-	if (negotiate_nvsp_ver(device, net_device, init_packet,
-			       NVSP_PROTOCOL_VERSION_2) == 0) {
-		net_device->nvsp_version = NVSP_PROTOCOL_VERSION_2;
-	} else if (negotiate_nvsp_ver(device, net_device, init_packet,
-			       NVSP_PROTOCOL_VERSION_1) == 0) {
-		net_device->nvsp_version = NVSP_PROTOCOL_VERSION_1;
-	} else {
+	for (i = num_ver - 1; i >= 0; i--)
+		if (negotiate_nvsp_ver(device, net_device, init_packet,
+				       ver_list[i]) == 0) {
+			net_device->nvsp_version = ver_list[i];
+			break;
+		}
+
+	if (i < 0) {
 		ret = -EPROTO;
 		goto cleanup;
 	}
@@ -339,7 +343,10 @@ static int netvsc_connect_vsp(struct hv_device *device)
 	/* Send the ndis version */
 	memset(init_packet, 0, sizeof(struct nvsp_message));
 
-	ndis_version = 0x00050001;
+	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
+		ndis_version = 0x00050001;
+	else
+		ndis_version = 0x0006001e;
 
 	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
 	init_packet->msg.v1_msg.
@@ -358,6 +365,11 @@ static int netvsc_connect_vsp(struct hv_device *device)
 		goto cleanup;
 
 	/* Post the big receive buffer to NetVSP */
+	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
+		net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
+	else
+		net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
+
 	ret = netvsc_init_recv_buf(device);
 
 cleanup:
@@ -432,17 +444,14 @@ static inline u32 hv_ringbuf_avail_percent(
 	return avail_write * 100 / ring_info->ring_datasize;
 }
 
-static void netvsc_send_completion(struct hv_device *device,
+static void netvsc_send_completion(struct netvsc_device *net_device,
+				   struct hv_device *device,
 				   struct vmpacket_descriptor *packet)
 {
-	struct netvsc_device *net_device;
 	struct nvsp_message *nvsp_packet;
 	struct hv_netvsc_packet *nvsc_packet;
 	struct net_device *ndev;
 
-	net_device = get_inbound_net_device(device);
-	if (!net_device)
-		return;
 	ndev = net_device->ndev;
 
 	nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
@@ -561,13 +570,13 @@ int netvsc_send(struct hv_device *device,
 }
 
 static void netvsc_send_recv_completion(struct hv_device *device,
+					struct netvsc_device *net_device,
 					u64 transaction_id, u32 status)
 {
 	struct nvsp_message recvcompMessage;
 	int retries = 0;
 	int ret;
 	struct net_device *ndev;
-	struct netvsc_device *net_device = hv_get_drvdata(device);
 
 	ndev = net_device->ndev;
 
@@ -653,14 +662,15 @@ static void netvsc_receive_completion(void *context)
 
 	/* Send a receive completion for the xfer page packet */
 	if (fsend_receive_comp)
-		netvsc_send_recv_completion(device, transaction_id, status);
+		netvsc_send_recv_completion(device, net_device, transaction_id,
+					    status);
 
 }
 
-static void netvsc_receive(struct hv_device *device,
-			   struct vmpacket_descriptor *packet)
+static void netvsc_receive(struct netvsc_device *net_device,
+			   struct hv_device *device,
+			   struct vmpacket_descriptor *packet)
 {
-	struct netvsc_device *net_device;
 	struct vmtransfer_page_packet_header *vmxferpage_packet;
 	struct nvsp_message *nvsp_packet;
 	struct hv_netvsc_packet *netvsc_packet = NULL;
@@ -673,9 +683,6 @@ static void netvsc_receive(struct hv_device *device,
 
 	LIST_HEAD(listHead);
 
-	net_device = get_inbound_net_device(device);
-	if (!net_device)
-		return;
 	ndev = net_device->ndev;
 
 	/*
@@ -741,7 +748,7 @@ static void netvsc_receive(struct hv_device *device,
 			spin_unlock_irqrestore(&net_device->recv_pkt_list_lock,
 					       flags);
 
-			netvsc_send_recv_completion(device,
+			netvsc_send_recv_completion(device, net_device,
 						    vmxferpage_packet->d.trans_id,
 						    NVSP_STAT_FAIL);
 
@@ -800,22 +807,16 @@ static void netvsc_channel_cb(void *context)
 	struct netvsc_device *net_device;
 	u32 bytes_recvd;
 	u64 request_id;
-	unsigned char *packet;
 	struct vmpacket_descriptor *desc;
 	unsigned char *buffer;
 	int bufferlen = NETVSC_PACKET_SIZE;
 	struct net_device *ndev;
 
-	packet = kzalloc(NETVSC_PACKET_SIZE * sizeof(unsigned char),
-			 GFP_ATOMIC);
-	if (!packet)
-		return;
-	buffer = packet;
-
 	net_device = get_inbound_net_device(device);
 	if (!net_device)
-		goto out;
+		return;
 	ndev = net_device->ndev;
+	buffer = net_device->cb_buffer;
 
 	do {
 		ret = vmbus_recvpacket_raw(device->channel, buffer, bufferlen,
@@ -825,11 +826,13 @@ static void netvsc_channel_cb(void *context)
 				desc = (struct vmpacket_descriptor *)buffer;
 				switch (desc->type) {
 				case VM_PKT_COMP:
-					netvsc_send_completion(device, desc);
+					netvsc_send_completion(net_device,
+							       device, desc);
 					break;
 
 				case VM_PKT_DATA_USING_XFER_PAGES:
-					netvsc_receive(device, desc);
+					netvsc_receive(net_device,
+						       device, desc);
 					break;
 
 				default:
@@ -841,23 +844,16 @@ static void netvsc_channel_cb(void *context)
 					break;
 				}
 
-				/* reset */
-				if (bufferlen > NETVSC_PACKET_SIZE) {
-					kfree(buffer);
-					buffer = packet;
-					bufferlen = NETVSC_PACKET_SIZE;
-				}
 			} else {
-				/* reset */
-				if (bufferlen > NETVSC_PACKET_SIZE) {
-					kfree(buffer);
-					buffer = packet;
-					bufferlen = NETVSC_PACKET_SIZE;
-				}
-
+				/*
+				 * We are done for this pass.
+				 */
 				break;
 			}
+
 		} else if (ret == -ENOBUFS) {
+			if (bufferlen > NETVSC_PACKET_SIZE)
+				kfree(buffer);
 			/* Handle large packet */
 			buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
 			if (buffer == NULL) {
@@ -872,8 +868,8 @@ static void netvsc_channel_cb(void *context)
 		}
 	} while (1);
 
-out:
-	kfree(buffer);
+	if (bufferlen > NETVSC_PACKET_SIZE)
+		kfree(buffer);
 	return;
 }
 
@@ -907,7 +903,6 @@ int netvsc_device_add(struct hv_device *device, void *additional_info)
 	ndev = net_device->ndev;
 
 	/* Initialize the NetVSC channel extension */
-	net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
 	spin_lock_init(&net_device->recv_pkt_list_lock);
 
 	INIT_LIST_HEAD(&net_device->recv_pkt_list);
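
netvsc_connect_vsp() above now walks ver_list[] from the newest NVSP version downwards and keeps the first one the host accepts; i < 0 after the loop means nothing could be negotiated. A standalone sketch of that newest-first pattern, where host_accepts() is a made-up stand-in for negotiate_nvsp_ver():

/* Sketch of newest-first protocol negotiation; host_accepts() is hypothetical. */
#include <stdio.h>

static int host_accepts(unsigned int ver)
{
	/* Pretend the host only speaks NVSP versions up to 0x30002 (v2). */
	return ver <= 0x30002;
}

int main(void)
{
	unsigned int ver_list[] = { 2, 0x30002, 0x40000, 0x50000 };
	int num_ver = 4, i;

	for (i = num_ver - 1; i >= 0; i--)
		if (host_accepts(ver_list[i]))
			break;

	if (i < 0)
		printf("negotiation failed\n");
	else
		printf("negotiated NVSP version 0x%x\n", ver_list[i]);
	return 0;
}
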
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index d6fce9750b95..4e4cf9e0c8d7 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -128,6 +128,27 @@ static int netvsc_close(struct net_device *net)
 	return ret;
 }
 
+static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
+			   int pkt_type)
+{
+	struct rndis_packet *rndis_pkt;
+	struct rndis_per_packet_info *ppi;
+
+	rndis_pkt = &msg->msg.pkt;
+	rndis_pkt->data_offset += ppi_size;
+
+	ppi = (struct rndis_per_packet_info *)((void *)rndis_pkt +
+		rndis_pkt->per_pkt_info_offset + rndis_pkt->per_pkt_info_len);
+
+	ppi->size = ppi_size;
+	ppi->type = pkt_type;
+	ppi->ppi_offset = sizeof(struct rndis_per_packet_info);
+
+	rndis_pkt->per_pkt_info_len += ppi_size;
+
+	return ppi;
+}
+
 static void netvsc_xmit_completion(void *context)
 {
 	struct hv_netvsc_packet *packet = (struct hv_netvsc_packet *)context;
@@ -140,22 +161,164 @@ static void netvsc_xmit_completion(void *context)
 	dev_kfree_skb_any(skb);
 }
 
+static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
+			struct hv_page_buffer *pb)
+{
+	int j = 0;
+
+	/* Deal with compound pages by ignoring the unused part
+	 * of the page.
+	 */
+	page += (offset >> PAGE_SHIFT);
+	offset &= ~PAGE_MASK;
+
+	while (len > 0) {
+		unsigned long bytes;
+
+		bytes = PAGE_SIZE - offset;
+		if (bytes > len)
+			bytes = len;
+		pb[j].pfn = page_to_pfn(page);
+		pb[j].offset = offset;
+		pb[j].len = bytes;
+
+		offset += bytes;
+		len -= bytes;
+
+		if (offset == PAGE_SIZE && len) {
+			page++;
+			offset = 0;
+			j++;
+		}
+	}
+
+	return j + 1;
+}
+
+static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
+			   struct hv_page_buffer *pb)
+{
+	u32 slots_used = 0;
+	char *data = skb->data;
+	int frags = skb_shinfo(skb)->nr_frags;
+	int i;
+
+	/* The packet is laid out thus:
+	 * 1. hdr
+	 * 2. skb linear data
+	 * 3. skb fragment data
+	 */
+	if (hdr != NULL)
+		slots_used += fill_pg_buf(virt_to_page(hdr),
+					offset_in_page(hdr),
+					len, &pb[slots_used]);
+
+	slots_used += fill_pg_buf(virt_to_page(data),
+				offset_in_page(data),
+				skb_headlen(skb), &pb[slots_used]);
+
+	for (i = 0; i < frags; i++) {
+		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
+
+		slots_used += fill_pg_buf(skb_frag_page(frag),
+					frag->page_offset,
+					skb_frag_size(frag), &pb[slots_used]);
+	}
+	return slots_used;
+}
+
+static int count_skb_frag_slots(struct sk_buff *skb)
+{
+	int i, frags = skb_shinfo(skb)->nr_frags;
+	int pages = 0;
+
+	for (i = 0; i < frags; i++) {
+		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
+		unsigned long size = skb_frag_size(frag);
+		unsigned long offset = frag->page_offset;
+
+		/* Skip unused frames from start of page */
+		offset &= ~PAGE_MASK;
+		pages += PFN_UP(offset + size);
+	}
+	return pages;
+}
+
+static int netvsc_get_slots(struct sk_buff *skb)
+{
+	char *data = skb->data;
+	unsigned int offset = offset_in_page(data);
+	unsigned int len = skb_headlen(skb);
+	int slots;
+	int frag_slots;
+
+	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
+	frag_slots = count_skb_frag_slots(skb);
+	return slots + frag_slots;
+}
+
+static u32 get_net_transport_info(struct sk_buff *skb, u32 *trans_off)
+{
+	u32 ret_val = TRANSPORT_INFO_NOT_IP;
+
+	if ((eth_hdr(skb)->h_proto != htons(ETH_P_IP)) &&
+	    (eth_hdr(skb)->h_proto != htons(ETH_P_IPV6))) {
+		goto not_ip;
+	}
+
+	*trans_off = skb_transport_offset(skb);
+
+	if ((eth_hdr(skb)->h_proto == htons(ETH_P_IP))) {
+		struct iphdr *iphdr = ip_hdr(skb);
+
+		if (iphdr->protocol == IPPROTO_TCP)
+			ret_val = TRANSPORT_INFO_IPV4_TCP;
+		else if (iphdr->protocol == IPPROTO_UDP)
+			ret_val = TRANSPORT_INFO_IPV4_UDP;
+	} else {
+		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+			ret_val = TRANSPORT_INFO_IPV6_TCP;
+		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
+			ret_val = TRANSPORT_INFO_IPV6_UDP;
+	}
+
+not_ip:
+	return ret_val;
+}
+
 static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
 {
 	struct net_device_context *net_device_ctx = netdev_priv(net);
 	struct hv_netvsc_packet *packet;
 	int ret;
-	unsigned int i, num_pages, npg_data;
-
-	/* Add multipages for skb->data and additional 2 for RNDIS */
-	npg_data = (((unsigned long)skb->data + skb_headlen(skb) - 1)
-		>> PAGE_SHIFT) - ((unsigned long)skb->data >> PAGE_SHIFT) + 1;
-	num_pages = skb_shinfo(skb)->nr_frags + npg_data + 2;
+	unsigned int num_data_pgs;
+	struct rndis_message *rndis_msg;
+	struct rndis_packet *rndis_pkt;
+	u32 rndis_msg_size;
+	bool isvlan;
+	struct rndis_per_packet_info *ppi;
+	struct ndis_tcp_ip_checksum_info *csum_info;
+	struct ndis_tcp_lso_info *lso_info;
+	int hdr_offset;
+	u32 net_trans_info;
+
+
+	/* We will at most need two pages to describe the rndis
+	 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
+	 * of pages in a single packet.
+	 */
+	num_data_pgs = netvsc_get_slots(skb) + 2;
+	if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
+		netdev_err(net, "Packet too big: %u\n", skb->len);
+		dev_kfree_skb(skb);
+		net->stats.tx_dropped++;
+		return NETDEV_TX_OK;
+	}
 
 	/* Allocate a netvsc packet based on # of frags. */
 	packet = kzalloc(sizeof(struct hv_netvsc_packet) +
-		       (num_pages * sizeof(struct hv_page_buffer)) +
-		       sizeof(struct rndis_filter_packet) +
+		       (num_data_pgs * sizeof(struct hv_page_buffer)) +
+		       sizeof(struct rndis_message) +
 		       NDIS_VLAN_PPI_SIZE, GFP_ATOMIC);
 	if (!packet) {
 		/* out of memory, drop packet */
@@ -168,53 +331,111 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
 
 	packet->vlan_tci = skb->vlan_tci;
 
-	packet->extension = (void *)(unsigned long)packet +
+	packet->is_data_pkt = true;
+	packet->total_data_buflen = skb->len;
+
+	packet->rndis_msg = (struct rndis_message *)((unsigned long)packet +
 			sizeof(struct hv_netvsc_packet) +
-			(num_pages * sizeof(struct hv_page_buffer));
+			(num_data_pgs * sizeof(struct hv_page_buffer)));
+
+	/* Set the completion routine */
+	packet->completion.send.send_completion = netvsc_xmit_completion;
+	packet->completion.send.send_completion_ctx = packet;
+	packet->completion.send.send_completion_tid = (unsigned long)skb;
 
-	/* If the rndis msg goes beyond 1 page, we will add 1 later */
-	packet->page_buf_cnt = num_pages - 1;
+	isvlan = packet->vlan_tci & VLAN_TAG_PRESENT;
+
+	/* Add the rndis header */
+	rndis_msg = packet->rndis_msg;
+	rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
+	rndis_msg->msg_len = packet->total_data_buflen;
+	rndis_pkt = &rndis_msg->msg.pkt;
+	rndis_pkt->data_offset = sizeof(struct rndis_packet);
+	rndis_pkt->data_len = packet->total_data_buflen;
+	rndis_pkt->per_pkt_info_offset = sizeof(struct rndis_packet);
+
+	rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);
+
+	if (isvlan) {
+		struct ndis_pkt_8021q_info *vlan;
+
+		rndis_msg_size += NDIS_VLAN_PPI_SIZE;
+		ppi = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
+				    IEEE_8021Q_INFO);
+		vlan = (struct ndis_pkt_8021q_info *)((void *)ppi +
+				ppi->ppi_offset);
+		vlan->vlanid = packet->vlan_tci & VLAN_VID_MASK;
+		vlan->pri = (packet->vlan_tci & VLAN_PRIO_MASK) >>
+				VLAN_PRIO_SHIFT;
+	}
 
-	/* Initialize it from the skb */
-	packet->total_data_buflen = skb->len;
+	net_trans_info = get_net_transport_info(skb, &hdr_offset);
+	if (net_trans_info == TRANSPORT_INFO_NOT_IP)
+		goto do_send;
+
+	/*
+	 * Setup the sendside checksum offload only if this is not a
+	 * GSO packet.
+	 */
+	if (skb_is_gso(skb))
+		goto do_lso;
+
+	rndis_msg_size += NDIS_CSUM_PPI_SIZE;
+	ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
+			    TCPIP_CHKSUM_PKTINFO);
+
+	csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi +
+			ppi->ppi_offset);
 
-	/* Start filling in the page buffers starting after RNDIS buffer. */
-	packet->page_buf[1].pfn = virt_to_phys(skb->data) >> PAGE_SHIFT;
-	packet->page_buf[1].offset
-		= (unsigned long)skb->data & (PAGE_SIZE - 1);
-	if (npg_data == 1)
-		packet->page_buf[1].len = skb_headlen(skb);
+	if (net_trans_info & (INFO_IPV4 << 16))
+		csum_info->transmit.is_ipv4 = 1;
 	else
-		packet->page_buf[1].len = PAGE_SIZE
-			- packet->page_buf[1].offset;
-
-	for (i = 2; i <= npg_data; i++) {
-		packet->page_buf[i].pfn = virt_to_phys(skb->data
-			+ PAGE_SIZE * (i-1)) >> PAGE_SHIFT;
-		packet->page_buf[i].offset = 0;
-		packet->page_buf[i].len = PAGE_SIZE;
+		csum_info->transmit.is_ipv6 = 1;
+
+	if (net_trans_info & INFO_TCP) {
+		csum_info->transmit.tcp_checksum = 1;
+		csum_info->transmit.tcp_header_offset = hdr_offset;
+	} else if (net_trans_info & INFO_UDP) {
+		csum_info->transmit.udp_checksum = 1;
 	}
-	if (npg_data > 1)
-		packet->page_buf[npg_data].len = (((unsigned long)skb->data
-			+ skb_headlen(skb) - 1) & (PAGE_SIZE - 1)) + 1;
-
-	/* Additional fragments are after SKB data */
-	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		const skb_frag_t *f = &skb_shinfo(skb)->frags[i];
-
-		packet->page_buf[i+npg_data+1].pfn =
-			page_to_pfn(skb_frag_page(f));
-		packet->page_buf[i+npg_data+1].offset = f->page_offset;
-		packet->page_buf[i+npg_data+1].len = skb_frag_size(f);
+	goto do_send;
+
+do_lso:
+	rndis_msg_size += NDIS_LSO_PPI_SIZE;
+	ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
+			    TCP_LARGESEND_PKTINFO);
+
+	lso_info = (struct ndis_tcp_lso_info *)((void *)ppi +
+			ppi->ppi_offset);
+
+	lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
+	if (net_trans_info & (INFO_IPV4 << 16)) {
+		lso_info->lso_v2_transmit.ip_version =
+			NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
+		ip_hdr(skb)->tot_len = 0;
+		ip_hdr(skb)->check = 0;
+		tcp_hdr(skb)->check =
+			~csum_tcpudp_magic(ip_hdr(skb)->saddr,
+				   ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
+	} else {
+		lso_info->lso_v2_transmit.ip_version =
+			NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
+		ipv6_hdr(skb)->payload_len = 0;
+		tcp_hdr(skb)->check =
+			~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+				&ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
 	}
+	lso_info->lso_v2_transmit.tcp_header_offset = hdr_offset;
+	lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
 
-	/* Set the completion routine */
-	packet->completion.send.send_completion = netvsc_xmit_completion;
-	packet->completion.send.send_completion_ctx = packet;
-	packet->completion.send.send_completion_tid = (unsigned long)skb;
+do_send:
+	/* Start filling in the page buffers with the rndis hdr */
+	rndis_msg->msg_len += rndis_msg_size;
+	packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
+					skb, &packet->page_buf[0]);
+
+	ret = netvsc_send(net_device_ctx->device_ctx, packet);
 
-	ret = rndis_filter_send(net_device_ctx->device_ctx,
-			packet);
 	if (ret == 0) {
 		net->stats.tx_bytes += skb->len;
 		net->stats.tx_packets++;
@@ -264,7 +485,8 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,
  * "wire" on the specified device.
  */
 int netvsc_recv_callback(struct hv_device *device_obj,
-				struct hv_netvsc_packet *packet)
+				struct hv_netvsc_packet *packet,
+				struct ndis_tcp_ip_checksum_info *csum_info)
 {
 	struct net_device *net;
 	struct sk_buff *skb;
@@ -291,7 +513,17 @@ int netvsc_recv_callback(struct hv_device *device_obj,
 				packet->total_data_buflen);
 
 	skb->protocol = eth_type_trans(skb, net);
-	skb->ip_summed = CHECKSUM_NONE;
+	if (csum_info) {
+		/* We only look at the IP checksum here.
+		 * Should we be dropping the packet if checksum
+		 * failed? How do we deal with other checksums - TCP/UDP?
+		 */
+		if (csum_info->receive.ip_checksum_succeeded)
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+		else
+			skb->ip_summed = CHECKSUM_NONE;
+	}
+
 	if (packet->vlan_tci & VLAN_TAG_PRESENT)
 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
 				       packet->vlan_tci);
@@ -327,7 +559,7 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
 	if (nvdev == NULL || nvdev->destroy)
 		return -ENODEV;
 
-	if (nvdev->nvsp_version == NVSP_PROTOCOL_VERSION_2)
+	if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
 		limit = NETVSC_MTU;
 
 	if (mtu < 68 || mtu > limit)
@@ -452,9 +684,10 @@ static int netvsc_probe(struct hv_device *dev,
 
 	net->netdev_ops = &device_ops;
 
-	/* TODO: Add GSO and Checksum offload */
-	net->hw_features = 0;
-	net->features = NETIF_F_HW_VLAN_CTAG_TX;
+	net->hw_features = NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_IP_CSUM |
+		NETIF_F_TSO;
+	net->features = NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_SG | NETIF_F_RXCSUM |
+		NETIF_F_IP_CSUM | NETIF_F_TSO;
 
 	SET_ETHTOOL_OPS(net, &ethtool_ops);
 	SET_NETDEV_DEV(net, &dev->device);
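
netvsc_get_slots() above sizes the page-buffer array by counting, for the linear data and each fragment, how many pages a run of len bytes starting at offset within a page can touch: DIV_ROUND_UP(offset + len, PAGE_SIZE). A small standalone sketch of that arithmetic, assuming 4096-byte pages:

/* Standalone sketch of the page-slot arithmetic used by netvsc_get_slots(). */
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static unsigned long slots_needed(unsigned long offset, unsigned long len)
{
	/* Only the offset within the first page matters. */
	offset &= PAGE_SIZE - 1;
	return DIV_ROUND_UP(offset + len, PAGE_SIZE);
}

int main(void)
{
	/* 100 bytes entirely inside one page -> 1 slot. */
	printf("%lu\n", slots_needed(200, 100));
	/* 100 bytes starting 40 bytes before a page boundary -> 2 slots. */
	printf("%lu\n", slots_needed(PAGE_SIZE - 40, 100));
	/* A 9000-byte jumbo frame starting mid-page -> 3 slots. */
	printf("%lu\n", slots_needed(1000, 9000));
	return 0;
}
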
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index b54fd257652b..4a37e3db9e32 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -58,9 +58,6 @@ struct rndis_request {
 	u8 request_ext[RNDIS_EXT_LEN];
 };
 
-static void rndis_filter_send_completion(void *ctx);
-
-
 static struct rndis_device *get_rndis_device(void)
 {
 	struct rndis_device *device;
@@ -297,7 +294,7 @@ static void rndis_filter_receive_response(struct rndis_device *dev,
 			   "rndis response buffer overflow "
 			   "detected (size %u max %zu)\n",
 			   resp->msg_len,
-			   sizeof(struct rndis_filter_packet));
+			   sizeof(struct rndis_message));
 
 		if (resp->ndis_msg_type ==
 		    RNDIS_MSG_RESET_C) {
@@ -373,6 +370,7 @@ static void rndis_filter_receive_data(struct rndis_device *dev,
 	struct rndis_packet *rndis_pkt;
 	u32 data_offset;
 	struct ndis_pkt_8021q_info *vlan;
+	struct ndis_tcp_ip_checksum_info *csum_info;
 
 	rndis_pkt = &msg->msg.pkt;
 
@@ -411,7 +409,8 @@ static void rndis_filter_receive_data(struct rndis_device *dev,
 		pkt->vlan_tci = 0;
 	}
 
-	netvsc_recv_callback(dev->net_dev->dev, pkt);
+	csum_info = rndis_get_ppi(rndis_pkt, TCPIP_CHKSUM_PKTINFO);
+	netvsc_recv_callback(dev->net_dev->dev, pkt, csum_info);
 }
 
 int rndis_filter_receive(struct hv_device *dev,
@@ -630,6 +629,61 @@ cleanup:
 	return ret;
 }
 
+int rndis_filter_set_offload_params(struct hv_device *hdev,
+				struct ndis_offload_params *req_offloads)
+{
+	struct netvsc_device *nvdev = hv_get_drvdata(hdev);
+	struct rndis_device *rdev = nvdev->extension;
+	struct net_device *ndev = nvdev->ndev;
+	struct rndis_request *request;
+	struct rndis_set_request *set;
+	struct ndis_offload_params *offload_params;
+	struct rndis_set_complete *set_complete;
+	u32 extlen = sizeof(struct ndis_offload_params);
+	int ret, t;
+
+	request = get_rndis_request(rdev, RNDIS_MSG_SET,
+		RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
+	if (!request)
+		return -ENOMEM;
+
+	set = &request->request_msg.msg.set_req;
+	set->oid = OID_TCP_OFFLOAD_PARAMETERS;
+	set->info_buflen = extlen;
+	set->info_buf_offset = sizeof(struct rndis_set_request);
+	set->dev_vc_handle = 0;
+
+	offload_params = (struct ndis_offload_params *)((ulong)set +
+				set->info_buf_offset);
+	*offload_params = *req_offloads;
+	offload_params->header.type = NDIS_OBJECT_TYPE_DEFAULT;
+	offload_params->header.revision = NDIS_OFFLOAD_PARAMETERS_REVISION_3;
+	offload_params->header.size = extlen;
+
+	ret = rndis_filter_send_request(rdev, request);
+	if (ret != 0)
+		goto cleanup;
+
+	t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
+	if (t == 0) {
+		netdev_err(ndev, "timeout before we got an OFFLOAD set response...\n");
+		/* can't put_rndis_request, since we may still receive a
+		 * send-completion.
+		 */
+		return -EBUSY;
+	} else {
+		set_complete = &request->response_msg.msg.set_complete;
+		if (set_complete->status != RNDIS_STATUS_SUCCESS) {
+			netdev_err(ndev, "Fail to set offload on host side:0x%x\n",
+				   set_complete->status);
+			ret = -EINVAL;
+		}
+	}
+
+cleanup:
+	put_rndis_request(rdev, request);
+	return ret;
+}
 
 static int rndis_filter_query_device_link_status(struct rndis_device *dev)
 {
@@ -829,6 +883,7 @@ int rndis_filter_device_add(struct hv_device *dev,
 	struct netvsc_device *net_device;
 	struct rndis_device *rndis_device;
 	struct netvsc_device_info *device_info = additional_info;
+	struct ndis_offload_params offloads;
 
 	rndis_device = get_rndis_device();
 	if (!rndis_device)
@@ -868,6 +923,26 @@ int rndis_filter_device_add(struct hv_device *dev,
 
 	memcpy(device_info->mac_adr, rndis_device->hw_mac_adr, ETH_ALEN);
 
+	/* Turn on the offloads; the host supports all of the relevant
+	 * offloads.
+	 */
+	memset(&offloads, 0, sizeof(struct ndis_offload_params));
+	/* A value of zero means "no change"; now turn on what we
+	 * want.
+	 */
+	offloads.ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
+	offloads.tcp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
+	offloads.udp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
+	offloads.tcp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
+	offloads.udp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
+	offloads.lso_v2_ipv4 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
+
+
+	ret = rndis_filter_set_offload_params(dev, &offloads);
+	if (ret)
+		goto err_dev_remv;
+
+
 	rndis_filter_query_device_link_status(rndis_device);
 
 	device_info->link_state = rndis_device->link_state;
@@ -877,6 +952,10 @@ int rndis_filter_device_add(struct hv_device *dev,
 			device_info->link_state ? "down" : "up");
 
 	return ret;
+
+err_dev_remv:
+	rndis_filter_device_remove(dev);
+	return ret;
 }
 
 void rndis_filter_device_remove(struct hv_device *dev)
@@ -913,101 +992,3 @@ int rndis_filter_close(struct hv_device *dev)
 
 	return rndis_filter_close_device(nvdev->extension);
 }
-
-int rndis_filter_send(struct hv_device *dev,
-			     struct hv_netvsc_packet *pkt)
-{
-	int ret;
-	struct rndis_filter_packet *filter_pkt;
-	struct rndis_message *rndis_msg;
-	struct rndis_packet *rndis_pkt;
-	u32 rndis_msg_size;
-	bool isvlan = pkt->vlan_tci & VLAN_TAG_PRESENT;
-
-	/* Add the rndis header */
-	filter_pkt = (struct rndis_filter_packet *)pkt->extension;
-
-	rndis_msg = &filter_pkt->msg;
-	rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);
-	if (isvlan)
-		rndis_msg_size += NDIS_VLAN_PPI_SIZE;
-
-	rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
-	rndis_msg->msg_len = pkt->total_data_buflen +
-				      rndis_msg_size;
-
-	rndis_pkt = &rndis_msg->msg.pkt;
-	rndis_pkt->data_offset = sizeof(struct rndis_packet);
-	if (isvlan)
-		rndis_pkt->data_offset += NDIS_VLAN_PPI_SIZE;
-	rndis_pkt->data_len = pkt->total_data_buflen;
-
-	if (isvlan) {
-		struct rndis_per_packet_info *ppi;
-		struct ndis_pkt_8021q_info *vlan;
-
-		rndis_pkt->per_pkt_info_offset = sizeof(struct rndis_packet);
-		rndis_pkt->per_pkt_info_len = NDIS_VLAN_PPI_SIZE;
-
-		ppi = (struct rndis_per_packet_info *)((ulong)rndis_pkt +
-			rndis_pkt->per_pkt_info_offset);
-		ppi->size = NDIS_VLAN_PPI_SIZE;
-		ppi->type = IEEE_8021Q_INFO;
-		ppi->ppi_offset = sizeof(struct rndis_per_packet_info);
-
-		vlan = (struct ndis_pkt_8021q_info *)((ulong)ppi +
-			ppi->ppi_offset);
-		vlan->vlanid = pkt->vlan_tci & VLAN_VID_MASK;
-		vlan->pri = (pkt->vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
-	}
-
-	pkt->is_data_pkt = true;
-	pkt->page_buf[0].pfn = virt_to_phys(rndis_msg) >> PAGE_SHIFT;
-	pkt->page_buf[0].offset =
-			(unsigned long)rndis_msg & (PAGE_SIZE-1);
-	pkt->page_buf[0].len = rndis_msg_size;
-
-	/* Add one page_buf if the rndis msg goes beyond page boundary */
-	if (pkt->page_buf[0].offset + rndis_msg_size > PAGE_SIZE) {
-		int i;
-		for (i = pkt->page_buf_cnt; i > 1; i--)
-			pkt->page_buf[i] = pkt->page_buf[i-1];
-		pkt->page_buf_cnt++;
-		pkt->page_buf[0].len = PAGE_SIZE - pkt->page_buf[0].offset;
-		pkt->page_buf[1].pfn = virt_to_phys((void *)((ulong)
-			rndis_msg + pkt->page_buf[0].len)) >> PAGE_SHIFT;
-		pkt->page_buf[1].offset = 0;
-		pkt->page_buf[1].len = rndis_msg_size - pkt->page_buf[0].len;
-	}
-
-	/* Save the packet send completion and context */
-	filter_pkt->completion = pkt->completion.send.send_completion;
-	filter_pkt->completion_ctx =
-		pkt->completion.send.send_completion_ctx;
-
-	/* Use ours */
-	pkt->completion.send.send_completion = rndis_filter_send_completion;
-	pkt->completion.send.send_completion_ctx = filter_pkt;
-
-	ret = netvsc_send(dev, pkt);
-	if (ret != 0) {
-		/*
-		 * Reset the completion to originals to allow retries from
-		 * above
-		 */
-		pkt->completion.send.send_completion =
-			filter_pkt->completion;
-		pkt->completion.send.send_completion_ctx =
-			filter_pkt->completion_ctx;
-	}
-
-	return ret;
-}
-
-static void rndis_filter_send_completion(void *ctx)
-{
-	struct rndis_filter_packet *filter_pkt = ctx;
-
-	/* Pass it back to the original handler */
-	filter_pkt->completion(filter_pkt->completion_ctx);
-}
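
init_ppi_data() in netvsc_drv.c places each per-packet info element at per_pkt_info_offset + per_pkt_info_len and then grows both per_pkt_info_len and data_offset by the PPI size. A simplified standalone sketch of that bookkeeping; the struct below is a cut-down stand-in for struct rndis_packet and the sizes are illustrative only:

/* Simplified sketch of RNDIS per-packet-info placement; structs are cut down. */
#include <stdio.h>
#include <stdint.h>

struct rndis_packet_hdr {		/* reduced stand-in for struct rndis_packet */
	uint32_t data_offset;
	uint32_t per_pkt_info_offset;
	uint32_t per_pkt_info_len;
};

/* Returns the byte offset (from the packet header) where the next PPI lands. */
static uint32_t place_ppi(struct rndis_packet_hdr *pkt, uint32_t ppi_size)
{
	uint32_t where = pkt->per_pkt_info_offset + pkt->per_pkt_info_len;

	pkt->data_offset += ppi_size;	/* payload start shifts by ppi_size */
	pkt->per_pkt_info_len += ppi_size;
	return where;
}

int main(void)
{
	struct rndis_packet_hdr pkt = {
		.data_offset = sizeof(pkt),
		.per_pkt_info_offset = sizeof(pkt),
		.per_pkt_info_len = 0,
	};

	printf("vlan ppi at %u\n", place_ppi(&pkt, 16));	/* e.g. 8021Q info */
	printf("csum ppi at %u\n", place_ppi(&pkt, 20));	/* checksum info */
	printf("payload now starts at %u\n", pkt.data_offset);
	return 0;
}
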