1 // SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Microsoft Corporation.
 */
9 #include <linux/ethtool.h>
10 #include <linux/kernel.h>
11 #include <linux/sched.h>
12 #include <linux/wait.h>
13 #include <linux/highmem.h>
14 #include <linux/slab.h>
16 #include <linux/if_ether.h>
17 #include <linux/netdevice.h>
18 #include <linux/if_vlan.h>
19 #include <linux/nls.h>
20 #include <linux/vmalloc.h>
21 #include <linux/rtnetlink.h>
22 #include <linux/ucs2_string.h>
24 #include "hyperv_net.h"
25 #include "netvsc_trace.h"
27 static void rndis_set_multicast(struct work_struct *w);
29 #define RNDIS_EXT_LEN HV_HYP_PAGE_SIZE
30 struct rndis_request {
31 struct list_head list_ent;
32 struct completion wait_event;
34 struct rndis_message response_msg;
36 * The buffer for extended info after the RNDIS response message. It's
37 * referenced based on the data offset in the RNDIS message. Its size
38 * is enough for current needs, and should be sufficient for the near
41 u8 response_ext[RNDIS_EXT_LEN];
43 /* Simplify allocation by having a netvsc packet inline */
44 struct hv_netvsc_packet pkt;
46 struct rndis_message request_msg;
48 * The buffer for the extended info after the RNDIS request message.
49 * It is referenced and sized in a similar way as response_ext.
51 u8 request_ext[RNDIS_EXT_LEN];
54 static const u8 netvsc_hash_key[NETVSC_HASH_KEYLEN] = {
55 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
56 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
57 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
58 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
59 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
62 static struct rndis_device *get_rndis_device(void)
64 struct rndis_device *device;
66 device = kzalloc(sizeof(struct rndis_device), GFP_KERNEL);
70 spin_lock_init(&device->request_lock);
72 INIT_LIST_HEAD(&device->req_list);
73 INIT_WORK(&device->mcast_work, rndis_set_multicast);
75 device->state = RNDIS_DEV_UNINITIALIZED;
80 static struct rndis_request *get_rndis_request(struct rndis_device *dev,
84 struct rndis_request *request;
85 struct rndis_message *rndis_msg;
86 struct rndis_set_request *set;
89 request = kzalloc(sizeof(struct rndis_request), GFP_KERNEL);
93 init_completion(&request->wait_event);
95 rndis_msg = &request->request_msg;
96 rndis_msg->ndis_msg_type = msg_type;
97 rndis_msg->msg_len = msg_len;
99 request->pkt.q_idx = 0;
102 * Set the request id. This field is always after the rndis header for
103 * request/response packet types so we just used the SetRequest as a
106 set = &rndis_msg->msg.set_req;
107 set->req_id = atomic_inc_return(&dev->new_req_id);
109 /* Add to the request list */
110 spin_lock_irqsave(&dev->request_lock, flags);
111 list_add_tail(&request->list_ent, &dev->req_list);
112 spin_unlock_irqrestore(&dev->request_lock, flags);
117 static void put_rndis_request(struct rndis_device *dev,
118 struct rndis_request *req)
122 spin_lock_irqsave(&dev->request_lock, flags);
123 list_del(&req->list_ent);
124 spin_unlock_irqrestore(&dev->request_lock, flags);
129 static void dump_rndis_message(struct net_device *netdev,
130 const struct rndis_message *rndis_msg,
133 switch (rndis_msg->ndis_msg_type) {
134 case RNDIS_MSG_PACKET:
135 if (rndis_msg->msg_len - RNDIS_HEADER_SIZE >= sizeof(struct rndis_packet)) {
136 const struct rndis_packet *pkt = data + RNDIS_HEADER_SIZE;
137 netdev_dbg(netdev, "RNDIS_MSG_PACKET (len %u, "
138 "data offset %u data len %u, # oob %u, "
139 "oob offset %u, oob len %u, pkt offset %u, "
144 pkt->num_oob_data_elements,
145 pkt->oob_data_offset,
147 pkt->per_pkt_info_offset,
148 pkt->per_pkt_info_len);
152 case RNDIS_MSG_INIT_C:
153 if (rndis_msg->msg_len - RNDIS_HEADER_SIZE >=
154 sizeof(struct rndis_initialize_complete)) {
155 const struct rndis_initialize_complete *init_complete =
156 data + RNDIS_HEADER_SIZE;
157 netdev_dbg(netdev, "RNDIS_MSG_INIT_C "
158 "(len %u, id 0x%x, status 0x%x, major %d, minor %d, "
159 "device flags %d, max xfer size 0x%x, max pkts %u, "
162 init_complete->req_id,
163 init_complete->status,
164 init_complete->major_ver,
165 init_complete->minor_ver,
166 init_complete->dev_flags,
167 init_complete->max_xfer_size,
168 init_complete->max_pkt_per_msg,
169 init_complete->pkt_alignment_factor);
173 case RNDIS_MSG_QUERY_C:
174 if (rndis_msg->msg_len - RNDIS_HEADER_SIZE >=
175 sizeof(struct rndis_query_complete)) {
176 const struct rndis_query_complete *query_complete =
177 data + RNDIS_HEADER_SIZE;
178 netdev_dbg(netdev, "RNDIS_MSG_QUERY_C "
179 "(len %u, id 0x%x, status 0x%x, buf len %u, "
182 query_complete->req_id,
183 query_complete->status,
184 query_complete->info_buflen,
185 query_complete->info_buf_offset);
189 case RNDIS_MSG_SET_C:
190 if (rndis_msg->msg_len - RNDIS_HEADER_SIZE + sizeof(struct rndis_set_complete)) {
191 const struct rndis_set_complete *set_complete =
192 data + RNDIS_HEADER_SIZE;
194 "RNDIS_MSG_SET_C (len %u, id 0x%x, status 0x%x)\n",
196 set_complete->req_id,
197 set_complete->status);
201 case RNDIS_MSG_INDICATE:
202 if (rndis_msg->msg_len - RNDIS_HEADER_SIZE >=
203 sizeof(struct rndis_indicate_status)) {
204 const struct rndis_indicate_status *indicate_status =
205 data + RNDIS_HEADER_SIZE;
206 netdev_dbg(netdev, "RNDIS_MSG_INDICATE "
207 "(len %u, status 0x%x, buf len %u, buf offset %u)\n",
209 indicate_status->status,
210 indicate_status->status_buflen,
211 indicate_status->status_buf_offset);
216 netdev_dbg(netdev, "0x%x (len %u)\n",
217 rndis_msg->ndis_msg_type,
223 static int rndis_filter_send_request(struct rndis_device *dev,
224 struct rndis_request *req)
226 struct hv_netvsc_packet *packet;
227 struct hv_page_buffer page_buf[2];
228 struct hv_page_buffer *pb = page_buf;
231 /* Setup the packet to send it */
234 packet->total_data_buflen = req->request_msg.msg_len;
235 packet->page_buf_cnt = 1;
237 pb[0].pfn = virt_to_phys(&req->request_msg) >>
239 pb[0].len = req->request_msg.msg_len;
240 pb[0].offset = offset_in_hvpage(&req->request_msg);
242 /* Add one page_buf when request_msg crossing page boundary */
243 if (pb[0].offset + pb[0].len > HV_HYP_PAGE_SIZE) {
244 packet->page_buf_cnt++;
245 pb[0].len = HV_HYP_PAGE_SIZE -
247 pb[1].pfn = virt_to_phys((void *)&req->request_msg
248 + pb[0].len) >> HV_HYP_PAGE_SHIFT;
250 pb[1].len = req->request_msg.msg_len -
254 trace_rndis_send(dev->ndev, 0, &req->request_msg);
257 ret = netvsc_send(dev->ndev, packet, NULL, pb, NULL, false);
258 rcu_read_unlock_bh();
263 static void rndis_set_link_state(struct rndis_device *rdev,
264 struct rndis_request *request)
267 struct rndis_query_complete *query_complete;
268 u32 msg_len = request->response_msg.msg_len;
270 /* Ensure the packet is big enough to access its fields */
271 if (msg_len - RNDIS_HEADER_SIZE < sizeof(struct rndis_query_complete))
274 query_complete = &request->response_msg.msg.query_complete;
276 if (query_complete->status == RNDIS_STATUS_SUCCESS &&
277 query_complete->info_buflen >= sizeof(u32) &&
278 query_complete->info_buf_offset >= sizeof(*query_complete) &&
279 msg_len - RNDIS_HEADER_SIZE >= query_complete->info_buf_offset &&
280 msg_len - RNDIS_HEADER_SIZE - query_complete->info_buf_offset
281 >= query_complete->info_buflen) {
282 memcpy(&link_status, (void *)((unsigned long)query_complete +
283 query_complete->info_buf_offset), sizeof(u32));
284 rdev->link_state = link_status != 0;
288 static void rndis_filter_receive_response(struct net_device *ndev,
289 struct netvsc_device *nvdev,
290 struct rndis_message *resp,
293 u32 *req_id = &resp->msg.init_complete.req_id;
294 struct rndis_device *dev = nvdev->extension;
295 struct rndis_request *request = NULL;
299 /* This should never happen, it means control message
300 * response received after device removed.
302 if (dev->state == RNDIS_DEV_UNINITIALIZED) {
304 "got rndis message uninitialized\n");
308 /* Ensure the packet is big enough to read req_id. Req_id is the 1st
309 * field in any request/response message, so the payload should have at
310 * least sizeof(u32) bytes
312 if (resp->msg_len - RNDIS_HEADER_SIZE < sizeof(u32)) {
313 netdev_err(ndev, "rndis msg_len too small: %u\n",
318 /* Copy the request ID into nvchan->recv_buf */
319 *req_id = *(u32 *)(data + RNDIS_HEADER_SIZE);
321 spin_lock_irqsave(&dev->request_lock, flags);
322 list_for_each_entry(request, &dev->req_list, list_ent) {
324 * All request/response message contains RequestId as the 1st
327 if (request->request_msg.msg.init_req.req_id == *req_id) {
332 spin_unlock_irqrestore(&dev->request_lock, flags);
336 sizeof(struct rndis_message) + RNDIS_EXT_LEN) {
337 memcpy(&request->response_msg, resp, RNDIS_HEADER_SIZE + sizeof(*req_id));
338 memcpy((void *)&request->response_msg + RNDIS_HEADER_SIZE + sizeof(*req_id),
339 data + RNDIS_HEADER_SIZE + sizeof(*req_id),
340 resp->msg_len - RNDIS_HEADER_SIZE - sizeof(*req_id));
341 if (request->request_msg.ndis_msg_type ==
342 RNDIS_MSG_QUERY && request->request_msg.msg.
343 query_req.oid == RNDIS_OID_GEN_MEDIA_CONNECT_STATUS)
344 rndis_set_link_state(dev, request);
347 "rndis response buffer overflow "
348 "detected (size %u max %zu)\n",
350 sizeof(struct rndis_message));
352 if (resp->ndis_msg_type ==
354 /* does not have a request id field */
355 request->response_msg.msg.reset_complete.
356 status = RNDIS_STATUS_BUFFER_OVERFLOW;
358 request->response_msg.msg.
359 init_complete.status =
360 RNDIS_STATUS_BUFFER_OVERFLOW;
364 complete(&request->wait_event);
367 "no rndis request found for this response "
368 "(id 0x%x res type 0x%x)\n",
370 resp->ndis_msg_type);
375 * Get the Per-Packet-Info with the specified type
376 * return NULL if not found.
378 static inline void *rndis_get_ppi(struct net_device *ndev,
379 struct rndis_packet *rpkt,
380 u32 rpkt_len, u32 type, u8 internal,
381 u32 ppi_size, void *data)
383 struct rndis_per_packet_info *ppi;
386 if (rpkt->per_pkt_info_offset == 0)
389 /* Validate info_offset and info_len */
390 if (rpkt->per_pkt_info_offset < sizeof(struct rndis_packet) ||
391 rpkt->per_pkt_info_offset > rpkt_len) {
392 netdev_err(ndev, "Invalid per_pkt_info_offset: %u\n",
393 rpkt->per_pkt_info_offset);
397 if (rpkt->per_pkt_info_len < sizeof(*ppi) ||
398 rpkt->per_pkt_info_len > rpkt_len - rpkt->per_pkt_info_offset) {
399 netdev_err(ndev, "Invalid per_pkt_info_len: %u\n",
400 rpkt->per_pkt_info_len);
404 ppi = (struct rndis_per_packet_info *)((ulong)rpkt +
405 rpkt->per_pkt_info_offset);
406 /* Copy the PPIs into nvchan->recv_buf */
407 memcpy(ppi, data + RNDIS_HEADER_SIZE + rpkt->per_pkt_info_offset, rpkt->per_pkt_info_len);
408 len = rpkt->per_pkt_info_len;
411 /* Validate ppi_offset and ppi_size */
412 if (ppi->size > len) {
413 netdev_err(ndev, "Invalid ppi size: %u\n", ppi->size);
417 if (ppi->ppi_offset >= ppi->size) {
418 netdev_err(ndev, "Invalid ppi_offset: %u\n", ppi->ppi_offset);
422 if (ppi->type == type && ppi->internal == internal) {
423 /* ppi->size should be big enough to hold the returned object. */
424 if (ppi->size - ppi->ppi_offset < ppi_size) {
425 netdev_err(ndev, "Invalid ppi: size %u ppi_offset %u\n",
426 ppi->size, ppi->ppi_offset);
429 return (void *)((ulong)ppi + ppi->ppi_offset);
432 ppi = (struct rndis_per_packet_info *)((ulong)ppi + ppi->size);
439 void rsc_add_data(struct netvsc_channel *nvchan,
440 const struct ndis_pkt_8021q_info *vlan,
441 const struct ndis_tcp_ip_checksum_info *csum_info,
442 const u32 *hash_info,
445 u32 cnt = nvchan->rsc.cnt;
448 nvchan->rsc.pktlen += len;
450 /* The data/values pointed by vlan, csum_info and hash_info are shared
451 * across the different 'fragments' of the RSC packet; store them into
455 memcpy(&nvchan->rsc.vlan, vlan, sizeof(*vlan));
456 nvchan->rsc.ppi_flags |= NVSC_RSC_VLAN;
458 nvchan->rsc.ppi_flags &= ~NVSC_RSC_VLAN;
460 if (csum_info != NULL) {
461 memcpy(&nvchan->rsc.csum_info, csum_info, sizeof(*csum_info));
462 nvchan->rsc.ppi_flags |= NVSC_RSC_CSUM_INFO;
464 nvchan->rsc.ppi_flags &= ~NVSC_RSC_CSUM_INFO;
466 nvchan->rsc.pktlen = len;
467 if (hash_info != NULL) {
468 nvchan->rsc.hash_info = *hash_info;
469 nvchan->rsc.ppi_flags |= NVSC_RSC_HASH_INFO;
471 nvchan->rsc.ppi_flags &= ~NVSC_RSC_HASH_INFO;
475 nvchan->rsc.data[cnt] = data;
476 nvchan->rsc.len[cnt] = len;
480 static int rndis_filter_receive_data(struct net_device *ndev,
481 struct netvsc_device *nvdev,
482 struct netvsc_channel *nvchan,
483 struct rndis_message *msg,
484 void *data, u32 data_buflen)
486 struct rndis_packet *rndis_pkt = &msg->msg.pkt;
487 const struct ndis_tcp_ip_checksum_info *csum_info;
488 const struct ndis_pkt_8021q_info *vlan;
489 const struct rndis_pktinfo_id *pktinfo_id;
490 const u32 *hash_info;
491 u32 data_offset, rpkt_len;
492 bool rsc_more = false;
495 /* Ensure data_buflen is big enough to read header fields */
496 if (data_buflen < RNDIS_HEADER_SIZE + sizeof(struct rndis_packet)) {
497 netdev_err(ndev, "invalid rndis pkt, data_buflen too small: %u\n",
499 return NVSP_STAT_FAIL;
502 /* Copy the RNDIS packet into nvchan->recv_buf */
503 memcpy(rndis_pkt, data + RNDIS_HEADER_SIZE, sizeof(*rndis_pkt));
505 /* Validate rndis_pkt offset */
506 if (rndis_pkt->data_offset >= data_buflen - RNDIS_HEADER_SIZE) {
507 netdev_err(ndev, "invalid rndis packet offset: %u\n",
508 rndis_pkt->data_offset);
509 return NVSP_STAT_FAIL;
512 /* Remove the rndis header and pass it back up the stack */
513 data_offset = RNDIS_HEADER_SIZE + rndis_pkt->data_offset;
515 rpkt_len = data_buflen - RNDIS_HEADER_SIZE;
516 data_buflen -= data_offset;
519 * Make sure we got a valid RNDIS message, now total_data_buflen
520 * should be the data packet size plus the trailer padding size
522 if (unlikely(data_buflen < rndis_pkt->data_len)) {
523 netdev_err(ndev, "rndis message buffer "
524 "overflow detected (got %u, min %u)"
525 "...dropping this message!\n",
526 data_buflen, rndis_pkt->data_len);
527 return NVSP_STAT_FAIL;
530 vlan = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, IEEE_8021Q_INFO, 0, sizeof(*vlan),
533 csum_info = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, TCPIP_CHKSUM_PKTINFO, 0,
534 sizeof(*csum_info), data);
536 hash_info = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, NBL_HASH_VALUE, 0,
537 sizeof(*hash_info), data);
539 pktinfo_id = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, RNDIS_PKTINFO_ID, 1,
540 sizeof(*pktinfo_id), data);
542 /* Identify RSC frags, drop erroneous packets */
543 if (pktinfo_id && (pktinfo_id->flag & RNDIS_PKTINFO_SUBALLOC)) {
544 if (pktinfo_id->flag & RNDIS_PKTINFO_1ST_FRAG)
546 else if (nvchan->rsc.cnt == 0)
551 if (pktinfo_id->flag & RNDIS_PKTINFO_LAST_FRAG)
554 if (rsc_more && nvchan->rsc.is_last)
560 if (unlikely(nvchan->rsc.cnt >= NVSP_RSC_MAX))
563 /* Put data into per channel structure.
564 * Also, remove the rndis trailer padding from rndis packet message
565 * rndis_pkt->data_len tell us the real data length, we only copy
566 * the data packet to the stack, without the rndis trailer padding
568 rsc_add_data(nvchan, vlan, csum_info, hash_info,
569 data + data_offset, rndis_pkt->data_len);
572 return NVSP_STAT_SUCCESS;
574 ret = netvsc_recv_callback(ndev, nvdev, nvchan);
580 return NVSP_STAT_FAIL;
583 int rndis_filter_receive(struct net_device *ndev,
584 struct netvsc_device *net_dev,
585 struct netvsc_channel *nvchan,
586 void *data, u32 buflen)
588 struct net_device_context *net_device_ctx = netdev_priv(ndev);
589 struct rndis_message *rndis_msg = nvchan->recv_buf;
591 if (buflen < RNDIS_HEADER_SIZE) {
592 netdev_err(ndev, "Invalid rndis_msg (buflen: %u)\n", buflen);
593 return NVSP_STAT_FAIL;
596 /* Copy the RNDIS msg header into nvchan->recv_buf */
597 memcpy(rndis_msg, data, RNDIS_HEADER_SIZE);
599 /* Validate incoming rndis_message packet */
600 if (rndis_msg->msg_len < RNDIS_HEADER_SIZE ||
601 buflen < rndis_msg->msg_len) {
602 netdev_err(ndev, "Invalid rndis_msg (buflen: %u, msg_len: %u)\n",
603 buflen, rndis_msg->msg_len);
604 return NVSP_STAT_FAIL;
607 if (netif_msg_rx_status(net_device_ctx))
608 dump_rndis_message(ndev, rndis_msg, data);
610 switch (rndis_msg->ndis_msg_type) {
611 case RNDIS_MSG_PACKET:
612 return rndis_filter_receive_data(ndev, net_dev, nvchan,
613 rndis_msg, data, buflen);
614 case RNDIS_MSG_INIT_C:
615 case RNDIS_MSG_QUERY_C:
616 case RNDIS_MSG_SET_C:
617 /* completion msgs */
618 rndis_filter_receive_response(ndev, net_dev, rndis_msg, data);
621 case RNDIS_MSG_INDICATE:
622 /* notification msgs */
623 netvsc_linkstatus_callback(ndev, rndis_msg, data, buflen);
627 "unhandled rndis message (type %u len %u)\n",
628 rndis_msg->ndis_msg_type,
630 return NVSP_STAT_FAIL;
633 return NVSP_STAT_SUCCESS;
636 static int rndis_filter_query_device(struct rndis_device *dev,
637 struct netvsc_device *nvdev,
638 u32 oid, void *result, u32 *result_size)
640 struct rndis_request *request;
641 u32 inresult_size = *result_size;
642 struct rndis_query_request *query;
643 struct rndis_query_complete *query_complete;
651 request = get_rndis_request(dev, RNDIS_MSG_QUERY,
652 RNDIS_MESSAGE_SIZE(struct rndis_query_request));
658 /* Setup the rndis query */
659 query = &request->request_msg.msg.query_req;
661 query->info_buf_offset = sizeof(struct rndis_query_request);
662 query->info_buflen = 0;
663 query->dev_vc_handle = 0;
665 if (oid == OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES) {
666 struct ndis_offload *hwcaps;
667 u32 nvsp_version = nvdev->nvsp_version;
671 if (nvsp_version >= NVSP_PROTOCOL_VERSION_5) {
672 ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_3;
673 size = NDIS_OFFLOAD_SIZE;
674 } else if (nvsp_version >= NVSP_PROTOCOL_VERSION_4) {
675 ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_2;
676 size = NDIS_OFFLOAD_SIZE_6_1;
678 ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_1;
679 size = NDIS_OFFLOAD_SIZE_6_0;
682 request->request_msg.msg_len += size;
683 query->info_buflen = size;
684 hwcaps = (struct ndis_offload *)
685 ((unsigned long)query + query->info_buf_offset);
687 hwcaps->header.type = NDIS_OBJECT_TYPE_OFFLOAD;
688 hwcaps->header.revision = ndis_rev;
689 hwcaps->header.size = size;
691 } else if (oid == OID_GEN_RECEIVE_SCALE_CAPABILITIES) {
692 struct ndis_recv_scale_cap *cap;
694 request->request_msg.msg_len +=
695 sizeof(struct ndis_recv_scale_cap);
696 query->info_buflen = sizeof(struct ndis_recv_scale_cap);
697 cap = (struct ndis_recv_scale_cap *)((unsigned long)query +
698 query->info_buf_offset);
699 cap->hdr.type = NDIS_OBJECT_TYPE_RSS_CAPABILITIES;
700 cap->hdr.rev = NDIS_RECEIVE_SCALE_CAPABILITIES_REVISION_2;
701 cap->hdr.size = sizeof(struct ndis_recv_scale_cap);
704 ret = rndis_filter_send_request(dev, request);
708 wait_for_completion(&request->wait_event);
710 /* Copy the response back */
711 query_complete = &request->response_msg.msg.query_complete;
712 msg_len = request->response_msg.msg_len;
714 /* Ensure the packet is big enough to access its fields */
715 if (msg_len - RNDIS_HEADER_SIZE < sizeof(struct rndis_query_complete)) {
720 if (query_complete->info_buflen > inresult_size ||
721 query_complete->info_buf_offset < sizeof(*query_complete) ||
722 msg_len - RNDIS_HEADER_SIZE < query_complete->info_buf_offset ||
723 msg_len - RNDIS_HEADER_SIZE - query_complete->info_buf_offset
724 < query_complete->info_buflen) {
730 (void *)((unsigned long)query_complete +
731 query_complete->info_buf_offset),
732 query_complete->info_buflen);
734 *result_size = query_complete->info_buflen;
738 put_rndis_request(dev, request);
743 /* Get the hardware offload capabilities */
745 rndis_query_hwcaps(struct rndis_device *dev, struct netvsc_device *net_device,
746 struct ndis_offload *caps)
748 u32 caps_len = sizeof(*caps);
751 memset(caps, 0, sizeof(*caps));
753 ret = rndis_filter_query_device(dev, net_device,
754 OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES,
759 if (caps->header.type != NDIS_OBJECT_TYPE_OFFLOAD) {
760 netdev_warn(dev->ndev, "invalid NDIS objtype %#x\n",
765 if (caps->header.revision < NDIS_OFFLOAD_PARAMETERS_REVISION_1) {
766 netdev_warn(dev->ndev, "invalid NDIS objrev %x\n",
767 caps->header.revision);
771 if (caps->header.size > caps_len ||
772 caps->header.size < NDIS_OFFLOAD_SIZE_6_0) {
773 netdev_warn(dev->ndev,
774 "invalid NDIS objsize %u, data size %u\n",
775 caps->header.size, caps_len);
782 static int rndis_filter_query_device_mac(struct rndis_device *dev,
783 struct netvsc_device *net_device)
787 return rndis_filter_query_device(dev, net_device,
788 RNDIS_OID_802_3_PERMANENT_ADDRESS,
789 dev->hw_mac_adr, &size);
792 #define NWADR_STR "NetworkAddress"
793 #define NWADR_STRLEN 14
795 int rndis_filter_set_device_mac(struct netvsc_device *nvdev,
798 struct rndis_device *rdev = nvdev->extension;
799 struct rndis_request *request;
800 struct rndis_set_request *set;
801 struct rndis_config_parameter_info *cpi;
802 wchar_t *cfg_nwadr, *cfg_mac;
803 struct rndis_set_complete *set_complete;
804 char macstr[2*ETH_ALEN+1];
805 u32 extlen = sizeof(struct rndis_config_parameter_info) +
806 2*NWADR_STRLEN + 4*ETH_ALEN;
809 request = get_rndis_request(rdev, RNDIS_MSG_SET,
810 RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
814 set = &request->request_msg.msg.set_req;
815 set->oid = RNDIS_OID_GEN_RNDIS_CONFIG_PARAMETER;
816 set->info_buflen = extlen;
817 set->info_buf_offset = sizeof(struct rndis_set_request);
818 set->dev_vc_handle = 0;
820 cpi = (struct rndis_config_parameter_info *)((ulong)set +
821 set->info_buf_offset);
822 cpi->parameter_name_offset =
823 sizeof(struct rndis_config_parameter_info);
824 /* Multiply by 2 because host needs 2 bytes (utf16) for each char */
825 cpi->parameter_name_length = 2*NWADR_STRLEN;
826 cpi->parameter_type = RNDIS_CONFIG_PARAM_TYPE_STRING;
827 cpi->parameter_value_offset =
828 cpi->parameter_name_offset + cpi->parameter_name_length;
829 /* Multiply by 4 because each MAC byte displayed as 2 utf16 chars */
830 cpi->parameter_value_length = 4*ETH_ALEN;
832 cfg_nwadr = (wchar_t *)((ulong)cpi + cpi->parameter_name_offset);
833 cfg_mac = (wchar_t *)((ulong)cpi + cpi->parameter_value_offset);
834 ret = utf8s_to_utf16s(NWADR_STR, NWADR_STRLEN, UTF16_HOST_ENDIAN,
835 cfg_nwadr, NWADR_STRLEN);
838 snprintf(macstr, 2*ETH_ALEN+1, "%pm", mac);
839 ret = utf8s_to_utf16s(macstr, 2*ETH_ALEN, UTF16_HOST_ENDIAN,
840 cfg_mac, 2*ETH_ALEN);
844 ret = rndis_filter_send_request(rdev, request);
848 wait_for_completion(&request->wait_event);
850 set_complete = &request->response_msg.msg.set_complete;
851 if (set_complete->status != RNDIS_STATUS_SUCCESS)
855 put_rndis_request(rdev, request);
860 rndis_filter_set_offload_params(struct net_device *ndev,
861 struct netvsc_device *nvdev,
862 struct ndis_offload_params *req_offloads)
864 struct rndis_device *rdev = nvdev->extension;
865 struct rndis_request *request;
866 struct rndis_set_request *set;
867 struct ndis_offload_params *offload_params;
868 struct rndis_set_complete *set_complete;
869 u32 extlen = sizeof(struct ndis_offload_params);
871 u32 vsp_version = nvdev->nvsp_version;
873 if (vsp_version <= NVSP_PROTOCOL_VERSION_4) {
874 extlen = VERSION_4_OFFLOAD_SIZE;
875 /* On NVSP_PROTOCOL_VERSION_4 and below, we do not support
876 * UDP checksum offload.
878 req_offloads->udp_ip_v4_csum = 0;
879 req_offloads->udp_ip_v6_csum = 0;
882 request = get_rndis_request(rdev, RNDIS_MSG_SET,
883 RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
887 set = &request->request_msg.msg.set_req;
888 set->oid = OID_TCP_OFFLOAD_PARAMETERS;
889 set->info_buflen = extlen;
890 set->info_buf_offset = sizeof(struct rndis_set_request);
891 set->dev_vc_handle = 0;
893 offload_params = (struct ndis_offload_params *)((ulong)set +
894 set->info_buf_offset);
895 *offload_params = *req_offloads;
896 offload_params->header.type = NDIS_OBJECT_TYPE_DEFAULT;
897 offload_params->header.revision = NDIS_OFFLOAD_PARAMETERS_REVISION_3;
898 offload_params->header.size = extlen;
900 ret = rndis_filter_send_request(rdev, request);
904 wait_for_completion(&request->wait_event);
905 set_complete = &request->response_msg.msg.set_complete;
906 if (set_complete->status != RNDIS_STATUS_SUCCESS) {
907 netdev_err(ndev, "Fail to set offload on host side:0x%x\n",
908 set_complete->status);
913 put_rndis_request(rdev, request);
917 static int rndis_set_rss_param_msg(struct rndis_device *rdev,
918 const u8 *rss_key, u16 flag)
920 struct net_device *ndev = rdev->ndev;
921 struct net_device_context *ndc = netdev_priv(ndev);
922 struct rndis_request *request;
923 struct rndis_set_request *set;
924 struct rndis_set_complete *set_complete;
925 u32 extlen = sizeof(struct ndis_recv_scale_param) +
926 4 * ITAB_NUM + NETVSC_HASH_KEYLEN;
927 struct ndis_recv_scale_param *rssp;
932 request = get_rndis_request(
934 RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
938 set = &request->request_msg.msg.set_req;
939 set->oid = OID_GEN_RECEIVE_SCALE_PARAMETERS;
940 set->info_buflen = extlen;
941 set->info_buf_offset = sizeof(struct rndis_set_request);
942 set->dev_vc_handle = 0;
944 rssp = (struct ndis_recv_scale_param *)(set + 1);
945 rssp->hdr.type = NDIS_OBJECT_TYPE_RSS_PARAMETERS;
946 rssp->hdr.rev = NDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2;
947 rssp->hdr.size = sizeof(struct ndis_recv_scale_param);
949 rssp->hashinfo = NDIS_HASH_FUNC_TOEPLITZ | NDIS_HASH_IPV4 |
950 NDIS_HASH_TCP_IPV4 | NDIS_HASH_IPV6 |
952 rssp->indirect_tabsize = 4*ITAB_NUM;
953 rssp->indirect_taboffset = sizeof(struct ndis_recv_scale_param);
954 rssp->hashkey_size = NETVSC_HASH_KEYLEN;
955 rssp->hashkey_offset = rssp->indirect_taboffset +
956 rssp->indirect_tabsize;
958 /* Set indirection table entries */
959 itab = (u32 *)(rssp + 1);
960 for (i = 0; i < ITAB_NUM; i++)
961 itab[i] = ndc->rx_table[i];
963 /* Set hask key values */
964 keyp = (u8 *)((unsigned long)rssp + rssp->hashkey_offset);
965 memcpy(keyp, rss_key, NETVSC_HASH_KEYLEN);
967 ret = rndis_filter_send_request(rdev, request);
971 wait_for_completion(&request->wait_event);
972 set_complete = &request->response_msg.msg.set_complete;
973 if (set_complete->status == RNDIS_STATUS_SUCCESS) {
974 if (!(flag & NDIS_RSS_PARAM_FLAG_DISABLE_RSS) &&
975 !(flag & NDIS_RSS_PARAM_FLAG_HASH_KEY_UNCHANGED))
976 memcpy(rdev->rss_key, rss_key, NETVSC_HASH_KEYLEN);
979 netdev_err(ndev, "Fail to set RSS parameters:0x%x\n",
980 set_complete->status);
985 put_rndis_request(rdev, request);
989 int rndis_filter_set_rss_param(struct rndis_device *rdev,
992 /* Disable RSS before change */
993 rndis_set_rss_param_msg(rdev, rss_key,
994 NDIS_RSS_PARAM_FLAG_DISABLE_RSS);
996 return rndis_set_rss_param_msg(rdev, rss_key, 0);
999 static int rndis_filter_query_device_link_status(struct rndis_device *dev,
1000 struct netvsc_device *net_device)
1002 u32 size = sizeof(u32);
1005 return rndis_filter_query_device(dev, net_device,
1006 RNDIS_OID_GEN_MEDIA_CONNECT_STATUS,
1007 &link_status, &size);
1010 static int rndis_filter_query_link_speed(struct rndis_device *dev,
1011 struct netvsc_device *net_device)
1013 u32 size = sizeof(u32);
1015 struct net_device_context *ndc;
1018 ret = rndis_filter_query_device(dev, net_device,
1019 RNDIS_OID_GEN_LINK_SPEED,
1020 &link_speed, &size);
1023 ndc = netdev_priv(dev->ndev);
1025 /* The link speed reported from host is in 100bps unit, so
1026 * we convert it to Mbps here.
1028 ndc->speed = link_speed / 10000;
1034 static int rndis_filter_set_packet_filter(struct rndis_device *dev,
1037 struct rndis_request *request;
1038 struct rndis_set_request *set;
1041 if (dev->filter == new_filter)
1044 request = get_rndis_request(dev, RNDIS_MSG_SET,
1045 RNDIS_MESSAGE_SIZE(struct rndis_set_request) +
1050 /* Setup the rndis set */
1051 set = &request->request_msg.msg.set_req;
1052 set->oid = RNDIS_OID_GEN_CURRENT_PACKET_FILTER;
1053 set->info_buflen = sizeof(u32);
1054 set->info_buf_offset = offsetof(typeof(*set), info_buf);
1055 memcpy(set->info_buf, &new_filter, sizeof(u32));
1057 ret = rndis_filter_send_request(dev, request);
1059 wait_for_completion(&request->wait_event);
1060 dev->filter = new_filter;
1063 put_rndis_request(dev, request);
1068 static void rndis_set_multicast(struct work_struct *w)
1070 struct rndis_device *rdev
1071 = container_of(w, struct rndis_device, mcast_work);
1072 u32 filter = NDIS_PACKET_TYPE_DIRECTED;
1073 unsigned int flags = rdev->ndev->flags;
1075 if (flags & IFF_PROMISC) {
1076 filter = NDIS_PACKET_TYPE_PROMISCUOUS;
1078 if (!netdev_mc_empty(rdev->ndev) || (flags & IFF_ALLMULTI))
1079 filter |= NDIS_PACKET_TYPE_ALL_MULTICAST;
1080 if (flags & IFF_BROADCAST)
1081 filter |= NDIS_PACKET_TYPE_BROADCAST;
1084 rndis_filter_set_packet_filter(rdev, filter);
1087 void rndis_filter_update(struct netvsc_device *nvdev)
1089 struct rndis_device *rdev = nvdev->extension;
1091 schedule_work(&rdev->mcast_work);
1094 static int rndis_filter_init_device(struct rndis_device *dev,
1095 struct netvsc_device *nvdev)
1097 struct rndis_request *request;
1098 struct rndis_initialize_request *init;
1099 struct rndis_initialize_complete *init_complete;
1103 request = get_rndis_request(dev, RNDIS_MSG_INIT,
1104 RNDIS_MESSAGE_SIZE(struct rndis_initialize_request));
1110 /* Setup the rndis set */
1111 init = &request->request_msg.msg.init_req;
1112 init->major_ver = RNDIS_MAJOR_VERSION;
1113 init->minor_ver = RNDIS_MINOR_VERSION;
1114 init->max_xfer_size = 0x4000;
1116 dev->state = RNDIS_DEV_INITIALIZING;
1118 ret = rndis_filter_send_request(dev, request);
1120 dev->state = RNDIS_DEV_UNINITIALIZED;
1124 wait_for_completion(&request->wait_event);
1126 init_complete = &request->response_msg.msg.init_complete;
1127 status = init_complete->status;
1128 if (status == RNDIS_STATUS_SUCCESS) {
1129 dev->state = RNDIS_DEV_INITIALIZED;
1130 nvdev->max_pkt = init_complete->max_pkt_per_msg;
1131 nvdev->pkt_align = 1 << init_complete->pkt_alignment_factor;
1134 dev->state = RNDIS_DEV_UNINITIALIZED;
1140 put_rndis_request(dev, request);
1145 static bool netvsc_device_idle(const struct netvsc_device *nvdev)
1149 for (i = 0; i < nvdev->num_chn; i++) {
1150 const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
1152 if (nvchan->mrc.first != nvchan->mrc.next)
1155 if (atomic_read(&nvchan->queue_sends) > 0)
1162 static void rndis_filter_halt_device(struct netvsc_device *nvdev,
1163 struct rndis_device *dev)
1165 struct rndis_request *request;
1166 struct rndis_halt_request *halt;
1168 /* Attempt to do a rndis device halt */
1169 request = get_rndis_request(dev, RNDIS_MSG_HALT,
1170 RNDIS_MESSAGE_SIZE(struct rndis_halt_request));
1174 /* Setup the rndis set */
1175 halt = &request->request_msg.msg.halt_req;
1176 halt->req_id = atomic_inc_return(&dev->new_req_id);
1178 /* Ignore return since this msg is optional. */
1179 rndis_filter_send_request(dev, request);
1181 dev->state = RNDIS_DEV_UNINITIALIZED;
1184 nvdev->destroy = true;
1186 /* Force flag to be ordered before waiting */
1189 /* Wait for all send completions */
1190 wait_event(nvdev->wait_drain, netvsc_device_idle(nvdev));
1193 put_rndis_request(dev, request);
1196 static int rndis_filter_open_device(struct rndis_device *dev)
1200 if (dev->state != RNDIS_DEV_INITIALIZED)
1203 ret = rndis_filter_set_packet_filter(dev,
1204 NDIS_PACKET_TYPE_BROADCAST |
1205 NDIS_PACKET_TYPE_ALL_MULTICAST |
1206 NDIS_PACKET_TYPE_DIRECTED);
1208 dev->state = RNDIS_DEV_DATAINITIALIZED;
1213 static int rndis_filter_close_device(struct rndis_device *dev)
1217 if (dev->state != RNDIS_DEV_DATAINITIALIZED)
1220 /* Make sure rndis_set_multicast doesn't re-enable filter! */
1221 cancel_work_sync(&dev->mcast_work);
1223 ret = rndis_filter_set_packet_filter(dev, 0);
1228 dev->state = RNDIS_DEV_INITIALIZED;
1233 static void netvsc_sc_open(struct vmbus_channel *new_sc)
1235 struct net_device *ndev =
1236 hv_get_drvdata(new_sc->primary_channel->device_obj);
1237 struct net_device_context *ndev_ctx = netdev_priv(ndev);
1238 struct netvsc_device *nvscdev;
1239 u16 chn_index = new_sc->offermsg.offer.sub_channel_index;
1240 struct netvsc_channel *nvchan;
1243 /* This is safe because this callback only happens when
1244 * new device is being setup and waiting on the channel_init_wait.
1246 nvscdev = rcu_dereference_raw(ndev_ctx->nvdev);
1247 if (!nvscdev || chn_index >= nvscdev->num_chn)
1250 nvchan = nvscdev->chan_table + chn_index;
1252 /* Because the device uses NAPI, all the interrupt batching and
1253 * control is done via Net softirq, not the channel handling
1255 set_channel_read_mode(new_sc, HV_CALL_ISR);
1257 /* Set the channel before opening.*/
1258 nvchan->channel = new_sc;
1260 new_sc->next_request_id_callback = vmbus_next_request_id;
1261 new_sc->request_addr_callback = vmbus_request_addr;
1262 new_sc->rqstor_size = netvsc_rqstor_size(netvsc_ring_bytes);
1263 new_sc->max_pkt_size = NETVSC_MAX_PKT_SIZE;
1265 ret = vmbus_open(new_sc, netvsc_ring_bytes,
1266 netvsc_ring_bytes, NULL, 0,
1267 netvsc_channel_cb, nvchan);
1269 napi_enable(&nvchan->napi);
1271 netdev_notice(ndev, "sub channel open failed: %d\n", ret);
1273 if (atomic_inc_return(&nvscdev->open_chn) == nvscdev->num_chn)
1274 wake_up(&nvscdev->subchan_open);
/* Open sub-channels after completing the handling of the device probe.
 * This breaks overlap of processing the host message for the
 * new primary channel with the initialization of sub-channels.
 *
 * Sends an NVSP_SUBCHANNEL_ALLOCATE request on the primary channel, waits
 * for the host's completion, validates the granted count, then waits until
 * every granted sub-channel has actually been opened (see netvsc_sc_open()),
 * and finally re-applies the transmit table and RSS settings.
 *
 * NOTE(review): braces, local declarations (i, ret) and the error-path
 * returns after the netdev_err() calls fall outside this excerpt.
 */
int rndis_set_subchannel(struct net_device *ndev,
			 struct netvsc_device *nvdev,
			 struct netvsc_device_info *dev_info)
	struct nvsp_message *init_packet = &nvdev->channel_init_pkt;
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct hv_device *hv_dev = ndev_ctx->device_ctx;
	struct rndis_device *rdev = nvdev->extension;

	/* Build the allocation request: ask the host for the additional
	 * channels beside the primary one (num_chn - 1; the value line is
	 * outside this excerpt).
	 */
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG5_TYPE_SUBCHANNEL;
	init_packet->msg.v5_msg.subchn_req.op = NVSP_SUBCHANNEL_ALLOCATE;
	init_packet->msg.v5_msg.subchn_req.num_subchannels =
	trace_nvsp_send(ndev, init_packet);

	/* Completion requested so channel_init_wait is signalled below. */
	ret = vmbus_sendpacket(hv_dev->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
		netdev_err(ndev, "sub channel allocate send failed: %d\n", ret);

	wait_for_completion(&nvdev->channel_init_wait);
	if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) {
		netdev_err(ndev, "sub channel request failed\n");

	/* Check that number of allocated sub channel is within the expected range */
	if (init_packet->msg.v5_msg.subchn_comp.num_subchannels > nvdev->num_chn - 1) {
		netdev_err(ndev, "invalid number of allocated sub channel\n");

	/* The host may grant fewer channels than requested. */
	nvdev->num_chn = 1 +
		init_packet->msg.v5_msg.subchn_comp.num_subchannels;

	/* wait for all sub channels to open */
	wait_event(nvdev->subchan_open,
		   atomic_read(&nvdev->open_chn) == nvdev->num_chn);

	/* Spread transmit queues round-robin over the open channels. */
	for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
		ndev_ctx->tx_table[i] = i % nvdev->num_chn;

	/* ignore failures from setting rss parameters, still have channels.
	 * Either the saved key from dev_info or the default netvsc_hash_key
	 * is applied; the selecting condition is outside this excerpt.
	 */
		rndis_filter_set_rss_param(rdev, dev_info->rss_key);
		rndis_filter_set_rss_param(rdev, netvsc_hash_key);

	/* Expose the final channel count to the networking core. */
	netif_set_real_num_tx_queues(ndev, nvdev->num_chn);
	netif_set_real_num_rx_queues(ndev, nvdev->num_chn);
/* Negotiate hardware offload capabilities with the host.
 *
 * Queries the host's NDIS offload capabilities (rndis_query_hwcaps) and
 * translates them into (a) net->hw_features flags, (b) the per-protocol
 * tx_checksum_mask consulted on the transmit path, and (c) an
 * ndis_offload_params request sent back to the host via
 * rndis_filter_set_offload_params().
 *
 * NOTE(review): braces, the declaration of ret, the early-exit check after
 * rndis_query_hwcaps and the final return fall outside this excerpt.
 */
static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
				   struct netvsc_device *nvdev)
	struct net_device *net = rndis_device->ndev;
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct ndis_offload hwcaps;
	struct ndis_offload_params offloads;
	/* Start from the global GSO cap; lowered below to host LSO limits. */
	unsigned int gso_max_size = GSO_MAX_SIZE;

	/* Find HW offload capabilities */
	ret = rndis_query_hwcaps(rndis_device, nvdev, &hwcaps);

	/* A value of zero means "no change"; now turn on what we want. */
	memset(&offloads, 0, sizeof(struct ndis_offload_params));

	/* Linux does not care about IP checksum, always does in kernel */
	offloads.ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_DISABLED;

	/* Reset previously set hw_features flags */
	net->hw_features &= ~NETVSC_SUPPORTED_HW_FEATURES;
	net_device_ctx->tx_checksum_mask = 0;

	/* Compute tx offload settings based on hw capabilities */
	net->hw_features |= NETIF_F_RXCSUM;
	net->hw_features |= NETIF_F_SG;
	net->hw_features |= NETIF_F_RXHASH;

	/* IPv4 TCP checksum offload requires the complete TCP4 cap set. */
	if ((hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_ALL_TCP4) == NDIS_TXCSUM_ALL_TCP4) {
		/* Can checksum TCP */
		net->hw_features |= NETIF_F_IP_CSUM;
		net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV4_TCP;

		offloads.tcp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;

		/* LSOv2 (TSO) for IPv4 also requires 802.3 encapsulation. */
		if (hwcaps.lsov2.ip4_encap & NDIS_OFFLOAD_ENCAP_8023) {
			offloads.lso_v2_ipv4 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
			net->hw_features |= NETIF_F_TSO;

			/* Clamp GSO to the host's IPv4 LSO maximum. */
			if (hwcaps.lsov2.ip4_maxsz < gso_max_size)
				gso_max_size = hwcaps.lsov2.ip4_maxsz;

		if (hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_CAP_UDP4) {
			offloads.udp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
			net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV4_UDP;

	/* Same ladder for IPv6: TCP checksum, then TSO6, then UDP. */
	if ((hwcaps.csum.ip6_txcsum & NDIS_TXCSUM_ALL_TCP6) == NDIS_TXCSUM_ALL_TCP6) {
		net->hw_features |= NETIF_F_IPV6_CSUM;

		offloads.tcp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
		net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV6_TCP;

		/* TSO6 additionally needs IPv6 extension-header support. */
		if ((hwcaps.lsov2.ip6_encap & NDIS_OFFLOAD_ENCAP_8023) &&
		    (hwcaps.lsov2.ip6_opts & NDIS_LSOV2_CAP_IP6) == NDIS_LSOV2_CAP_IP6) {
			offloads.lso_v2_ipv6 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
			net->hw_features |= NETIF_F_TSO6;

			/* Clamp GSO to the host's IPv6 LSO maximum too. */
			if (hwcaps.lsov2.ip6_maxsz < gso_max_size)
				gso_max_size = hwcaps.lsov2.ip6_maxsz;

		if (hwcaps.csum.ip6_txcsum & NDIS_TXCSUM_CAP_UDP6) {
			offloads.udp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
			net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV6_UDP;

	/* Receive-segment coalescing (LRO): advertised only when the host
	 * supports it for both IPv4 and IPv6; enabled per current features.
	 */
	if (hwcaps.rsc.ip4 && hwcaps.rsc.ip6) {
		net->hw_features |= NETIF_F_LRO;

		if (net->features & NETIF_F_LRO) {
			offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED;
			offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED;
			offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED;
			offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED;

	/* In case some hw_features disappeared we need to remove them from
	 * net->features list as they're no longer supported.
	 */
	net->features &= ~NETVSC_SUPPORTED_HW_FEATURES | net->hw_features;

	netif_set_gso_max_size(net, gso_max_size);

	/* Push the assembled offload request back to the host. */
	ret = rndis_filter_set_offload_params(net, nvdev, &offloads);
1439 static void rndis_get_friendly_name(struct net_device *net,
1440 struct rndis_device *rndis_device,
1441 struct netvsc_device *net_device)
1443 ucs2_char_t wname[256];
1448 size = sizeof(wname);
1449 if (rndis_filter_query_device(rndis_device, net_device,
1450 RNDIS_OID_GEN_FRIENDLY_NAME,
1452 return; /* ignore if host does not support */
1455 return; /* name not set */
1457 /* Convert Windows Unicode string to UTF-8 */
1458 len = ucs2_as_utf8(ifalias, wname, sizeof(ifalias));
1460 /* ignore the default value from host */
1461 if (strcmp(ifalias, "Network Adapter") != 0)
1462 dev_set_alias(net, ifalias, len);
/* rndis_filter_device_add() - create and initialize the RNDIS side of a
 * netvsc device during probe.
 *
 * Creates the netvsc channel (netvsc_device_add), performs the RNDIS init
 * handshake, queries MTU / MAC / friendly name / offload capabilities /
 * link status, and on NVSP >= 5 hosts that advertise receive-side scaling
 * sizes the multi-queue setup whose sub-channels are opened later through
 * netvsc_sc_open().
 *
 * Returns the new netvsc_device on success or an ERR_PTR() on failure.
 *
 * NOTE(review): local declarations (mtu, size, ret, i), braces, goto
 * labels (out / err_dev_remv) and several error-path statements fall
 * outside this excerpt.
 */
struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
					      struct netvsc_device_info *device_info)
	struct net_device *net = hv_get_drvdata(dev);
	struct net_device_context *ndc = netdev_priv(net);
	struct netvsc_device *net_device;
	struct rndis_device *rndis_device;
	struct ndis_recv_scale_cap rsscap;
	u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
	u32 num_possible_rss_qs;

	rndis_device = get_rndis_device();
		return ERR_PTR(-ENODEV);

	/* Let the inner driver handle this first to create the netvsc channel
	 * NOTE! Once the channel is created, we may get a receive callback
	 * (RndisFilterOnReceive()) before this call is completed
	 */
	net_device = netvsc_device_add(dev, device_info);
	if (IS_ERR(net_device)) {
		kfree(rndis_device);

	/* Initialize the rndis device */
	net_device->max_chn = 1;
	net_device->num_chn = 1;

	net_device->extension = rndis_device;
	rndis_device->ndev = net;

	/* Send the rndis initialization message */
	ret = rndis_filter_init_device(rndis_device, net_device);

	/* Get the MTU from the host; adopted only when the query succeeds,
	 * the size matches and the host value is smaller than the current
	 * one (the assignment is outside this excerpt).
	 */
	ret = rndis_filter_query_device(rndis_device, net_device,
					RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE,
	if (ret == 0 && size == sizeof(u32) && mtu < net->mtu)

	/* Get the mac address */
	ret = rndis_filter_query_device_mac(rndis_device, net_device);

	/* Report the host-assigned MAC back to the caller. */
	memcpy(device_info->mac_adr, rndis_device->hw_mac_adr, ETH_ALEN);

	/* Get friendly name as ifalias*/
	rndis_get_friendly_name(net, rndis_device, net_device);

	/* Query and set hardware capabilities */
	ret = rndis_netdev_set_hwcaps(rndis_device, net_device);

	rndis_filter_query_device_link_status(rndis_device, net_device);

	netdev_dbg(net, "Device MAC %pM link state %s\n",
		   rndis_device->hw_mac_adr,
		   rndis_device->link_state ? "down" : "up");

	/* Multi-queue (vRSS) requires NVSP 5; older hosts stay single-queue. */
	if (net_device->nvsp_version < NVSP_PROTOCOL_VERSION_5)

	rndis_filter_query_link_speed(rndis_device, net_device);

	/* Ask the host how many receive queues it can scale across. */
	memset(&rsscap, 0, rsscap_size);
	ret = rndis_filter_query_device(rndis_device, net_device,
					OID_GEN_RECEIVE_SCALE_CAPABILITIES,
					&rsscap, &rsscap_size);
	if (ret || rsscap.num_recv_que < 2)

	/* This guarantees that num_possible_rss_qs <= num_online_cpus */
	num_possible_rss_qs = min_t(u32, num_online_cpus(),
				    rsscap.num_recv_que);

	net_device->max_chn = min_t(u32, VRSS_CHANNEL_MAX, num_possible_rss_qs);

	/* We will use the given number of channels if available. */
	net_device->num_chn = min(net_device->max_chn, device_info->num_chn);

	/* Default RSS indirection table, unless user configured one. */
	if (!netif_is_rxfh_configured(net)) {
		for (i = 0; i < ITAB_NUM; i++)
			ndc->rx_table[i] = ethtool_rxfh_indir_default(
						i, net_device->num_chn);

	/* Primary channel is already open; sub-channels arrive via offer. */
	atomic_set(&net_device->open_chn, 1);
	vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open);

	/* Pre-allocate receive completion rings for each sub-channel;
	 * on failure the already-allocated slots are freed (unwind loop
	 * partially outside this excerpt).
	 */
	for (i = 1; i < net_device->num_chn; i++) {
		ret = netvsc_alloc_recv_comp_ring(net_device, i);
			vfree(net_device->chan_table[i].mrc.slots);

	for (i = 1; i < net_device->num_chn; i++)
		netif_napi_add(net, &net_device->chan_table[i].napi,
			       netvsc_poll, NAPI_POLL_WEIGHT);

	/* setting up multiple channels failed */
	net_device->max_chn = 1;
	net_device->num_chn = 1;

	rndis_filter_device_remove(dev, net_device);
	return ERR_PTR(ret);
1591 void rndis_filter_device_remove(struct hv_device *dev,
1592 struct netvsc_device *net_dev)
1594 struct rndis_device *rndis_dev = net_dev->extension;
1596 /* Halt and release the rndis device */
1597 rndis_filter_halt_device(net_dev, rndis_dev);
1599 netvsc_device_remove(dev);
1602 int rndis_filter_open(struct netvsc_device *nvdev)
1607 return rndis_filter_open_device(nvdev->extension);
1610 int rndis_filter_close(struct netvsc_device *nvdev)
1615 return rndis_filter_close_device(nvdev->extension);