/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <[email protected]> IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/average.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <net/route.h>
#include <net/xdp.h>
#include <net/net_failover.h>
static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int, 0444);

static bool csum = true, gso = true, napi_tx;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);
module_param(napi_tx, bool, 0644);

/* FIXME: MTU in config. */
#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN	128

#define VIRTNET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)

/* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */
#define VIRTIO_XDP_HEADROOM 256

/* Separating two types of XDP xmit */
#define VIRTIO_XDP_TX		BIT(0)
#define VIRTIO_XDP_REDIR	BIT(1)
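/* These flags accumulate in the per-poll xdp_xmit mask: REDIR means a map
 * flush is still owed, TX means the XDP send queue still needs to be kicked.
 */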

/* RX packet size EWMA. The average packet size is used to determine the packet
 * buffer size when refilling RX rings. As the entire RX ring may be refilled
 * at once, the weight is chosen so that the EWMA will be insensitive to short-
 * term, transient changes in packet size.
 */
DECLARE_EWMA(pkt_len, 0, 64)

#define VIRTNET_DRIVER_VERSION "1.0.0"

static const unsigned long guest_offloads[] = {
	VIRTIO_NET_F_GUEST_TSO4,
	VIRTIO_NET_F_GUEST_TSO6,
	VIRTIO_NET_F_GUEST_ECN,
	VIRTIO_NET_F_GUEST_UFO,
	VIRTIO_NET_F_GUEST_CSUM
};

struct virtnet_stat_desc {
	char desc[ETH_GSTRING_LEN];
	size_t offset;
};

struct virtnet_sq_stats {
	struct u64_stats_sync syncp;
	u64 packets;
	u64 bytes;
	u64 xdp_tx;
	u64 xdp_tx_drops;
	u64 kicks;
};

struct virtnet_rq_stats {
	struct u64_stats_sync syncp;
	u64 packets;
	u64 bytes;
	u64 drops;
	u64 xdp_packets;
	u64 xdp_tx;
	u64 xdp_redirects;
	u64 xdp_drops;
	u64 kicks;
};

#define VIRTNET_SQ_STAT(m)	offsetof(struct virtnet_sq_stats, m)
#define VIRTNET_RQ_STAT(m)	offsetof(struct virtnet_rq_stats, m)

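/* Each descriptor pairs an ethtool statistics name with the offset of the
 * corresponding counter inside the per-queue stats structure, so the strings
 * and values can be emitted generically.
 */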
static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
	{ "packets",		VIRTNET_SQ_STAT(packets) },
	{ "bytes",		VIRTNET_SQ_STAT(bytes) },
	{ "xdp_tx",		VIRTNET_SQ_STAT(xdp_tx) },
	{ "xdp_tx_drops",	VIRTNET_SQ_STAT(xdp_tx_drops) },
	{ "kicks",		VIRTNET_SQ_STAT(kicks) },
};

static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
	{ "packets",		VIRTNET_RQ_STAT(packets) },
	{ "bytes",		VIRTNET_RQ_STAT(bytes) },
	{ "drops",		VIRTNET_RQ_STAT(drops) },
	{ "xdp_packets",	VIRTNET_RQ_STAT(xdp_packets) },
	{ "xdp_tx",		VIRTNET_RQ_STAT(xdp_tx) },
	{ "xdp_redirects",	VIRTNET_RQ_STAT(xdp_redirects) },
	{ "xdp_drops",		VIRTNET_RQ_STAT(xdp_drops) },
	{ "kicks",		VIRTNET_RQ_STAT(kicks) },
};

#define VIRTNET_SQ_STATS_LEN	ARRAY_SIZE(virtnet_sq_stats_desc)
#define VIRTNET_RQ_STATS_LEN	ARRAY_SIZE(virtnet_rq_stats_desc)

/* Internal representation of a send virtqueue */
struct send_queue {
	/* Virtqueue associated with this send _queue */
	struct virtqueue *vq;

	/* TX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Name of the send queue: output.$index */
	char name[40];

	struct virtnet_sq_stats stats;

	struct napi_struct napi;
};

/* Internal representation of a receive virtqueue */
struct receive_queue {
	/* Virtqueue associated with this receive_queue */
	struct virtqueue *vq;

	struct napi_struct napi;

	struct bpf_prog __rcu *xdp_prog;

	struct virtnet_rq_stats stats;

	/* Chain pages by the private ptr. */
	struct page *pages;

	/* Average packet length for mergeable receive buffers. */
	struct ewma_pkt_len mrg_avg_pkt_len;

	/* Page frag for packet buffer allocation. */
	struct page_frag alloc_frag;

	/* RX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Min single buffer size for mergeable buffers case. */
	unsigned int min_buf_len;

	/* Name of this receive queue: input.$index */
	char name[40];

	struct xdp_rxq_info xdp_rxq;
};

/* Control VQ buffers: protected by the rtnl lock */
struct control_buf {
	struct virtio_net_ctrl_hdr hdr;
	virtio_net_ctrl_ack status;
	struct virtio_net_ctrl_mq mq;
	u8 promisc;
	u8 allmulti;
	__virtio16 vid;
	__virtio64 offloads;
};

struct virtnet_info {
	struct virtio_device *vdev;
	struct virtqueue *cvq;
	struct net_device *dev;
	struct send_queue *sq;
	struct receive_queue *rq;
	unsigned int status;

	/* Max # of queue pairs supported by the device */
	u16 max_queue_pairs;

	/* # of queue pairs currently used by the driver */
	u16 curr_queue_pairs;

	/* # of XDP queue pairs currently used by the driver */
	u16 xdp_queue_pairs;

	/* I like... big packets and I cannot lie! */
	bool big_packets;

	/* Host will merge rx buffers for big packets (shake it! shake it!) */
	bool mergeable_rx_bufs;

	/* Has control virtqueue */
	bool has_cvq;

	/* Host can handle any s/g split between our header and packet data */
	bool any_header_sg;

	/* Packet virtio header size */
	u8 hdr_len;

	/* Work struct for refilling if we run low on memory. */
	struct delayed_work refill;

	/* Work struct for config space updates */
	struct work_struct config_work;

	/* Does the affinity hint is set for virtqueues? */
	bool affinity_hint_set;

	/* CPU hotplug instances for online & dead */
	struct hlist_node node;
	struct hlist_node node_dead;

	struct control_buf *ctrl;

	/* Ethtool settings */
	u8 duplex;
	u32 speed;

	unsigned long guest_offloads;

	/* failover when STANDBY feature enabled */
	struct failover *failover;
};

struct padded_vnet_hdr {
	struct virtio_net_hdr_mrg_rxbuf hdr;
	/*
	 * hdr is in a separate sg buffer, and data sg buffer shares same page
	 * with this header sg. This padding makes next sg 16 byte aligned
	 * after the header.
	 */
	char padding[4];
};

/* Converting between virtqueue no. and kernel tx/rx queue no.
 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
 */
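/* For example, with two queue pairs the device exposes five virtqueues:
 * vq 0 is rx0, vq 1 is tx0, vq 2 is rx1, vq 3 is tx1 and vq 4 is the
 * control virtqueue.
 */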
static int vq2txq(struct virtqueue *vq)
{
	return (vq->index - 1) / 2;
}

static int txq2vq(int txq)
{
	return txq * 2 + 1;
}

static int vq2rxq(struct virtqueue *vq)
{
	return vq->index / 2;
}

static int rxq2vq(int rxq)
{
	return rxq * 2;
}

static inline struct virtio_net_hdr_mrg_rxbuf *skb_vnet_hdr(struct sk_buff *skb)
{
	return (struct virtio_net_hdr_mrg_rxbuf *)skb->cb;
}

/*
 * private is used to chain pages for big packets, put the whole
 * most recent used list in the beginning for reuse
 */
static void give_pages(struct receive_queue *rq, struct page *page)
{
	struct page *end;

	/* Find end of list, sew whole thing into vi->rq.pages. */
	for (end = page; end->private; end = (struct page *)end->private);
	end->private = (unsigned long)rq->pages;
	rq->pages = page;
}

static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
{
	struct page *p = rq->pages;

	if (p) {
		rq->pages = (struct page *)p->private;
		/* clear private here, it is used to chain pages */
		p->private = 0;
	} else
		p = alloc_page(gfp_mask);
	return p;
}

static void virtqueue_napi_schedule(struct napi_struct *napi,
				    struct virtqueue *vq)
{
	if (napi_schedule_prep(napi)) {
		virtqueue_disable_cb(vq);
		__napi_schedule(napi);
	}
}

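/* Completing NAPI can race with the host adding more buffers while callbacks
 * were disabled: re-arm with virtqueue_enable_cb_prepare() first, then
 * re-check with virtqueue_poll() after napi_complete_done() and reschedule
 * NAPI if anything slipped in.
 */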
static void virtqueue_napi_complete(struct napi_struct *napi,
				    struct virtqueue *vq, int processed)
{
	int opaque;

	opaque = virtqueue_enable_cb_prepare(vq);
	if (napi_complete_done(napi, processed)) {
		if (unlikely(virtqueue_poll(vq, opaque)))
			virtqueue_napi_schedule(napi, vq);
	} else {
		virtqueue_disable_cb(vq);
	}
}

static void skb_xmit_done(struct virtqueue *vq)
{
	struct virtnet_info *vi = vq->vdev->priv;
	struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;

	/* Suppress further interrupts. */
	virtqueue_disable_cb(vq);

	if (napi->weight)
		virtqueue_napi_schedule(napi, vq);
	else
		/* We were probably waiting for more output buffers. */
		netif_wake_subqueue(vi->dev, vq2txq(vq));
}

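/* For mergeable buffers the per-buffer context is not a pointer at all: the
 * buffer's truesize lives in the low MRG_CTX_HEADER_SHIFT bits and the
 * headroom in the bits above, packed into an unsigned long that is carried
 * through the virtqueue as a void *.
 */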
#define MRG_CTX_HEADER_SHIFT 22
static void *mergeable_len_to_ctx(unsigned int truesize,
				  unsigned int headroom)
{
	return (void *)(unsigned long)((headroom << MRG_CTX_HEADER_SHIFT) | truesize);
}

static unsigned int mergeable_ctx_to_headroom(void *mrg_ctx)
{
	return (unsigned long)mrg_ctx >> MRG_CTX_HEADER_SHIFT;
}

static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
{
	return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
}

/* Called from bottom half context */
static struct sk_buff *page_to_skb(struct virtnet_info *vi,
				   struct receive_queue *rq,
				   struct page *page, unsigned int offset,
				   unsigned int len, unsigned int truesize)
{
	struct sk_buff *skb;
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	unsigned int copy, hdr_len, hdr_padded_len;
	char *p;

	p = page_address(page) + offset;

	/* copy small packet so we can reuse these pages for small data */
	skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
	if (unlikely(!skb))
		return NULL;

	hdr = skb_vnet_hdr(skb);

	hdr_len = vi->hdr_len;
	if (vi->mergeable_rx_bufs)
		hdr_padded_len = sizeof(*hdr);
	else
		hdr_padded_len = sizeof(struct padded_vnet_hdr);

	memcpy(hdr, p, hdr_len);

	len -= hdr_len;
	offset += hdr_padded_len;
	p += hdr_padded_len;

	copy = len;
	if (copy > skb_tailroom(skb))
		copy = skb_tailroom(skb);
	skb_put_data(skb, p, copy);

	len -= copy;
	offset += copy;

	if (vi->mergeable_rx_bufs) {
		if (len)
			skb_add_rx_frag(skb, 0, page, offset, len, truesize);
		else
			put_page(page);
		return skb;
	}

	/*
	 * Verify that we can indeed put this data into a skb.
	 * This is here to handle cases when the device erroneously
	 * tries to receive more than is possible. This is usually
	 * the case of a broken device.
	 */
	if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
		net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
		dev_kfree_skb(skb);
		return NULL;
	}
	BUG_ON(offset >= PAGE_SIZE);
	while (len) {
		unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
				frag_size, truesize);
		len -= frag_size;
		page = (struct page *)page->private;
		offset = 0;
	}

	if (page)
		give_pages(rq, page);

	return skb;
}

static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
				  struct send_queue *sq,
				  struct xdp_frame *xdpf)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	int err;

	/* virtqueue want to use data area in-front of packet */
	if (unlikely(xdpf->metasize > 0))
		return -EOPNOTSUPP;

	if (unlikely(xdpf->headroom < vi->hdr_len))
		return -EOVERFLOW;

	/* Make room for virtqueue hdr (also change xdpf->headroom?) */
	xdpf->data -= vi->hdr_len;
	/* Zero header and leave csum up to XDP layers */
	hdr = xdpf->data;
	memset(hdr, 0, vi->hdr_len);
	xdpf->len += vi->hdr_len;

	sg_init_one(sq->sg, xdpf->data, xdpf->len);

	err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdpf, GFP_ATOMIC);
	if (unlikely(err))
		return -ENOSPC; /* Caller handle free/refcnt */

	return 0;
}

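/* The send queues used for XDP transmission sit after the queue pairs used
 * by the regular stack; indexing by smp_processor_id() lets each CPU pick
 * its own XDP send queue.
 */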
static struct send_queue *virtnet_xdp_sq(struct virtnet_info *vi)
{
	unsigned int qp;

	qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
	return &vi->sq[qp];
}

static int virtnet_xdp_xmit(struct net_device *dev,
			    int n, struct xdp_frame **frames, u32 flags)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct receive_queue *rq = vi->rq;
	struct xdp_frame *xdpf_sent;
	struct bpf_prog *xdp_prog;
	struct send_queue *sq;
	unsigned int len;
	int drops = 0;
	int kicks = 0;
	int ret, err;
	int i;

	sq = virtnet_xdp_sq(vi);

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
		ret = -EINVAL;
		drops = n;
		goto out;
	}

	/* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
	 * indicate XDP resources have been successfully allocated.
	 */
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (!xdp_prog) {
		ret = -ENXIO;
		drops = n;
		goto out;
	}

	/* Free up any pending old buffers before queueing new ones. */
	while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL)
		xdp_return_frame(xdpf_sent);

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];

		err = __virtnet_xdp_xmit_one(vi, sq, xdpf);
		if (err) {
			xdp_return_frame_rx_napi(xdpf);
			drops++;
		}
	}
	ret = n - drops;

	if (flags & XDP_XMIT_FLUSH) {
		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
			kicks = 1;
	}
out:
	u64_stats_update_begin(&sq->stats.syncp);
	sq->stats.xdp_tx += n;
	sq->stats.xdp_tx_drops += drops;
	sq->stats.kicks += kicks;
	u64_stats_update_end(&sq->stats.syncp);

	return ret;
}

static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
{
	return vi->xdp_queue_pairs ? VIRTIO_XDP_HEADROOM : 0;
}

/* We copy the packet for XDP in the following cases:
 *
 * 1) Packet is scattered across multiple rx buffers.
 * 2) Headroom space is insufficient.
 *
 * This is inefficient but it's a temporary condition that
 * we hit right after XDP is enabled and until queue is refilled
 * with large buffers with sufficient headroom - so it should affect
 * at most queue size packets.
 * Afterwards, the conditions to enable
 * XDP should preclude the underlying device from sending packets
 * across multiple buffers (num_buf > 1), and we make sure buffers
 * have enough headroom.
 */
static struct page *xdp_linearize_page(struct receive_queue *rq,
				       u16 *num_buf,
				       struct page *p,
				       int offset,
				       int page_off,
				       unsigned int *len)
{
	struct page *page = alloc_page(GFP_ATOMIC);

	if (!page)
		return NULL;

	memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
	page_off += *len;

	while (--*num_buf) {
		int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
		unsigned int buflen;
		void *buf;
		int off;

		buf = virtqueue_get_buf(rq->vq, &buflen);
		if (unlikely(!buf))
			goto err_buf;

		p = virt_to_head_page(buf);
		off = buf - page_address(p);

		/* guard against a misconfigured or uncooperative backend that
		 * is sending packet larger than the MTU.
		 */
		if ((page_off + buflen + tailroom) > PAGE_SIZE) {
			put_page(p);
			goto err_buf;
		}

		memcpy(page_address(page) + page_off,
		       page_address(p) + off, buflen);
		page_off += buflen;
		put_page(p);
	}

	/* Headroom does not contribute to packet length */
	*len = page_off - VIRTIO_XDP_HEADROOM;
	return page;
err_buf:
	__free_pages(page, 0);
	return NULL;
}

static struct sk_buff *receive_small(struct net_device *dev,
				     struct virtnet_info *vi,
				     struct receive_queue *rq,
				     void *buf, void *ctx,
				     unsigned int len,
				     unsigned int *xdp_xmit,
				     struct virtnet_rq_stats *stats)
{
	struct sk_buff *skb;
	struct bpf_prog *xdp_prog;
	unsigned int xdp_headroom = (unsigned long)ctx;
	unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom;
	unsigned int headroom = vi->hdr_len + header_offset;
	unsigned int buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
			      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	struct page *page = virt_to_head_page(buf);
	unsigned int delta = 0;
	struct page *xdp_page;
	int err;

	len -= vi->hdr_len;
	stats->bytes += len;

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (xdp_prog) {
		struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
		struct xdp_frame *xdpf;
		struct xdp_buff xdp;
		void *orig_data;
		u32 act;

		if (unlikely(hdr->hdr.gso_type))
			goto err_xdp;

		if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
			int offset = buf - page_address(page) + header_offset;
			unsigned int tlen = len + vi->hdr_len;
			u16 num_buf = 1;

			xdp_headroom = virtnet_get_headroom(vi);
			header_offset = VIRTNET_RX_PAD + xdp_headroom;
			headroom = vi->hdr_len + header_offset;
			buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
				 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
			xdp_page = xdp_linearize_page(rq, &num_buf, page,
						      offset, header_offset,
						      &tlen);
			if (!xdp_page)
				goto err_xdp;

			buf = page_address(xdp_page);
			put_page(page);
			page = xdp_page;
		}

		xdp.data_hard_start = buf + VIRTNET_RX_PAD + vi->hdr_len;
		xdp.data = xdp.data_hard_start + xdp_headroom;
		xdp_set_data_meta_invalid(&xdp);
		xdp.data_end = xdp.data + len;
		xdp.rxq = &rq->xdp_rxq;
		orig_data = xdp.data;
		act = bpf_prog_run_xdp(xdp_prog, &xdp);
		stats->xdp_packets++;

		switch (act) {
		case XDP_PASS:
			/* Recalculate length in case bpf program changed it */
			delta = orig_data - xdp.data;
			len = xdp.data_end - xdp.data;
			break;
		case XDP_TX:
			stats->xdp_tx++;
			xdpf = convert_to_xdp_frame(&xdp);
			if (unlikely(!xdpf))
				goto err_xdp;
			err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
			if (unlikely(err < 0)) {
				trace_xdp_exception(vi->dev, xdp_prog, act);
				goto err_xdp;
			}
			*xdp_xmit |= VIRTIO_XDP_TX;
			rcu_read_unlock();
			goto xdp_xmit;
		case XDP_REDIRECT:
			stats->xdp_redirects++;
			err = xdp_do_redirect(dev, &xdp, xdp_prog);
			if (err)
				goto err_xdp;
			*xdp_xmit |= VIRTIO_XDP_REDIR;
			rcu_read_unlock();
			goto xdp_xmit;
		default:
			bpf_warn_invalid_xdp_action(act);
			/* fall through */
		case XDP_ABORTED:
			trace_xdp_exception(vi->dev, xdp_prog, act);
		case XDP_DROP:
			goto err_xdp;
		}
	}
	rcu_read_unlock();

	skb = build_skb(buf, buflen);
	if (!skb) {
		put_page(page);
		goto err;
	}
	skb_reserve(skb, headroom - delta);
	skb_put(skb, len);
	if (!delta) {
		buf += header_offset;
		memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len);
	} /* keep zeroed vnet hdr since packet was changed by bpf */

err:
	return skb;

err_xdp:
	rcu_read_unlock();
	stats->xdp_drops++;
	stats->drops++;
	put_page(page);
xdp_xmit:
	return NULL;
}

static struct sk_buff *receive_big(struct net_device *dev,
				   struct virtnet_info *vi,
				   struct receive_queue *rq,
				   void *buf,
				   unsigned int len,
				   struct virtnet_rq_stats *stats)
{
	struct page *page = buf;
	struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE);

	stats->bytes += len - vi->hdr_len;
	if (unlikely(!skb))
		goto err;

	return skb;

err:
	stats->drops++;
	give_pages(rq, page);
	return NULL;
}

static struct sk_buff *receive_mergeable(struct net_device *dev,
					 struct virtnet_info *vi,
					 struct receive_queue *rq,
					 void *buf,
					 void *ctx,
					 unsigned int len,
					 unsigned int *xdp_xmit,
					 struct virtnet_rq_stats *stats)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
	u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
	struct page *page = virt_to_head_page(buf);
	int offset = buf - page_address(page);
	struct sk_buff *head_skb, *curr_skb;
	struct bpf_prog *xdp_prog;
	unsigned int truesize;
	unsigned int headroom = mergeable_ctx_to_headroom(ctx);
	int err;

	head_skb = NULL;
	stats->bytes += len - vi->hdr_len;

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (xdp_prog) {
		struct xdp_frame *xdpf;
		struct page *xdp_page;
		struct xdp_buff xdp;
		void *data;
		u32 act;

		/* Transient failure which in theory could occur if
		 * in-flight packets from before XDP was enabled reach
		 * the receive path after XDP is loaded.
		 */
		if (unlikely(hdr->hdr.gso_type))
			goto err_xdp;

		/* This happens when rx buffer size is underestimated
		 * or headroom is not enough because of the buffer
		 * was refilled before XDP is set. This should only
		 * happen for the first several packets, so we don't
		 * care much about its performance.
		 */
		if (unlikely(num_buf > 1 ||
			     headroom < virtnet_get_headroom(vi))) {
			/* linearize data for XDP */
			xdp_page = xdp_linearize_page(rq, &num_buf,
						      page, offset,
						      VIRTIO_XDP_HEADROOM,
						      &len);
			if (!xdp_page)
				goto err_xdp;
			offset = VIRTIO_XDP_HEADROOM;
		} else {
			xdp_page = page;
		}

		/* Allow consuming headroom but reserve enough space to push
		 * the descriptor on if we get an XDP_TX return code.
		 */
		data = page_address(xdp_page) + offset;
		xdp.data_hard_start = data - VIRTIO_XDP_HEADROOM + vi->hdr_len;
		xdp.data = data + vi->hdr_len;
		xdp_set_data_meta_invalid(&xdp);
		xdp.data_end = xdp.data + (len - vi->hdr_len);
		xdp.rxq = &rq->xdp_rxq;

		act = bpf_prog_run_xdp(xdp_prog, &xdp);
		stats->xdp_packets++;

		switch (act) {
		case XDP_PASS:
			/* recalculate offset to account for any header
			 * adjustments. Note other cases do not build an
			 * skb and avoid using offset
			 */
			offset = xdp.data -
					page_address(xdp_page) - vi->hdr_len;

			/* recalculate len if xdp.data or xdp.data_end were
			 * adjusted
			 */
			len = xdp.data_end - xdp.data + vi->hdr_len;
			/* We can only create skb based on xdp_page. */
			if (unlikely(xdp_page != page)) {
				rcu_read_unlock();
				put_page(page);
				head_skb = page_to_skb(vi, rq, xdp_page,
						       offset, len, PAGE_SIZE);
				return head_skb;
			}
			break;
		case XDP_TX:
			stats->xdp_tx++;
			xdpf = convert_to_xdp_frame(&xdp);
			if (unlikely(!xdpf))
				goto err_xdp;
			err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
			if (unlikely(err < 0)) {
				trace_xdp_exception(vi->dev, xdp_prog, act);
				if (unlikely(xdp_page != page))
					put_page(xdp_page);
				goto err_xdp;
			}
			*xdp_xmit |= VIRTIO_XDP_TX;
			if (unlikely(xdp_page != page))
				put_page(page);
			rcu_read_unlock();
			goto xdp_xmit;
		case XDP_REDIRECT:
			stats->xdp_redirects++;
			err = xdp_do_redirect(dev, &xdp, xdp_prog);
			if (err) {
				if (unlikely(xdp_page != page))
					put_page(xdp_page);
				goto err_xdp;
			}
			*xdp_xmit |= VIRTIO_XDP_REDIR;
			if (unlikely(xdp_page != page))
				put_page(page);
			rcu_read_unlock();
			goto xdp_xmit;
		default:
			bpf_warn_invalid_xdp_action(act);
			/* fall through */
		case XDP_ABORTED:
			trace_xdp_exception(vi->dev, xdp_prog, act);
			/* fall through */
		case XDP_DROP:
			if (unlikely(xdp_page != page))
				__free_pages(xdp_page, 0);
			goto err_xdp;
		}
	}
	rcu_read_unlock();

	truesize = mergeable_ctx_to_truesize(ctx);
	if (unlikely(len > truesize)) {
		pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
			 dev->name, len, (unsigned long)ctx);
		dev->stats.rx_length_errors++;
		goto err_skb;
	}

	head_skb = page_to_skb(vi, rq, page, offset, len, truesize);
	curr_skb = head_skb;

	if (unlikely(!curr_skb))
		goto err_skb;
	while (--num_buf) {
		int num_skb_frags;

		buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx);
		if (unlikely(!buf)) {
			pr_debug("%s: rx error: %d buffers out of %d missing\n",
				 dev->name, num_buf,
				 virtio16_to_cpu(vi->vdev,
						 hdr->num_buffers));
			dev->stats.rx_length_errors++;
			goto err_buf;
		}

		stats->bytes += len;
		page = virt_to_head_page(buf);

		truesize = mergeable_ctx_to_truesize(ctx);
		if (unlikely(len > truesize)) {
			pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
				 dev->name, len, (unsigned long)ctx);
			dev->stats.rx_length_errors++;
			goto err_skb;
		}

		num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
		if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
			struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);

			if (unlikely(!nskb))
				goto err_skb;
			if (curr_skb == head_skb)
				skb_shinfo(curr_skb)->frag_list = nskb;
			else
				curr_skb->next = nskb;
			curr_skb = nskb;
			head_skb->truesize += nskb->truesize;
			num_skb_frags = 0;
		}
		if (curr_skb != head_skb) {
			head_skb->data_len += len;
			head_skb->len += len;
			head_skb->truesize += truesize;
		}
		offset = buf - page_address(page);
		if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
			put_page(page);
			skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
					     len, truesize);
		} else {
			skb_add_rx_frag(curr_skb, num_skb_frags, page,
					offset, len, truesize);
		}
	}

	ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
	return head_skb;

err_xdp:
	rcu_read_unlock();
	stats->xdp_drops++;
err_skb:
	put_page(page);
	while (num_buf-- > 1) {
		buf = virtqueue_get_buf(rq->vq, &len);
		if (unlikely(!buf)) {
			pr_debug("%s: rx error: %d buffers missing\n",
				 dev->name, num_buf);
			dev->stats.rx_length_errors++;
			break;
		}
		stats->bytes += len;
		page = virt_to_head_page(buf);
		put_page(page);
	}
err_buf:
	stats->drops++;
	dev_kfree_skb(head_skb);
xdp_xmit:
	return NULL;
}

static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
			void *buf, unsigned int len, void **ctx,
			unsigned int *xdp_xmit,
			struct virtnet_rq_stats *stats)
{
	struct net_device *dev = vi->dev;
	struct sk_buff *skb;
	struct virtio_net_hdr_mrg_rxbuf *hdr;

	if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
		pr_debug("%s: short packet %i\n", dev->name, len);
		dev->stats.rx_length_errors++;
		if (vi->mergeable_rx_bufs) {
			put_page(virt_to_head_page(buf));
		} else if (vi->big_packets) {
			give_pages(rq, buf);
		} else {
			put_page(virt_to_head_page(buf));
		}
		return;
	}

	if (vi->mergeable_rx_bufs)
		skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
					stats);
	else if (vi->big_packets)
		skb = receive_big(dev, vi, rq, buf, len, stats);
	else
		skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats);

	if (unlikely(!skb))
		return;

	hdr = skb_vnet_hdr(skb);

	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
				  virtio_is_little_endian(vi->vdev))) {
		net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n",
				     dev->name, hdr->hdr.gso_type,
				     hdr->hdr.gso_size);
		goto frame_err;
	}

	skb->protocol = eth_type_trans(skb, dev);
	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
		 ntohs(skb->protocol), skb->len, skb->pkt_type);

	napi_gro_receive(&rq->napi, skb);
	return;

frame_err:
	dev->stats.rx_frame_errors++;
	dev_kfree_skb(skb);
}

/* Unlike mergeable buffers, all buffers are allocated to the
 * same size, except for the headroom. For this reason we do
 * not need to use mergeable_len_to_ctx here - it is enough
 * to store the headroom as the context ignoring the truesize.
 */
static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
			     gfp_t gfp)
{
	struct page_frag *alloc_frag = &rq->alloc_frag;
	char *buf;
	unsigned int xdp_headroom = virtnet_get_headroom(vi);
	void *ctx = (void *)(unsigned long)xdp_headroom;
	int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom;
	int err;

	len = SKB_DATA_ALIGN(len) +
	      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp)))
		return -ENOMEM;

	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
	get_page(alloc_frag->page);
	alloc_frag->offset += len;
	sg_init_one(rq->sg, buf + VIRTNET_RX_PAD + xdp_headroom,
		    vi->hdr_len + GOOD_PACKET_LEN);
	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
	if (err < 0)
		put_page(virt_to_head_page(buf));
	return err;
}

static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
			   gfp_t gfp)
{
	struct page *first, *list = NULL;
	char *p;
	int i, err, offset;

	sg_init_table(rq->sg, MAX_SKB_FRAGS + 2);

	/* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */
	for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
		first = get_a_page(rq, gfp);
		if (!first) {
			if (list)
				give_pages(rq, list);
			return -ENOMEM;
		}
		sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);

		/* chain new page in list head to match sg */
		first->private = (unsigned long)list;
		list = first;
	}

	first = get_a_page(rq, gfp);
	if (!first) {
		give_pages(rq, list);
		return -ENOMEM;
	}
	p = page_address(first);

	/* rq->sg[0], rq->sg[1] share the same page */
	/* a separated rq->sg[0] for header - required in case !any_header_sg */
	sg_set_buf(&rq->sg[0], p, vi->hdr_len);

	/* rq->sg[1] for data packet, from offset */
	offset = sizeof(struct padded_vnet_hdr);
	sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);

	/* chain first in list head */
	first->private = (unsigned long)list;
	err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2,
				  first, gfp);
	if (err < 0)
		give_pages(rq, first);

	return err;
}

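/* The size of a mergeable receive buffer tracks an EWMA of recent packet
 * lengths, clamped between the device's minimum buffer length and a page;
 * when XDP headroom/tailroom is reserved, the buffer is simply a page minus
 * that reservation.
 */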
static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
					  struct ewma_pkt_len *avg_pkt_len,
					  unsigned int room)
{
	const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	unsigned int len;

	if (room)
		return PAGE_SIZE - room;

	len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
				rq->min_buf_len, PAGE_SIZE - hdr_len);

	return ALIGN(len, L1_CACHE_BYTES);
}

static int add_recvbuf_mergeable(struct virtnet_info *vi,
				 struct receive_queue *rq, gfp_t gfp)
{
	struct page_frag *alloc_frag = &rq->alloc_frag;
	unsigned int headroom = virtnet_get_headroom(vi);
	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
	unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
	char *buf;
	void *ctx;
	int err;
	unsigned int len, hole;

	/* Extra tailroom is needed to satisfy XDP's assumption. This
	 * means rx frags coalescing won't work, but consider we've
	 * disabled GSO for XDP, it won't be a big issue.
	 */
	len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room);
	if (unlikely(!skb_page_frag_refill(len + room, alloc_frag, gfp)))
		return -ENOMEM;

	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
	buf += headroom; /* advance address leaving hole at front of pkt */
	get_page(alloc_frag->page);
	alloc_frag->offset += len + room;
	hole = alloc_frag->size - alloc_frag->offset;
	if (hole < len + room) {
		/* To avoid internal fragmentation, if there is very likely not
		 * enough space for another buffer, add the remaining space to
		 * the current buffer.
		 */
		len += hole;
		alloc_frag->offset += hole;
	}

	sg_init_one(rq->sg, buf, len);
	ctx = mergeable_len_to_ctx(len, headroom);
	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
	if (err < 0)
		put_page(virt_to_head_page(buf));

	return err;
}

/*
 * Returns false if we couldn't fill entirely (OOM).
 *
 * Normally run in the receive path, but can also be run from ndo_open
 * before we're receiving packets, or from refill_work which is
 * careful to disable receiving (using napi_disable).
 */
static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
			  gfp_t gfp)
{
	int err;
	bool oom;

	do {
		if (vi->mergeable_rx_bufs)
			err = add_recvbuf_mergeable(vi, rq, gfp);
		else if (vi->big_packets)
			err = add_recvbuf_big(vi, rq, gfp);
		else
			err = add_recvbuf_small(vi, rq, gfp);

		oom = err == -ENOMEM;
		if (err)
			break;
	} while (rq->vq->num_free);
	if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) {
		u64_stats_update_begin(&rq->stats.syncp);
		rq->stats.kicks++;
		u64_stats_update_end(&rq->stats.syncp);
	}

	return !oom;
}

static void skb_recv_done(struct virtqueue *rvq)
{
	struct virtnet_info *vi = rvq->vdev->priv;
	struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];

	virtqueue_napi_schedule(&rq->napi, rvq);
}

static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
{
	napi_enable(napi);

	/* If all buffers were filled by other side before we napi_enabled, we
	 * won't get another interrupt, so process any outstanding packets now.
	 * Call local_bh_enable after to trigger softIRQ processing.
	 */
	local_bh_disable();
	virtqueue_napi_schedule(napi, vq);
	local_bh_enable();
}

static void virtnet_napi_tx_enable(struct virtnet_info *vi,
				   struct virtqueue *vq,
				   struct napi_struct *napi)
{
	if (!napi->weight)
		return;

	/* Tx napi touches cachelines on the cpu handling tx interrupts. Only
	 * enable the feature if this is likely affine with the transmit path.
	 */
	if (!vi->affinity_hint_set) {
		napi->weight = 0;
		return;
	}

	return virtnet_napi_enable(vq, napi);
}

static void virtnet_napi_tx_disable(struct napi_struct *napi)
{
	if (napi->weight)
		napi_disable(napi);
}

static void refill_work(struct work_struct *work)
{
	struct virtnet_info *vi =
		container_of(work, struct virtnet_info, refill.work);
	bool still_empty;
	int i;

	for (i = 0; i < vi->curr_queue_pairs; i++) {
		struct receive_queue *rq = &vi->rq[i];

		napi_disable(&rq->napi);
		still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
		virtnet_napi_enable(rq->vq, &rq->napi);

		/* In theory, this can happen: if we don't get any buffers in
		 * we will *never* try to fill again.
		 */
		if (still_empty)
			schedule_delayed_work(&vi->refill, HZ/2);
	}
}

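/* Receive processing accumulates counters in a local virtnet_rq_stats and
 * folds them into rq->stats in a single u64_stats section after the poll
 * loop, so the hot path updates the shared counters only once per poll.
 */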
static int virtnet_receive(struct receive_queue *rq, int budget,
			   unsigned int *xdp_xmit)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	struct virtnet_rq_stats stats = {};
	unsigned int len;
	void *buf;
	int i;

	if (!vi->big_packets || vi->mergeable_rx_bufs) {
		void *ctx;

		while (stats.packets < budget &&
		       (buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) {
			receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
			stats.packets++;
		}
	} else {
		while (stats.packets < budget &&
		       (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
			receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats);
			stats.packets++;
		}
	}

	if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) {
		if (!try_fill_recv(vi, rq, GFP_ATOMIC))
			schedule_delayed_work(&vi->refill, 0);
	}

	u64_stats_update_begin(&rq->stats.syncp);
	for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++) {
		size_t offset = virtnet_rq_stats_desc[i].offset;
		u64 *item;

		item = (u64 *)((u8 *)&rq->stats + offset);
		*item += *(u64 *)((u8 *)&stats + offset);
	}
	u64_stats_update_end(&rq->stats.syncp);

	return stats.packets;
}

static void free_old_xmit_skbs(struct send_queue *sq)
{
	struct sk_buff *skb;
	unsigned int len;
	unsigned int packets = 0;
	unsigned int bytes = 0;

	while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) {
		pr_debug("Sent skb %p\n", skb);

		bytes += skb->len;
		packets++;

		dev_consume_skb_any(skb);
	}

	/* Avoid overhead when no packets have been processed
	 * happens when called speculatively from start_xmit.
	 */
	if (!packets)
		return;

	u64_stats_update_begin(&sq->stats.syncp);
	sq->stats.bytes += bytes;
	sq->stats.packets += packets;
	u64_stats_update_end(&sq->stats.syncp);
}

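/* When tx NAPI is in use, completed transmit buffers are also reaped
 * opportunistically from the rx NAPI handler of the paired queue, which
 * frees ring space without waiting for a tx interrupt.
 */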
static void virtnet_poll_cleantx(struct receive_queue *rq)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	unsigned int index = vq2rxq(rq->vq);
	struct send_queue *sq = &vi->sq[index];
	struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);

	if (!sq->napi.weight)
		return;

	if (__netif_tx_trylock(txq)) {
		free_old_xmit_skbs(sq);
		__netif_tx_unlock(txq);
	}

	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
		netif_tx_wake_queue(txq);
}

static int virtnet_poll(struct napi_struct *napi, int budget)
{
	struct receive_queue *rq =
		container_of(napi, struct receive_queue, napi);
	struct virtnet_info *vi = rq->vq->vdev->priv;
	struct send_queue *sq;
	unsigned int received;
	unsigned int xdp_xmit = 0;

	virtnet_poll_cleantx(rq);

	received = virtnet_receive(rq, budget, &xdp_xmit);

	/* Out of packets? */
	if (received < budget)
		virtqueue_napi_complete(napi, rq->vq, received);

	if (xdp_xmit & VIRTIO_XDP_REDIR)
		xdp_do_flush_map();

	if (xdp_xmit & VIRTIO_XDP_TX) {
		sq = virtnet_xdp_sq(vi);
		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
			u64_stats_update_begin(&sq->stats.syncp);
			sq->stats.kicks++;
			u64_stats_update_end(&sq->stats.syncp);
		}
	}

	return received;
}

static int virtnet_open(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int i, err;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		if (i < vi->curr_queue_pairs)
			/* Make sure we have some buffers: if oom use wq. */
			if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
				schedule_delayed_work(&vi->refill, 0);

		err = xdp_rxq_info_reg(&vi->rq[i].xdp_rxq, dev, i);
		if (err < 0)
			return err;

		err = xdp_rxq_info_reg_mem_model(&vi->rq[i].xdp_rxq,
						 MEM_TYPE_PAGE_SHARED, NULL);
		if (err < 0) {
			xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
			return err;
		}

		virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
		virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi);
	}

	return 0;
}

static int virtnet_poll_tx(struct napi_struct *napi, int budget)
{
	struct send_queue *sq = container_of(napi, struct send_queue, napi);
	struct virtnet_info *vi = sq->vq->vdev->priv;
	struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq));

	__netif_tx_lock(txq, raw_smp_processor_id());
	free_old_xmit_skbs(sq);
	__netif_tx_unlock(txq);

	virtqueue_napi_complete(napi, sq->vq, 0);

	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
		netif_tx_wake_queue(txq);

	return 0;
}

e9d7417b | 1454 | static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) |
296f96fc | 1455 | { |
012873d0 | 1456 | struct virtio_net_hdr_mrg_rxbuf *hdr; |
296f96fc | 1457 | const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; |
e9d7417b | 1458 | struct virtnet_info *vi = sq->vq->vdev->priv; |
e2fcad58 | 1459 | int num_sg; |
012873d0 | 1460 | unsigned hdr_len = vi->hdr_len; |
e7428e95 | 1461 | bool can_push; |
296f96fc | 1462 | |
e174961c | 1463 | pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest); |
e7428e95 MT |
1464 | |
1465 | can_push = vi->any_header_sg && | |
1466 | !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) && | |
1467 | !skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len; | |
1468 | /* Even if we can, don't push here yet as this would skew | |
1469 | * csum_start offset below. */ | |
1470 | if (can_push) | |
012873d0 | 1471 | hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len); |
e7428e95 MT |
1472 | else |
1473 | hdr = skb_vnet_hdr(skb); | |
296f96fc | 1474 | |
e858fae2 | 1475 | if (virtio_net_hdr_from_skb(skb, &hdr->hdr, |
fd3a8862 WB |
1476 | virtio_is_little_endian(vi->vdev), false, |
1477 | 0)) | |
e858fae2 | 1478 | BUG(); |
296f96fc | 1479 | |
3f2c31d9 | 1480 | if (vi->mergeable_rx_bufs) |
012873d0 | 1481 | hdr->num_buffers = 0; |
3f2c31d9 | 1482 | |
547c890c | 1483 | sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2)); |
e7428e95 MT |
1484 | if (can_push) { |
1485 | __skb_push(skb, hdr_len); | |
1486 | num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len); | |
e2fcad58 JD |
1487 | if (unlikely(num_sg < 0)) |
1488 | return num_sg; | |
e7428e95 MT |
1489 | /* Pull header back to avoid skew in tx bytes calculations. */ |
1490 | __skb_pull(skb, hdr_len); | |
1491 | } else { | |
1492 | sg_set_buf(sq->sg, hdr, hdr_len); | |
e2fcad58 JD |
1493 | num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len); |
1494 | if (unlikely(num_sg < 0)) | |
1495 | return num_sg; | |
1496 | num_sg++; | |
e7428e95 | 1497 | } |
9dc7b9e4 | 1498 | return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC); |
11a3a154 RR |
1499 | } |
1500 | ||
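/* Note on xmit_skb() above: when can_push is true the virtio header is
 * written into the skb headroom, so header and packet data share the linear
 * part of the skb and the request needs nr_frags + 1 scatterlist entries;
 * otherwise the header gets its own entry (nr_frags + 2).
 */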
424efe9c | 1501 | static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) |
99ffc696 RR |
1502 | { |
1503 | struct virtnet_info *vi = netdev_priv(dev); | |
986a4f4d JW |
1504 | int qnum = skb_get_queue_mapping(skb); |
1505 | struct send_queue *sq = &vi->sq[qnum]; | |
9ed4cb07 | 1506 | int err; |
4b7fd2e6 MT |
1507 | struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum); |
1508 | bool kick = !skb->xmit_more; | |
b92f1e67 | 1509 | bool use_napi = sq->napi.weight; |
2cb9c6ba | 1510 | |
2cb9c6ba | 1511 | /* Free up any pending old buffers before queueing new ones. */ |
e9d7417b | 1512 | free_old_xmit_skbs(sq); |
99ffc696 | 1513 | |
bdb12e0d WB |
1514 | if (use_napi && kick) |
1515 | virtqueue_enable_cb_delayed(sq->vq); | |
1516 | ||
074c3582 JK |
1517 | /* timestamp packet in software */ |
1518 | skb_tx_timestamp(skb); | |
1519 | ||
03f191ba | 1520 | /* Try to transmit */ |
b7dfde95 | 1521 | err = xmit_skb(sq, skb); |
48925e37 | 1522 | |
9ed4cb07 | 1523 | /* This should not happen! */ |
681daee2 | 1524 | if (unlikely(err)) { |
9ed4cb07 RR |
1525 | dev->stats.tx_fifo_errors++; |
1526 | if (net_ratelimit()) | |
1527 | dev_warn(&dev->dev, | |
b7dfde95 | 1528 | "Unexpected TXQ (%d) queue failure: %d\n", qnum, err); |
58eba97d | 1529 | dev->stats.tx_dropped++; |
85e94525 | 1530 | dev_kfree_skb_any(skb); |
58eba97d | 1531 | return NETDEV_TX_OK; |
296f96fc | 1532 | } |
03f191ba | 1533 | |
48925e37 | 1534 | /* Don't wait up for transmitted skbs to be freed. */ |
b92f1e67 WB |
1535 | if (!use_napi) { |
1536 | skb_orphan(skb); | |
1537 | nf_reset(skb); | |
1538 | } | |
48925e37 | 1539 | |
60302ff6 MT |
1540 | /* If running out of space, stop queue to avoid getting packets that we |
1541 | * are then unable to transmit. | |
1542 | * An alternative would be to force queuing layer to requeue the skb by | |
1543 | * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be | |
1544 | * returned in a normal path of operation: it means that driver is not | |
1545 | * maintaining the TX queue stop/start state properly, and causes | |
1546 | * the stack to do a non-trivial amount of useless work. | |
1547 | * Since most packets only take 1 or 2 ring slots, stopping the queue | |
1548 | * early means 16 slots are typically wasted. | |
d631b94e | 1549 | */ |
b7dfde95 | 1550 | if (sq->vq->num_free < 2+MAX_SKB_FRAGS) { |
986a4f4d | 1551 | netif_stop_subqueue(dev, qnum); |
b92f1e67 WB |
1552 | if (!use_napi && |
1553 | unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { | |
48925e37 | 1554 | /* More just got used, free them then recheck. */ |
b7dfde95 LT |
1555 | free_old_xmit_skbs(sq); |
1556 | if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) { | |
986a4f4d | 1557 | netif_start_subqueue(dev, qnum); |
e9d7417b | 1558 | virtqueue_disable_cb(sq->vq); |
48925e37 RR |
1559 | } |
1560 | } | |
99ffc696 | 1561 | } |
48925e37 | 1562 | |
461f03dc TM |
1563 | if (kick || netif_xmit_stopped(txq)) { |
1564 | if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) { | |
1565 | u64_stats_update_begin(&sq->stats.syncp); | |
1566 | sq->stats.kicks++; | |
1567 | u64_stats_update_end(&sq->stats.syncp); | |
1568 | } | |
1569 | } | |
296f96fc | 1570 | |
0b725a2c | 1571 | return NETDEV_TX_OK; |
c223a078 DM |
1572 | } |
1573 | ||
40cbfc37 AK |
1574 | /* |
1575 | * Send command via the control virtqueue and check status. Commands | |
1576 | * supported by the hypervisor, as indicated by feature bits, should | |
788a8b6d | 1577 | * never fail unless improperly formatted. |
40cbfc37 AK |
1578 | */ |
1579 | static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, | |
d24bae32 | 1580 | struct scatterlist *out) |
40cbfc37 | 1581 | { |
f7bc9594 | 1582 | struct scatterlist *sgs[4], hdr, stat; |
d24bae32 | 1583 | unsigned out_num = 0, tmp; |
40cbfc37 AK |
1584 | |
1585 | /* Caller should know better */ | |
f7bc9594 | 1586 | BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); |
40cbfc37 | 1587 | |
12e57169 MT |
1588 | vi->ctrl->status = ~0; |
1589 | vi->ctrl->hdr.class = class; | |
1590 | vi->ctrl->hdr.cmd = cmd; | |
f7bc9594 | 1591 | /* Add header */ |
12e57169 | 1592 | sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr)); |
f7bc9594 | 1593 | sgs[out_num++] = &hdr; |
40cbfc37 | 1594 | |
f7bc9594 RR |
1595 | if (out) |
1596 | sgs[out_num++] = out; | |
40cbfc37 | 1597 | |
f7bc9594 | 1598 | /* Add return status. */ |
12e57169 | 1599 | sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status)); |
d24bae32 | 1600 | sgs[out_num] = &stat; |
40cbfc37 | 1601 | |
d24bae32 | 1602 | BUG_ON(out_num + 1 > ARRAY_SIZE(sgs)); |
a7c58146 | 1603 | virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC); |
40cbfc37 | 1604 | |
67975901 | 1605 | if (unlikely(!virtqueue_kick(vi->cvq))) |
12e57169 | 1606 | return vi->ctrl->status == VIRTIO_NET_OK; |
40cbfc37 AK |
1607 | |
1608 | /* Spin for a response; the kick causes an ioport write, trapping | |
1609 | * into the hypervisor, so the request should be handled immediately. | |
1610 | */ | |
047b9b94 HG |
1611 | while (!virtqueue_get_buf(vi->cvq, &tmp) && |
1612 | !virtqueue_is_broken(vi->cvq)) | |
40cbfc37 AK |
1613 | cpu_relax(); |
1614 | ||
12e57169 | 1615 | return vi->ctrl->status == VIRTIO_NET_OK; |
40cbfc37 AK |
1616 | } |
1617 | ||
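/* Illustrative call sequence for virtnet_send_command() (a sketch only; the
 * real call sites follow below, e.g. _virtnet_set_queues()): place the
 * command payload in a field of vi->ctrl, wrap it in a scatterlist and check
 * the boolean result:
 *
 *	struct scatterlist sg;
 *
 *	vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
 *	sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));
 *	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
 *				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg))
 *		dev_warn(&dev->dev, "command failed\n");
 */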
9c46f6d4 AW |
1618 | static int virtnet_set_mac_address(struct net_device *dev, void *p) |
1619 | { | |
1620 | struct virtnet_info *vi = netdev_priv(dev); | |
1621 | struct virtio_device *vdev = vi->vdev; | |
f2f2c8b4 | 1622 | int ret; |
e37e2ff3 | 1623 | struct sockaddr *addr; |
7e58d5ae | 1624 | struct scatterlist sg; |
9c46f6d4 | 1625 | |
ba5e4426 SS |
1626 | if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY)) |
1627 | return -EOPNOTSUPP; | |
1628 | ||
801822d1 | 1629 | addr = kmemdup(p, sizeof(*addr), GFP_KERNEL); |
e37e2ff3 AL |
1630 | if (!addr) |
1631 | return -ENOMEM; | |
e37e2ff3 AL |
1632 | |
1633 | ret = eth_prepare_mac_addr_change(dev, addr); | |
f2f2c8b4 | 1634 | if (ret) |
e37e2ff3 | 1635 | goto out; |
9c46f6d4 | 1636 | |
7e58d5ae AK |
1637 | if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) { |
1638 | sg_init_one(&sg, addr->sa_data, dev->addr_len); | |
1639 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, | |
d24bae32 | 1640 | VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) { |
7e58d5ae AK |
1641 | dev_warn(&vdev->dev, |
1642 | "Failed to set mac address by vq command.\n"); | |
e37e2ff3 AL |
1643 | ret = -EINVAL; |
1644 | goto out; | |
7e58d5ae | 1645 | } |
7e93a02f MT |
1646 | } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) && |
1647 | !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) { | |
855e0c52 RR |
1648 | unsigned int i; |
1649 | ||
1650 | /* Naturally, this has an atomicity problem. */ | |
1651 | for (i = 0; i < dev->addr_len; i++) | |
1652 | virtio_cwrite8(vdev, | |
1653 | offsetof(struct virtio_net_config, mac) + | |
1654 | i, addr->sa_data[i]); | |
7e58d5ae AK |
1655 | } |
1656 | ||
1657 | eth_commit_mac_addr_change(dev, p); | |
e37e2ff3 | 1658 | ret = 0; |
9c46f6d4 | 1659 | |
e37e2ff3 AL |
1660 | out: |
1661 | kfree(addr); | |
1662 | return ret; | |
9c46f6d4 AW |
1663 | } |
1664 | ||
bc1f4470 | 1665 | static void virtnet_stats(struct net_device *dev, |
1666 | struct rtnl_link_stats64 *tot) | |
3fa2a1df | 1667 | { |
1668 | struct virtnet_info *vi = netdev_priv(dev); | |
3fa2a1df | 1669 | unsigned int start; |
d7dfc5cf | 1670 | int i; |
3fa2a1df | 1671 | |
d7dfc5cf | 1672 | for (i = 0; i < vi->max_queue_pairs; i++) { |
2c4a2f7d | 1673 | u64 tpackets, tbytes, rpackets, rbytes, rdrops; |
d7dfc5cf TM |
1674 | struct receive_queue *rq = &vi->rq[i]; |
1675 | struct send_queue *sq = &vi->sq[i]; | |
3fa2a1df | 1676 | |
1677 | do { | |
d7dfc5cf TM |
1678 | start = u64_stats_fetch_begin_irq(&sq->stats.syncp); |
1679 | tpackets = sq->stats.packets; | |
1680 | tbytes = sq->stats.bytes; | |
1681 | } while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start)); | |
83a27052 ED |
1682 | |
1683 | do { | |
d7dfc5cf | 1684 | start = u64_stats_fetch_begin_irq(&rq->stats.syncp); |
d46eeeaf JW |
1685 | rpackets = rq->stats.packets; |
1686 | rbytes = rq->stats.bytes; | |
1687 | rdrops = rq->stats.drops; | |
d7dfc5cf | 1688 | } while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start)); |
3fa2a1df | 1689 | |
1690 | tot->rx_packets += rpackets; | |
1691 | tot->tx_packets += tpackets; | |
1692 | tot->rx_bytes += rbytes; | |
1693 | tot->tx_bytes += tbytes; | |
2c4a2f7d | 1694 | tot->rx_dropped += rdrops; |
3fa2a1df | 1695 | } |
1696 | ||
1697 | tot->tx_dropped = dev->stats.tx_dropped; | |
021ac8d3 | 1698 | tot->tx_fifo_errors = dev->stats.tx_fifo_errors; |
3fa2a1df | 1699 | tot->rx_length_errors = dev->stats.rx_length_errors; |
1700 | tot->rx_frame_errors = dev->stats.rx_frame_errors; | |
3fa2a1df | 1701 | } |
1702 | ||
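/* The u64_stats_fetch_begin_irq()/retry loops in virtnet_stats() above give
 * a consistent snapshot of each per-queue counter pair: on 32-bit machines a
 * 64-bit counter cannot be loaded atomically, so the read is retried until
 * the writer-side sequence count is stable; on 64-bit builds the helpers are
 * essentially free.
 */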
586d17c5 JW |
1703 | static void virtnet_ack_link_announce(struct virtnet_info *vi) |
1704 | { | |
1705 | rtnl_lock(); | |
1706 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE, | |
d24bae32 | 1707 | VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL)) |
586d17c5 JW |
1708 | dev_warn(&vi->dev->dev, "Failed to ack link announce.\n"); |
1709 | rtnl_unlock(); | |
1710 | } | |
1711 | ||
47315329 | 1712 | static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) |
986a4f4d JW |
1713 | { |
1714 | struct scatterlist sg; | |
986a4f4d JW |
1715 | struct net_device *dev = vi->dev; |
1716 | ||
1717 | if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) | |
1718 | return 0; | |
1719 | ||
12e57169 MT |
1720 | vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs); |
1721 | sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq)); | |
986a4f4d JW |
1722 | |
1723 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, | |
d24bae32 | 1724 | VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) { |
986a4f4d JW |
1725 | dev_warn(&dev->dev, "Failed to set the number of queue pairs to %d\n", | |
1726 | queue_pairs); | |
1727 | return -EINVAL; | |
55257d72 | 1728 | } else { |
986a4f4d | 1729 | vi->curr_queue_pairs = queue_pairs; |
35ed159b JW |
1730 | /* virtnet_open() will refill when the device goes up. */ | |
1731 | if (dev->flags & IFF_UP) | |
1732 | schedule_delayed_work(&vi->refill, 0); | |
55257d72 | 1733 | } |
986a4f4d JW |
1734 | |
1735 | return 0; | |
1736 | } | |
1737 | ||
47315329 JF |
1738 | static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) |
1739 | { | |
1740 | int err; | |
1741 | ||
1742 | rtnl_lock(); | |
1743 | err = _virtnet_set_queues(vi, queue_pairs); | |
1744 | rtnl_unlock(); | |
1745 | return err; | |
1746 | } | |
1747 | ||
296f96fc RR |
1748 | static int virtnet_close(struct net_device *dev) |
1749 | { | |
1750 | struct virtnet_info *vi = netdev_priv(dev); | |
986a4f4d | 1751 | int i; |
296f96fc | 1752 | |
b2baed69 RR |
1753 | /* Make sure refill_work doesn't re-enable napi! */ |
1754 | cancel_delayed_work_sync(&vi->refill); | |
986a4f4d | 1755 | |
b92f1e67 | 1756 | for (i = 0; i < vi->max_queue_pairs; i++) { |
754b8a21 | 1757 | xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq); |
986a4f4d | 1758 | napi_disable(&vi->rq[i].napi); |
78a57b48 | 1759 | virtnet_napi_tx_disable(&vi->sq[i].napi); |
b92f1e67 | 1760 | } |
296f96fc | 1761 | |
296f96fc RR |
1762 | return 0; |
1763 | } | |
1764 | ||
2af7698e AW |
1765 | static void virtnet_set_rx_mode(struct net_device *dev) |
1766 | { | |
1767 | struct virtnet_info *vi = netdev_priv(dev); | |
f565a7c2 | 1768 | struct scatterlist sg[2]; |
f565a7c2 | 1769 | struct virtio_net_ctrl_mac *mac_data; |
ccffad25 | 1770 | struct netdev_hw_addr *ha; |
32e7bfc4 | 1771 | int uc_count; |
4cd24eaf | 1772 | int mc_count; |
f565a7c2 AW |
1773 | void *buf; |
1774 | int i; | |
2af7698e | 1775 | |
788a8b6d | 1776 | /* We can't dynamically set ndo_set_rx_mode, so return gracefully */ |
2af7698e AW |
1777 | if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) |
1778 | return; | |
1779 | ||
12e57169 MT |
1780 | vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0); |
1781 | vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0); | |
2af7698e | 1782 | |
12e57169 | 1783 | sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc)); |
2af7698e AW |
1784 | |
1785 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, | |
d24bae32 | 1786 | VIRTIO_NET_CTRL_RX_PROMISC, sg)) |
2af7698e | 1787 | dev_warn(&dev->dev, "Failed to %sable promisc mode.\n", |
12e57169 | 1788 | vi->ctrl->promisc ? "en" : "dis"); |
2af7698e | 1789 | |
12e57169 | 1790 | sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti)); |
2af7698e AW |
1791 | |
1792 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, | |
d24bae32 | 1793 | VIRTIO_NET_CTRL_RX_ALLMULTI, sg)) |
2af7698e | 1794 | dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", |
12e57169 | 1795 | vi->ctrl->allmulti ? "en" : "dis"); |
f565a7c2 | 1796 | |
32e7bfc4 | 1797 | uc_count = netdev_uc_count(dev); |
4cd24eaf | 1798 | mc_count = netdev_mc_count(dev); |
f565a7c2 | 1799 | /* MAC filter - use one buffer for both lists */ |
4cd24eaf JP |
1800 | buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) + |
1801 | (2 * sizeof(mac_data->entries)), GFP_ATOMIC); | |
1802 | mac_data = buf; | |
e68ed8f0 | 1803 | if (!buf) |
f565a7c2 | 1804 | return; |
f565a7c2 | 1805 | |
23e258e1 AW |
1806 | sg_init_table(sg, 2); |
1807 | ||
f565a7c2 | 1808 | /* Store the unicast list and count in the front of the buffer */ |
fdd819b2 | 1809 | mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count); |
ccffad25 | 1810 | i = 0; |
32e7bfc4 | 1811 | netdev_for_each_uc_addr(ha, dev) |
ccffad25 | 1812 | memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); |
f565a7c2 AW |
1813 | |
1814 | sg_set_buf(&sg[0], mac_data, | |
32e7bfc4 | 1815 | sizeof(mac_data->entries) + (uc_count * ETH_ALEN)); |
f565a7c2 AW |
1816 | |
1817 | /* multicast list and count fill the end */ | |
32e7bfc4 | 1818 | mac_data = (void *)&mac_data->macs[uc_count][0]; |
f565a7c2 | 1819 | |
fdd819b2 | 1820 | mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count); |
567ec874 | 1821 | i = 0; |
22bedad3 JP |
1822 | netdev_for_each_mc_addr(ha, dev) |
1823 | memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); | |
f565a7c2 AW |
1824 | |
1825 | sg_set_buf(&sg[1], mac_data, | |
4cd24eaf | 1826 | sizeof(mac_data->entries) + (mc_count * ETH_ALEN)); |
f565a7c2 AW |
1827 | |
1828 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, | |
d24bae32 | 1829 | VIRTIO_NET_CTRL_MAC_TABLE_SET, sg)) |
99e872ae | 1830 | dev_warn(&dev->dev, "Failed to set MAC filter table.\n"); |
f565a7c2 AW |
1831 | |
1832 | kfree(buf); | |
2af7698e AW |
1833 | } |
1834 | ||
80d5c368 PM |
1835 | static int virtnet_vlan_rx_add_vid(struct net_device *dev, |
1836 | __be16 proto, u16 vid) | |
0bde9569 AW |
1837 | { |
1838 | struct virtnet_info *vi = netdev_priv(dev); | |
1839 | struct scatterlist sg; | |
1840 | ||
d7fad4c8 | 1841 | vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid); |
12e57169 | 1842 | sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid)); |
0bde9569 AW |
1843 | |
1844 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, | |
d24bae32 | 1845 | VIRTIO_NET_CTRL_VLAN_ADD, &sg)) |
0bde9569 | 1846 | dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid); |
8e586137 | 1847 | return 0; |
0bde9569 AW |
1848 | } |
1849 | ||
80d5c368 PM |
1850 | static int virtnet_vlan_rx_kill_vid(struct net_device *dev, |
1851 | __be16 proto, u16 vid) | |
0bde9569 AW |
1852 | { |
1853 | struct virtnet_info *vi = netdev_priv(dev); | |
1854 | struct scatterlist sg; | |
1855 | ||
d7fad4c8 | 1856 | vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid); |
12e57169 | 1857 | sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid)); |
0bde9569 AW |
1858 | |
1859 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, | |
d24bae32 | 1860 | VIRTIO_NET_CTRL_VLAN_DEL, &sg)) |
0bde9569 | 1861 | dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid); |
8e586137 | 1862 | return 0; |
0bde9569 AW |
1863 | } |
1864 | ||
8898c21c | 1865 | static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu) |
986a4f4d JW |
1866 | { |
1867 | int i; | |
1868 | ||
8898c21c WG |
1869 | if (vi->affinity_hint_set) { |
1870 | for (i = 0; i < vi->max_queue_pairs; i++) { | |
19e226e8 CR |
1871 | virtqueue_set_affinity(vi->rq[i].vq, NULL); |
1872 | virtqueue_set_affinity(vi->sq[i].vq, NULL); | |
47be2479 WG |
1873 | } |
1874 | ||
8898c21c WG |
1875 | vi->affinity_hint_set = false; |
1876 | } | |
8898c21c | 1877 | } |
47be2479 | 1878 | |
8898c21c WG |
1879 | static void virtnet_set_affinity(struct virtnet_info *vi) |
1880 | { | |
2ca653d6 CR |
1881 | cpumask_var_t mask; |
1882 | int stragglers; | |
1883 | int group_size; | |
1884 | int i, j, cpu; | |
1885 | int num_cpu; | |
1886 | int stride; | |
1887 | ||
1888 | if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) { | |
8898c21c WG |
1889 | virtnet_clean_affinity(vi, -1); |
1890 | return; | |
986a4f4d JW |
1891 | } |
1892 | ||
2ca653d6 CR |
1893 | num_cpu = num_online_cpus(); |
1894 | stride = max_t(int, num_cpu / vi->curr_queue_pairs, 1); | |
1895 | stragglers = num_cpu >= vi->curr_queue_pairs ? | |
1896 | num_cpu % vi->curr_queue_pairs : | |
1897 | 0; | |
1898 | cpu = cpumask_next(-1, cpu_online_mask); | |
4d99f660 | 1899 | |
2ca653d6 CR |
1900 | for (i = 0; i < vi->curr_queue_pairs; i++) { |
1901 | group_size = stride + (i < stragglers ? 1 : 0); | |
1902 | ||
1903 | for (j = 0; j < group_size; j++) { | |
1904 | cpumask_set_cpu(cpu, mask); | |
1905 | cpu = cpumask_next_wrap(cpu, cpu_online_mask, | |
1906 | nr_cpu_ids, false); | |
1907 | } | |
1908 | virtqueue_set_affinity(vi->rq[i].vq, mask); | |
1909 | virtqueue_set_affinity(vi->sq[i].vq, mask); | |
1910 | __netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, false); | |
1911 | cpumask_clear(mask); | |
986a4f4d JW |
1912 | } |
1913 | ||
8898c21c | 1914 | vi->affinity_hint_set = true; |
2ca653d6 | 1915 | free_cpumask_var(mask); |
986a4f4d JW |
1916 | } |
1917 | ||
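/* Example of the spread virtnet_set_affinity() above produces: assuming
 * 8 online CPUs and 3 queue pairs, stride = 8 / 3 = 2 and stragglers =
 * 8 % 3 = 2, so queue pairs 0 and 1 each get 3 CPUs and queue pair 2 gets
 * the remaining 2, covering all 8 CPUs without overlap.
 */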
8017c279 | 1918 | static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node) |
8de4b2f3 | 1919 | { |
8017c279 SAS |
1920 | struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, |
1921 | node); | |
1922 | virtnet_set_affinity(vi); | |
1923 | return 0; | |
1924 | } | |
8de4b2f3 | 1925 | |
8017c279 SAS |
1926 | static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node) |
1927 | { | |
1928 | struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, | |
1929 | node_dead); | |
1930 | virtnet_set_affinity(vi); | |
1931 | return 0; | |
1932 | } | |
3ab098df | 1933 | |
8017c279 SAS |
1934 | static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node) |
1935 | { | |
1936 | struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, | |
1937 | node); | |
1938 | ||
1939 | virtnet_clean_affinity(vi, cpu); | |
1940 | return 0; | |
1941 | } | |
1942 | ||
1943 | static enum cpuhp_state virtionet_online; | |
1944 | ||
1945 | static int virtnet_cpu_notif_add(struct virtnet_info *vi) | |
1946 | { | |
1947 | int ret; | |
1948 | ||
1949 | ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node); | |
1950 | if (ret) | |
1951 | return ret; | |
1952 | ret = cpuhp_state_add_instance_nocalls(CPUHP_VIRT_NET_DEAD, | |
1953 | &vi->node_dead); | |
1954 | if (!ret) | |
1955 | return ret; | |
1956 | cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); | |
1957 | return ret; | |
1958 | } | |
1959 | ||
1960 | static void virtnet_cpu_notif_remove(struct virtnet_info *vi) | |
1961 | { | |
1962 | cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); | |
1963 | cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_NET_DEAD, | |
1964 | &vi->node_dead); | |
986a4f4d JW |
1965 | } |
1966 | ||
8f9f4668 RJ |
1967 | static void virtnet_get_ringparam(struct net_device *dev, |
1968 | struct ethtool_ringparam *ring) | |
1969 | { | |
1970 | struct virtnet_info *vi = netdev_priv(dev); | |
1971 | ||
986a4f4d JW |
1972 | ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq); |
1973 | ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq); | |
8f9f4668 RJ |
1974 | ring->rx_pending = ring->rx_max_pending; |
1975 | ring->tx_pending = ring->tx_max_pending; | |
8f9f4668 RJ |
1976 | } |
1977 | ||
66846048 RJ |
1978 | |
1979 | static void virtnet_get_drvinfo(struct net_device *dev, | |
1980 | struct ethtool_drvinfo *info) | |
1981 | { | |
1982 | struct virtnet_info *vi = netdev_priv(dev); | |
1983 | struct virtio_device *vdev = vi->vdev; | |
1984 | ||
1985 | strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); | |
1986 | strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version)); | |
1987 | strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info)); | |
1988 | ||
1989 | } | |
1990 | ||
d73bcd2c JW |
1991 | /* TODO: Eliminate OOO packets during switching */ |
1992 | static int virtnet_set_channels(struct net_device *dev, | |
1993 | struct ethtool_channels *channels) | |
1994 | { | |
1995 | struct virtnet_info *vi = netdev_priv(dev); | |
1996 | u16 queue_pairs = channels->combined_count; | |
1997 | int err; | |
1998 | ||
1999 | /* We don't support separate rx/tx channels. | |
2000 | * We don't allow setting 'other' channels. | |
2001 | */ | |
2002 | if (channels->rx_count || channels->tx_count || channels->other_count) | |
2003 | return -EINVAL; | |
2004 | ||
c18e9cd6 | 2005 | if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0) |
d73bcd2c JW |
2006 | return -EINVAL; |
2007 | ||
f600b690 JF |
2008 | /* For now we don't support modifying channels while XDP is loaded. | |
2009 | * Also, when XDP is loaded all RX queues have XDP programs, so we only | |
2010 | * need to check a single RX queue. | |
2011 | */ | |
2012 | if (vi->rq[0].xdp_prog) | |
2013 | return -EINVAL; | |
2014 | ||
47be2479 | 2015 | get_online_cpus(); |
47315329 | 2016 | err = _virtnet_set_queues(vi, queue_pairs); |
d73bcd2c JW |
2017 | if (!err) { |
2018 | netif_set_real_num_tx_queues(dev, queue_pairs); | |
2019 | netif_set_real_num_rx_queues(dev, queue_pairs); | |
2020 | ||
8898c21c | 2021 | virtnet_set_affinity(vi); |
d73bcd2c | 2022 | } |
47be2479 | 2023 | put_online_cpus(); |
d73bcd2c JW |
2024 | |
2025 | return err; | |
2026 | } | |
2027 | ||
d7dfc5cf TM |
2028 | static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data) |
2029 | { | |
2030 | struct virtnet_info *vi = netdev_priv(dev); | |
2031 | char *p = (char *)data; | |
2032 | unsigned int i, j; | |
2033 | ||
2034 | switch (stringset) { | |
2035 | case ETH_SS_STATS: | |
2036 | for (i = 0; i < vi->curr_queue_pairs; i++) { | |
2037 | for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) { | |
2038 | snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_%s", | |
2039 | i, virtnet_rq_stats_desc[j].desc); | |
2040 | p += ETH_GSTRING_LEN; | |
2041 | } | |
2042 | } | |
2043 | ||
2044 | for (i = 0; i < vi->curr_queue_pairs; i++) { | |
2045 | for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) { | |
2046 | snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_%s", | |
2047 | i, virtnet_sq_stats_desc[j].desc); | |
2048 | p += ETH_GSTRING_LEN; | |
2049 | } | |
2050 | } | |
2051 | break; | |
2052 | } | |
2053 | } | |
2054 | ||
2055 | static int virtnet_get_sset_count(struct net_device *dev, int sset) | |
2056 | { | |
2057 | struct virtnet_info *vi = netdev_priv(dev); | |
2058 | ||
2059 | switch (sset) { | |
2060 | case ETH_SS_STATS: | |
2061 | return vi->curr_queue_pairs * (VIRTNET_RQ_STATS_LEN + | |
2062 | VIRTNET_SQ_STATS_LEN); | |
2063 | default: | |
2064 | return -EOPNOTSUPP; | |
2065 | } | |
2066 | } | |
2067 | ||
2068 | static void virtnet_get_ethtool_stats(struct net_device *dev, | |
2069 | struct ethtool_stats *stats, u64 *data) | |
2070 | { | |
2071 | struct virtnet_info *vi = netdev_priv(dev); | |
2072 | unsigned int idx = 0, start, i, j; | |
2073 | const u8 *stats_base; | |
2074 | size_t offset; | |
2075 | ||
2076 | for (i = 0; i < vi->curr_queue_pairs; i++) { | |
2077 | struct receive_queue *rq = &vi->rq[i]; | |
2078 | ||
d46eeeaf | 2079 | stats_base = (u8 *)&rq->stats; |
d7dfc5cf TM |
2080 | do { |
2081 | start = u64_stats_fetch_begin_irq(&rq->stats.syncp); | |
2082 | for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) { | |
2083 | offset = virtnet_rq_stats_desc[j].offset; | |
2084 | data[idx + j] = *(u64 *)(stats_base + offset); | |
2085 | } | |
2086 | } while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start)); | |
2087 | idx += VIRTNET_RQ_STATS_LEN; | |
2088 | } | |
2089 | ||
2090 | for (i = 0; i < vi->curr_queue_pairs; i++) { | |
2091 | struct send_queue *sq = &vi->sq[i]; | |
2092 | ||
2093 | stats_base = (u8 *)&sq->stats; | |
2094 | do { | |
2095 | start = u64_stats_fetch_begin_irq(&sq->stats.syncp); | |
2096 | for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) { | |
2097 | offset = virtnet_sq_stats_desc[j].offset; | |
2098 | data[idx + j] = *(u64 *)(stats_base + offset); | |
2099 | } | |
2100 | } while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start)); | |
2101 | idx += VIRTNET_SQ_STATS_LEN; | |
2102 | } | |
2103 | } | |
2104 | ||
d73bcd2c JW |
2105 | static void virtnet_get_channels(struct net_device *dev, |
2106 | struct ethtool_channels *channels) | |
2107 | { | |
2108 | struct virtnet_info *vi = netdev_priv(dev); | |
2109 | ||
2110 | channels->combined_count = vi->curr_queue_pairs; | |
2111 | channels->max_combined = vi->max_queue_pairs; | |
2112 | channels->max_other = 0; | |
2113 | channels->rx_count = 0; | |
2114 | channels->tx_count = 0; | |
2115 | channels->other_count = 0; | |
2116 | } | |
2117 | ||
16032be5 | 2118 | /* Check if the user is trying to change anything besides speed/duplex */ |
ebb6b4b1 PR |
2119 | static bool |
2120 | virtnet_validate_ethtool_cmd(const struct ethtool_link_ksettings *cmd) | |
16032be5 | 2121 | { |
ebb6b4b1 PR |
2122 | struct ethtool_link_ksettings diff1 = *cmd; |
2123 | struct ethtool_link_ksettings diff2 = {}; | |
16032be5 | 2124 | |
0cf3ace9 NA |
2125 | /* cmd is always set, so we need to clear it; validate the port type, | |
2126 | * and since there is no autonegotiation we can ignore advertising. | |
2127 | */ | |
ebb6b4b1 PR |
2128 | diff1.base.speed = 0; |
2129 | diff2.base.port = PORT_OTHER; | |
2130 | ethtool_link_ksettings_zero_link_mode(&diff1, advertising); | |
2131 | diff1.base.duplex = 0; | |
2132 | diff1.base.cmd = 0; | |
2133 | diff1.base.link_mode_masks_nwords = 0; | |
2134 | ||
2135 | return !memcmp(&diff1.base, &diff2.base, sizeof(diff1.base)) && | |
2136 | bitmap_empty(diff1.link_modes.supported, | |
2137 | __ETHTOOL_LINK_MODE_MASK_NBITS) && | |
2138 | bitmap_empty(diff1.link_modes.advertising, | |
2139 | __ETHTOOL_LINK_MODE_MASK_NBITS) && | |
2140 | bitmap_empty(diff1.link_modes.lp_advertising, | |
2141 | __ETHTOOL_LINK_MODE_MASK_NBITS); | |
16032be5 NA |
2142 | } |
2143 | ||
ebb6b4b1 PR |
2144 | static int virtnet_set_link_ksettings(struct net_device *dev, |
2145 | const struct ethtool_link_ksettings *cmd) | |
16032be5 NA |
2146 | { |
2147 | struct virtnet_info *vi = netdev_priv(dev); | |
2148 | u32 speed; | |
2149 | ||
ebb6b4b1 | 2150 | speed = cmd->base.speed; |
16032be5 NA |
2151 | /* don't allow custom speed and duplex */ |
2152 | if (!ethtool_validate_speed(speed) || | |
ebb6b4b1 | 2153 | !ethtool_validate_duplex(cmd->base.duplex) || |
16032be5 NA |
2154 | !virtnet_validate_ethtool_cmd(cmd)) |
2155 | return -EINVAL; | |
2156 | vi->speed = speed; | |
ebb6b4b1 | 2157 | vi->duplex = cmd->base.duplex; |
16032be5 NA |
2158 | |
2159 | return 0; | |
2160 | } | |
2161 | ||
ebb6b4b1 PR |
2162 | static int virtnet_get_link_ksettings(struct net_device *dev, |
2163 | struct ethtool_link_ksettings *cmd) | |
16032be5 NA |
2164 | { |
2165 | struct virtnet_info *vi = netdev_priv(dev); | |
2166 | ||
ebb6b4b1 PR |
2167 | cmd->base.speed = vi->speed; |
2168 | cmd->base.duplex = vi->duplex; | |
2169 | cmd->base.port = PORT_OTHER; | |
16032be5 NA |
2170 | |
2171 | return 0; | |
2172 | } | |
2173 | ||
0c465be1 JW |
2174 | static int virtnet_set_coalesce(struct net_device *dev, |
2175 | struct ethtool_coalesce *ec) | |
2176 | { | |
2177 | struct ethtool_coalesce ec_default = { | |
2178 | .cmd = ETHTOOL_SCOALESCE, | |
2179 | .rx_max_coalesced_frames = 1, | |
2180 | }; | |
2181 | struct virtnet_info *vi = netdev_priv(dev); | |
2182 | int i, napi_weight; | |
2183 | ||
2184 | if (ec->tx_max_coalesced_frames > 1) | |
2185 | return -EINVAL; | |
2186 | ||
2187 | ec_default.tx_max_coalesced_frames = ec->tx_max_coalesced_frames; | |
2188 | napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0; | |
2189 | ||
2190 | /* disallow changes to fields not explicitly tested above */ | |
2191 | if (memcmp(ec, &ec_default, sizeof(ec_default))) | |
2192 | return -EINVAL; | |
2193 | ||
2194 | if (napi_weight ^ vi->sq[0].napi.weight) { | |
2195 | if (dev->flags & IFF_UP) | |
2196 | return -EBUSY; | |
2197 | for (i = 0; i < vi->max_queue_pairs; i++) | |
2198 | vi->sq[i].napi.weight = napi_weight; | |
2199 | } | |
2200 | ||
2201 | return 0; | |
2202 | } | |
2203 | ||
2204 | static int virtnet_get_coalesce(struct net_device *dev, | |
2205 | struct ethtool_coalesce *ec) | |
2206 | { | |
2207 | struct ethtool_coalesce ec_default = { | |
2208 | .cmd = ETHTOOL_GCOALESCE, | |
2209 | .rx_max_coalesced_frames = 1, | |
2210 | }; | |
2211 | struct virtnet_info *vi = netdev_priv(dev); | |
2212 | ||
2213 | memcpy(ec, &ec_default, sizeof(ec_default)); | |
2214 | ||
2215 | if (vi->sq[0].napi.weight) | |
2216 | ec->tx_max_coalesced_frames = 1; | |
2217 | ||
2218 | return 0; | |
2219 | } | |
2220 | ||
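/* In the two coalesce handlers above, tx_max_coalesced_frames effectively
 * acts as an on/off switch for TX NAPI: writing 1 sets the TX napi weight to
 * NAPI_POLL_WEIGHT, writing 0 clears it, and the interface has to be down
 * (-EBUSY otherwise) while the mode is changed.
 */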
16032be5 NA |
2221 | static void virtnet_init_settings(struct net_device *dev) |
2222 | { | |
2223 | struct virtnet_info *vi = netdev_priv(dev); | |
2224 | ||
2225 | vi->speed = SPEED_UNKNOWN; | |
2226 | vi->duplex = DUPLEX_UNKNOWN; | |
2227 | } | |
2228 | ||
faa9b39f JB |
2229 | static void virtnet_update_settings(struct virtnet_info *vi) |
2230 | { | |
2231 | u32 speed; | |
2232 | u8 duplex; | |
2233 | ||
2234 | if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX)) | |
2235 | return; | |
2236 | ||
2237 | speed = virtio_cread32(vi->vdev, offsetof(struct virtio_net_config, | |
2238 | speed)); | |
2239 | if (ethtool_validate_speed(speed)) | |
2240 | vi->speed = speed; | |
2241 | duplex = virtio_cread8(vi->vdev, offsetof(struct virtio_net_config, | |
2242 | duplex)); | |
2243 | if (ethtool_validate_duplex(duplex)) | |
2244 | vi->duplex = duplex; | |
2245 | } | |
2246 | ||
0fc0b732 | 2247 | static const struct ethtool_ops virtnet_ethtool_ops = { |
66846048 | 2248 | .get_drvinfo = virtnet_get_drvinfo, |
9f4d26d0 | 2249 | .get_link = ethtool_op_get_link, |
8f9f4668 | 2250 | .get_ringparam = virtnet_get_ringparam, |
d7dfc5cf TM |
2251 | .get_strings = virtnet_get_strings, |
2252 | .get_sset_count = virtnet_get_sset_count, | |
2253 | .get_ethtool_stats = virtnet_get_ethtool_stats, | |
d73bcd2c JW |
2254 | .set_channels = virtnet_set_channels, |
2255 | .get_channels = virtnet_get_channels, | |
074c3582 | 2256 | .get_ts_info = ethtool_op_get_ts_info, |
ebb6b4b1 PR |
2257 | .get_link_ksettings = virtnet_get_link_ksettings, |
2258 | .set_link_ksettings = virtnet_set_link_ksettings, | |
0c465be1 JW |
2259 | .set_coalesce = virtnet_set_coalesce, |
2260 | .get_coalesce = virtnet_get_coalesce, | |
a9ea3fc6 HX |
2261 | }; |
2262 | ||
9fe7bfce JF |
2263 | static void virtnet_freeze_down(struct virtio_device *vdev) |
2264 | { | |
2265 | struct virtnet_info *vi = vdev->priv; | |
2266 | int i; | |
2267 | ||
2268 | /* Make sure no work handler is accessing the device */ | |
2269 | flush_work(&vi->config_work); | |
2270 | ||
05c998b7 | 2271 | netif_tx_lock_bh(vi->dev); |
9fe7bfce | 2272 | netif_device_detach(vi->dev); |
05c998b7 | 2273 | netif_tx_unlock_bh(vi->dev); |
9fe7bfce JF |
2274 | cancel_delayed_work_sync(&vi->refill); |
2275 | ||
2276 | if (netif_running(vi->dev)) { | |
b92f1e67 | 2277 | for (i = 0; i < vi->max_queue_pairs; i++) { |
9fe7bfce | 2278 | napi_disable(&vi->rq[i].napi); |
78a57b48 | 2279 | virtnet_napi_tx_disable(&vi->sq[i].napi); |
b92f1e67 | 2280 | } |
9fe7bfce JF |
2281 | } |
2282 | } | |
2283 | ||
2284 | static int init_vqs(struct virtnet_info *vi); | |
2285 | ||
2286 | static int virtnet_restore_up(struct virtio_device *vdev) | |
2287 | { | |
2288 | struct virtnet_info *vi = vdev->priv; | |
2289 | int err, i; | |
2290 | ||
2291 | err = init_vqs(vi); | |
2292 | if (err) | |
2293 | return err; | |
2294 | ||
2295 | virtio_device_ready(vdev); | |
2296 | ||
2297 | if (netif_running(vi->dev)) { | |
2298 | for (i = 0; i < vi->curr_queue_pairs; i++) | |
2299 | if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) | |
2300 | schedule_delayed_work(&vi->refill, 0); | |
2301 | ||
b92f1e67 | 2302 | for (i = 0; i < vi->max_queue_pairs; i++) { |
e4e8452a | 2303 | virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); |
b92f1e67 WB |
2304 | virtnet_napi_tx_enable(vi, vi->sq[i].vq, |
2305 | &vi->sq[i].napi); | |
2306 | } | |
9fe7bfce JF |
2307 | } |
2308 | ||
05c998b7 | 2309 | netif_tx_lock_bh(vi->dev); |
9fe7bfce | 2310 | netif_device_attach(vi->dev); |
05c998b7 | 2311 | netif_tx_unlock_bh(vi->dev); |
9fe7bfce JF |
2312 | return err; |
2313 | } | |
2314 | ||
3f93522f JW |
2315 | static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads) |
2316 | { | |
2317 | struct scatterlist sg; | |
12e57169 | 2318 | vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads); |
3f93522f | 2319 | |
12e57169 | 2320 | sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads)); |
3f93522f JW |
2321 | |
2322 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS, | |
2323 | VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) { | |
2324 | dev_warn(&vi->dev->dev, "Failed to set guest offloads.\n"); | |
2325 | return -EINVAL; | |
2326 | } | |
2327 | ||
2328 | return 0; | |
2329 | } | |
2330 | ||
2331 | static int virtnet_clear_guest_offloads(struct virtnet_info *vi) | |
2332 | { | |
2333 | u64 offloads = 0; | |
2334 | ||
2335 | if (!vi->guest_offloads) | |
2336 | return 0; | |
2337 | ||
3f93522f JW |
2338 | return virtnet_set_guest_offloads(vi, offloads); |
2339 | } | |
2340 | ||
2341 | static int virtnet_restore_guest_offloads(struct virtnet_info *vi) | |
2342 | { | |
2343 | u64 offloads = vi->guest_offloads; | |
2344 | ||
2345 | if (!vi->guest_offloads) | |
2346 | return 0; | |
3f93522f JW |
2347 | |
2348 | return virtnet_set_guest_offloads(vi, offloads); | |
2349 | } | |
2350 | ||
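/* The clear/restore helpers above are driven from virtnet_xdp_set(): the
 * negotiated guest offloads are switched off when the first XDP program is
 * attached and switched back on when the last one is removed, since an XDP
 * program expects linear, MTU-sized frames rather than LRO-coalesced
 * buffers.
 */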
9861ce03 JK |
2351 | static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, |
2352 | struct netlink_ext_ack *extack) | |
f600b690 JF |
2353 | { |
2354 | unsigned long int max_sz = PAGE_SIZE - sizeof(struct padded_vnet_hdr); | |
2355 | struct virtnet_info *vi = netdev_priv(dev); | |
2356 | struct bpf_prog *old_prog; | |
017b29c3 | 2357 | u16 xdp_qp = 0, curr_qp; |
672aafd5 | 2358 | int i, err; |
f600b690 | 2359 | |
3f93522f JW |
2360 | if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) |
2361 | && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || | |
2362 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || | |
2363 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || | |
18ba58e1 JW |
2364 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) || |
2365 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))) { | |
2366 | NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing LRO/CSUM, disable LRO/CSUM first"); | |
f600b690 JF |
2367 | return -EOPNOTSUPP; |
2368 | } | |
2369 | ||
2370 | if (vi->mergeable_rx_bufs && !vi->any_header_sg) { | |
4d463c4d | 2371 | NL_SET_ERR_MSG_MOD(extack, "XDP expects header/data in single page, any_header_sg required"); |
f600b690 JF |
2372 | return -EINVAL; |
2373 | } | |
2374 | ||
2375 | if (dev->mtu > max_sz) { | |
4d463c4d | 2376 | NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP"); |
f600b690 JF |
2377 | netdev_warn(dev, "XDP requires MTU less than %lu\n", max_sz); |
2378 | return -EINVAL; | |
2379 | } | |
2380 | ||
672aafd5 JF |
2381 | curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs; |
2382 | if (prog) | |
2383 | xdp_qp = nr_cpu_ids; | |
2384 | ||
2385 | /* XDP requires extra queues for XDP_TX */ | |
2386 | if (curr_qp + xdp_qp > vi->max_queue_pairs) { | |
4d463c4d | 2387 | NL_SET_ERR_MSG_MOD(extack, "Too few free TX rings available"); |
672aafd5 JF |
2388 | netdev_warn(dev, "request %i queues but max is %i\n", |
2389 | curr_qp + xdp_qp, vi->max_queue_pairs); | |
2390 | return -ENOMEM; | |
2391 | } | |
2392 | ||
2de2f7f4 JF |
2393 | if (prog) { |
2394 | prog = bpf_prog_add(prog, vi->max_queue_pairs - 1); | |
2395 | if (IS_ERR(prog)) | |
2396 | return PTR_ERR(prog); | |
2397 | } | |
2398 | ||
4941d472 | 2399 | /* Make sure NAPI is not using any XDP TX queues for RX. */ |
4e09ff53 JW |
2400 | if (netif_running(dev)) |
2401 | for (i = 0; i < vi->max_queue_pairs; i++) | |
2402 | napi_disable(&vi->rq[i].napi); | |
f600b690 | 2403 | |
672aafd5 | 2404 | netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp); |
4941d472 JW |
2405 | err = _virtnet_set_queues(vi, curr_qp + xdp_qp); |
2406 | if (err) | |
2407 | goto err; | |
2408 | vi->xdp_queue_pairs = xdp_qp; | |
672aafd5 | 2409 | |
f600b690 JF |
2410 | for (i = 0; i < vi->max_queue_pairs; i++) { |
2411 | old_prog = rtnl_dereference(vi->rq[i].xdp_prog); | |
2412 | rcu_assign_pointer(vi->rq[i].xdp_prog, prog); | |
3f93522f JW |
2413 | if (i == 0) { |
2414 | if (!old_prog) | |
2415 | virtnet_clear_guest_offloads(vi); | |
2416 | if (!prog) | |
2417 | virtnet_restore_guest_offloads(vi); | |
2418 | } | |
f600b690 JF |
2419 | if (old_prog) |
2420 | bpf_prog_put(old_prog); | |
4e09ff53 JW |
2421 | if (netif_running(dev)) |
2422 | virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); | |
f600b690 JF |
2423 | } |
2424 | ||
2425 | return 0; | |
2de2f7f4 | 2426 | |
4941d472 JW |
2427 | err: |
2428 | for (i = 0; i < vi->max_queue_pairs; i++) | |
2429 | virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); | |
2de2f7f4 JF |
2430 | if (prog) |
2431 | bpf_prog_sub(prog, vi->max_queue_pairs - 1); | |
2432 | return err; | |
f600b690 JF |
2433 | } |
2434 | ||
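/* Note on virtnet_xdp_set() above: one extra TX queue per possible CPU is
 * reserved for XDP_TX (xdp_qp = nr_cpu_ids), so attaching a program fails
 * with -ENOMEM unless curr_queue_pairs + nr_cpu_ids still fits within the
 * max_queue_pairs advertised by the device.
 */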
5b0e6629 | 2435 | static u32 virtnet_xdp_query(struct net_device *dev) |
f600b690 JF |
2436 | { |
2437 | struct virtnet_info *vi = netdev_priv(dev); | |
5b0e6629 | 2438 | const struct bpf_prog *xdp_prog; |
f600b690 JF |
2439 | int i; |
2440 | ||
2441 | for (i = 0; i < vi->max_queue_pairs; i++) { | |
5b0e6629 MKL |
2442 | xdp_prog = rtnl_dereference(vi->rq[i].xdp_prog); |
2443 | if (xdp_prog) | |
2444 | return xdp_prog->aux->id; | |
f600b690 | 2445 | } |
5b0e6629 | 2446 | return 0; |
f600b690 JF |
2447 | } |
2448 | ||
f4e63525 | 2449 | static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp) |
f600b690 JF |
2450 | { |
2451 | switch (xdp->command) { | |
2452 | case XDP_SETUP_PROG: | |
9861ce03 | 2453 | return virtnet_xdp_set(dev, xdp->prog, xdp->extack); |
f600b690 | 2454 | case XDP_QUERY_PROG: |
5b0e6629 | 2455 | xdp->prog_id = virtnet_xdp_query(dev); |
f600b690 JF |
2456 | return 0; |
2457 | default: | |
2458 | return -EINVAL; | |
2459 | } | |
2460 | } | |
2461 | ||
ba5e4426 SS |
2462 | static int virtnet_get_phys_port_name(struct net_device *dev, char *buf, |
2463 | size_t len) | |
2464 | { | |
2465 | struct virtnet_info *vi = netdev_priv(dev); | |
2466 | int ret; | |
2467 | ||
2468 | if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY)) | |
2469 | return -EOPNOTSUPP; | |
2470 | ||
2471 | ret = snprintf(buf, len, "sby"); | |
2472 | if (ret >= len) | |
2473 | return -EOPNOTSUPP; | |
2474 | ||
2475 | return 0; | |
2476 | } | |
2477 | ||
76288b4e SH |
2478 | static const struct net_device_ops virtnet_netdev = { |
2479 | .ndo_open = virtnet_open, | |
2480 | .ndo_stop = virtnet_close, | |
2481 | .ndo_start_xmit = start_xmit, | |
2482 | .ndo_validate_addr = eth_validate_addr, | |
9c46f6d4 | 2483 | .ndo_set_mac_address = virtnet_set_mac_address, |
2af7698e | 2484 | .ndo_set_rx_mode = virtnet_set_rx_mode, |
3fa2a1df | 2485 | .ndo_get_stats64 = virtnet_stats, |
1824a989 AW |
2486 | .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid, |
2487 | .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid, | |
f4e63525 | 2488 | .ndo_bpf = virtnet_xdp, |
186b3c99 | 2489 | .ndo_xdp_xmit = virtnet_xdp_xmit, |
2836b4f2 | 2490 | .ndo_features_check = passthru_features_check, |
ba5e4426 | 2491 | .ndo_get_phys_port_name = virtnet_get_phys_port_name, |
76288b4e SH |
2492 | }; |
2493 | ||
586d17c5 | 2494 | static void virtnet_config_changed_work(struct work_struct *work) |
9f4d26d0 | 2495 | { |
586d17c5 JW |
2496 | struct virtnet_info *vi = |
2497 | container_of(work, struct virtnet_info, config_work); | |
9f4d26d0 MM |
2498 | u16 v; |
2499 | ||
855e0c52 RR |
2500 | if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS, |
2501 | struct virtio_net_config, status, &v) < 0) | |
507613bf | 2502 | return; |
586d17c5 JW |
2503 | |
2504 | if (v & VIRTIO_NET_S_ANNOUNCE) { | |
ee89bab1 | 2505 | netdev_notify_peers(vi->dev); |
586d17c5 JW |
2506 | virtnet_ack_link_announce(vi); |
2507 | } | |
9f4d26d0 MM |
2508 | |
2509 | /* Ignore unknown (future) status bits */ | |
2510 | v &= VIRTIO_NET_S_LINK_UP; | |
2511 | ||
2512 | if (vi->status == v) | |
507613bf | 2513 | return; |
9f4d26d0 MM |
2514 | |
2515 | vi->status = v; | |
2516 | ||
2517 | if (vi->status & VIRTIO_NET_S_LINK_UP) { | |
faa9b39f | 2518 | virtnet_update_settings(vi); |
9f4d26d0 | 2519 | netif_carrier_on(vi->dev); |
986a4f4d | 2520 | netif_tx_wake_all_queues(vi->dev); |
9f4d26d0 MM |
2521 | } else { |
2522 | netif_carrier_off(vi->dev); | |
986a4f4d | 2523 | netif_tx_stop_all_queues(vi->dev); |
9f4d26d0 MM |
2524 | } |
2525 | } | |
2526 | ||
2527 | static void virtnet_config_changed(struct virtio_device *vdev) | |
2528 | { | |
2529 | struct virtnet_info *vi = vdev->priv; | |
2530 | ||
3b07e9ca | 2531 | schedule_work(&vi->config_work); |
9f4d26d0 MM |
2532 | } |
2533 | ||
986a4f4d JW |
2534 | static void virtnet_free_queues(struct virtnet_info *vi) |
2535 | { | |
d4fb84ee AV |
2536 | int i; |
2537 | ||
ab3971b1 JW |
2538 | for (i = 0; i < vi->max_queue_pairs; i++) { |
2539 | napi_hash_del(&vi->rq[i].napi); | |
d4fb84ee | 2540 | netif_napi_del(&vi->rq[i].napi); |
b92f1e67 | 2541 | netif_napi_del(&vi->sq[i].napi); |
ab3971b1 | 2542 | } |
d4fb84ee | 2543 | |
963abe5c ED |
2544 | /* We called napi_hash_del() before netif_napi_del(), |
2545 | * we need to respect an RCU grace period before freeing vi->rq | |
2546 | */ | |
2547 | synchronize_net(); | |
2548 | ||
986a4f4d JW |
2549 | kfree(vi->rq); |
2550 | kfree(vi->sq); | |
12e57169 | 2551 | kfree(vi->ctrl); |
986a4f4d JW |
2552 | } |
2553 | ||
47315329 | 2554 | static void _free_receive_bufs(struct virtnet_info *vi) |
986a4f4d | 2555 | { |
f600b690 | 2556 | struct bpf_prog *old_prog; |
986a4f4d JW |
2557 | int i; |
2558 | ||
2559 | for (i = 0; i < vi->max_queue_pairs; i++) { | |
2560 | while (vi->rq[i].pages) | |
2561 | __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0); | |
f600b690 JF |
2562 | |
2563 | old_prog = rtnl_dereference(vi->rq[i].xdp_prog); | |
2564 | RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL); | |
2565 | if (old_prog) | |
2566 | bpf_prog_put(old_prog); | |
986a4f4d | 2567 | } |
47315329 JF |
2568 | } |
2569 | ||
2570 | static void free_receive_bufs(struct virtnet_info *vi) | |
2571 | { | |
2572 | rtnl_lock(); | |
2573 | _free_receive_bufs(vi); | |
f600b690 | 2574 | rtnl_unlock(); |
986a4f4d JW |
2575 | } |
2576 | ||
fb51879d MD |
2577 | static void free_receive_page_frags(struct virtnet_info *vi) |
2578 | { | |
2579 | int i; | |
2580 | for (i = 0; i < vi->max_queue_pairs; i++) | |
2581 | if (vi->rq[i].alloc_frag.page) | |
2582 | put_page(vi->rq[i].alloc_frag.page); | |
2583 | } | |
2584 | ||
b68df015 | 2585 | static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q) |
56434a01 JF |
2586 | { |
2587 | if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs)) | |
2588 | return false; | |
2589 | else if (q < vi->curr_queue_pairs) | |
2590 | return true; | |
2591 | else | |
2592 | return false; | |
2593 | } | |
2594 | ||
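/* Queue index layout encoded by is_xdp_raw_buffer_queue() above: indexes
 * [0, curr_queue_pairs - xdp_queue_pairs) carry ordinary skbs, while the
 * trailing xdp_queue_pairs entries are reserved for XDP_TX and hold raw page
 * buffers, which is why free_unused_bufs() below releases them with
 * put_page() rather than dev_kfree_skb().
 */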
986a4f4d JW |
2595 | static void free_unused_bufs(struct virtnet_info *vi) |
2596 | { | |
2597 | void *buf; | |
2598 | int i; | |
2599 | ||
2600 | for (i = 0; i < vi->max_queue_pairs; i++) { | |
2601 | struct virtqueue *vq = vi->sq[i].vq; | |
56434a01 | 2602 | while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { |
b68df015 | 2603 | if (!is_xdp_raw_buffer_queue(vi, i)) |
56434a01 JF |
2604 | dev_kfree_skb(buf); |
2605 | else | |
2606 | put_page(virt_to_head_page(buf)); | |
2607 | } | |
986a4f4d JW |
2608 | } |
2609 | ||
2610 | for (i = 0; i < vi->max_queue_pairs; i++) { | |
2611 | struct virtqueue *vq = vi->rq[i].vq; | |
2612 | ||
2613 | while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { | |
ab7db917 | 2614 | if (vi->mergeable_rx_bufs) { |
680557cf | 2615 | put_page(virt_to_head_page(buf)); |
ab7db917 | 2616 | } else if (vi->big_packets) { |
fa9fac17 | 2617 | give_pages(&vi->rq[i], buf); |
ab7db917 | 2618 | } else { |
f6b10209 | 2619 | put_page(virt_to_head_page(buf)); |
ab7db917 | 2620 | } |
986a4f4d | 2621 | } |
986a4f4d JW |
2622 | } |
2623 | } | |
2624 | ||
e9d7417b JW |
2625 | static void virtnet_del_vqs(struct virtnet_info *vi) |
2626 | { | |
2627 | struct virtio_device *vdev = vi->vdev; | |
2628 | ||
8898c21c | 2629 | virtnet_clean_affinity(vi, -1); |
986a4f4d | 2630 | |
e9d7417b | 2631 | vdev->config->del_vqs(vdev); |
986a4f4d JW |
2632 | |
2633 | virtnet_free_queues(vi); | |
e9d7417b JW |
2634 | } |
2635 | ||
d85b758f MT |
2636 | /* How large should a single buffer be so a queue full of these can fit at |
2637 | * least one full packet? | |
2638 | * Logic below assumes the mergeable buffer header is used. | |
2639 | */ | |
2640 | static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq) | |
2641 | { | |
2642 | const unsigned int hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); | |
2643 | unsigned int rq_size = virtqueue_get_vring_size(vq); | |
2644 | unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu; | |
2645 | unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len; | |
2646 | unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size); | |
2647 | ||
f0c3192c MT |
2648 | return max(max(min_buf_len, hdr_len) - hdr_len, |
2649 | (unsigned int)GOOD_PACKET_LEN); | |
d85b758f MT |
2650 | } |
2651 | ||
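/* Worked example for mergeable_min_buf_len() above: assuming a 256-entry
 * ring, a 1500-byte max_mtu and no big_packets, buf_len = 12 + 14 + 4 +
 * 1500 = 1530 and min_buf_len = DIV_ROUND_UP(1530, 256) = 6; since that is
 * below the 12-byte mergeable header, the function falls back to
 * GOOD_PACKET_LEN (1518 bytes).
 */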
986a4f4d | 2652 | static int virtnet_find_vqs(struct virtnet_info *vi) |
3f9c10b0 | 2653 | { |
986a4f4d JW |
2654 | vq_callback_t **callbacks; |
2655 | struct virtqueue **vqs; | |
2656 | int ret = -ENOMEM; | |
2657 | int i, total_vqs; | |
2658 | const char **names; | |
d45b897b | 2659 | bool *ctx; |
986a4f4d JW |
2660 | |
2661 | /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by | |
2662 | * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by | |
2663 | * possible control vq. | |
2664 | */ | |
2665 | total_vqs = vi->max_queue_pairs * 2 + | |
2666 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ); | |
2667 | ||
2668 | /* Allocate space for find_vqs parameters */ | |
6396bb22 | 2669 | vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL); |
986a4f4d JW |
2670 | if (!vqs) |
2671 | goto err_vq; | |
6da2ec56 | 2672 | callbacks = kmalloc_array(total_vqs, sizeof(*callbacks), GFP_KERNEL); |
986a4f4d JW |
2673 | if (!callbacks) |
2674 | goto err_callback; | |
6da2ec56 | 2675 | names = kmalloc_array(total_vqs, sizeof(*names), GFP_KERNEL); |
986a4f4d JW |
2676 | if (!names) |
2677 | goto err_names; | |
192f68cf | 2678 | if (!vi->big_packets || vi->mergeable_rx_bufs) { |
6396bb22 | 2679 | ctx = kcalloc(total_vqs, sizeof(*ctx), GFP_KERNEL); |
d45b897b MT |
2680 | if (!ctx) |
2681 | goto err_ctx; | |
2682 | } else { | |
2683 | ctx = NULL; | |
2684 | } | |
986a4f4d JW |
2685 | |
2686 | /* Parameters for control virtqueue, if any */ | |
2687 | if (vi->has_cvq) { | |
2688 | callbacks[total_vqs - 1] = NULL; | |
2689 | names[total_vqs - 1] = "control"; | |
2690 | } | |
3f9c10b0 | 2691 | |
986a4f4d JW |
2692 | /* Allocate/initialize parameters for send/receive virtqueues */ |
2693 | for (i = 0; i < vi->max_queue_pairs; i++) { | |
2694 | callbacks[rxq2vq(i)] = skb_recv_done; | |
2695 | callbacks[txq2vq(i)] = skb_xmit_done; | |
2696 | sprintf(vi->rq[i].name, "input.%d", i); | |
2697 | sprintf(vi->sq[i].name, "output.%d", i); | |
2698 | names[rxq2vq(i)] = vi->rq[i].name; | |
2699 | names[txq2vq(i)] = vi->sq[i].name; | |
d45b897b MT |
2700 | if (ctx) |
2701 | ctx[rxq2vq(i)] = true; | |
986a4f4d | 2702 | } |
3f9c10b0 | 2703 | |
986a4f4d | 2704 | ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks, |
d45b897b | 2705 | names, ctx, NULL); |
986a4f4d JW |
2706 | if (ret) |
2707 | goto err_find; | |
3f9c10b0 | 2708 | |
986a4f4d JW |
2709 | if (vi->has_cvq) { |
2710 | vi->cvq = vqs[total_vqs - 1]; | |
3f9c10b0 | 2711 | if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN)) |
f646968f | 2712 | vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; |
3f9c10b0 | 2713 | } |
986a4f4d JW |
2714 | |
2715 | for (i = 0; i < vi->max_queue_pairs; i++) { | |
2716 | vi->rq[i].vq = vqs[rxq2vq(i)]; | |
d85b758f | 2717 | vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq); |
986a4f4d JW |
2718 | vi->sq[i].vq = vqs[txq2vq(i)]; |
2719 | } | |
2720 | ||
2fa3c8a8 | 2721 | /* run here: ret == 0. */ |
986a4f4d | 2722 | |
986a4f4d JW |
2723 | |
2724 | err_find: | |
d45b897b MT |
2725 | kfree(ctx); |
2726 | err_ctx: | |
986a4f4d JW |
2727 | kfree(names); |
2728 | err_names: | |
2729 | kfree(callbacks); | |
2730 | err_callback: | |
2731 | kfree(vqs); | |
2732 | err_vq: | |
2733 | return ret; | |
2734 | } | |
2735 | ||
2736 | static int virtnet_alloc_queues(struct virtnet_info *vi) | |
2737 | { | |
2738 | int i; | |
2739 | ||
12e57169 MT |
2740 | vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL); |
2741 | if (!vi->ctrl) | |
2742 | goto err_ctrl; | |
6396bb22 | 2743 | vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL); |
986a4f4d JW |
2744 | if (!vi->sq) |
2745 | goto err_sq; | |
6396bb22 | 2746 | vi->rq = kcalloc(vi->max_queue_pairs, sizeof(*vi->rq), GFP_KERNEL); |
008d4278 | 2747 | if (!vi->rq) |
986a4f4d JW |
2748 | goto err_rq; |
2749 | ||
2750 | INIT_DELAYED_WORK(&vi->refill, refill_work); | |
2751 | for (i = 0; i < vi->max_queue_pairs; i++) { | |
2752 | vi->rq[i].pages = NULL; | |
2753 | netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll, | |
2754 | napi_weight); | |
1d11e732 WB |
2755 | netif_tx_napi_add(vi->dev, &vi->sq[i].napi, virtnet_poll_tx, |
2756 | napi_tx ? napi_weight : 0); | |
986a4f4d JW |
2757 | |
2758 | sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg)); | |
5377d758 | 2759 | ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len); |
986a4f4d | 2760 | sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg)); |
d7dfc5cf TM |
2761 | |
2762 | u64_stats_init(&vi->rq[i].stats.syncp); | |
2763 | u64_stats_init(&vi->sq[i].stats.syncp); | |
986a4f4d JW |
2764 | } |
2765 | ||
2766 | return 0; | |
2767 | ||
2768 | err_rq: | |
2769 | kfree(vi->sq); | |
2770 | err_sq: | |
12e57169 MT |
2771 | kfree(vi->ctrl); |
2772 | err_ctrl: | |
986a4f4d JW |
2773 | return -ENOMEM; |
2774 | } | |
2775 | ||
2776 | static int init_vqs(struct virtnet_info *vi) | |
2777 | { | |
2778 | int ret; | |
2779 | ||
2780 | /* Allocate send & receive queues */ | |
2781 | ret = virtnet_alloc_queues(vi); | |
2782 | if (ret) | |
2783 | goto err; | |
2784 | ||
2785 | ret = virtnet_find_vqs(vi); | |
2786 | if (ret) | |
2787 | goto err_free; | |
2788 | ||
47be2479 | 2789 | get_online_cpus(); |
8898c21c | 2790 | virtnet_set_affinity(vi); |
47be2479 WG |
2791 | put_online_cpus(); |
2792 | ||
986a4f4d JW |
2793 | return 0; |
2794 | ||
2795 | err_free: | |
2796 | virtnet_free_queues(vi); | |
2797 | err: | |
2798 | return ret; | |
3f9c10b0 AS |
2799 | } |
2800 | ||
fbf28d78 MD |
2801 | #ifdef CONFIG_SYSFS |
2802 | static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue, | |
718ad681 | 2803 | char *buf) |
fbf28d78 MD |
2804 | { |
2805 | struct virtnet_info *vi = netdev_priv(queue->dev); | |
2806 | unsigned int queue_index = get_netdev_rx_queue_index(queue); | |
3cc81a9a JW |
2807 | unsigned int headroom = virtnet_get_headroom(vi); |
2808 | unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0; | |
5377d758 | 2809 | struct ewma_pkt_len *avg; |
fbf28d78 MD |
2810 | |
2811 | BUG_ON(queue_index >= vi->max_queue_pairs); | |
2812 | avg = &vi->rq[queue_index].mrg_avg_pkt_len; | |
d85b758f | 2813 | return sprintf(buf, "%u\n", |
3cc81a9a JW |
2814 | get_mergeable_buf_len(&vi->rq[queue_index], avg, |
2815 | SKB_DATA_ALIGN(headroom + tailroom))); | |
fbf28d78 MD |
2816 | } |
2817 | ||
2818 | static struct rx_queue_attribute mergeable_rx_buffer_size_attribute = | |
2819 | __ATTR_RO(mergeable_rx_buffer_size); | |
2820 | ||
2821 | static struct attribute *virtio_net_mrg_rx_attrs[] = { | |
2822 | &mergeable_rx_buffer_size_attribute.attr, | |
2823 | NULL | |
2824 | }; | |
2825 | ||
2826 | static const struct attribute_group virtio_net_mrg_rx_group = { | |
2827 | .name = "virtio_net", | |
2828 | .attrs = virtio_net_mrg_rx_attrs | |
2829 | }; | |
2830 | #endif | |
2831 | ||
892d6eb1 JW |
2832 | static bool virtnet_fail_on_feature(struct virtio_device *vdev, |
2833 | unsigned int fbit, | |
2834 | const char *fname, const char *dname) | |
2835 | { | |
2836 | if (!virtio_has_feature(vdev, fbit)) | |
2837 | return false; | |
2838 | ||
2839 | dev_err(&vdev->dev, "device advertises feature %s but not %s", | |
2840 | fname, dname); | |
2841 | ||
2842 | return true; | |
2843 | } | |
2844 | ||
2845 | #define VIRTNET_FAIL_ON(vdev, fbit, dbit) \ | |
2846 | virtnet_fail_on_feature(vdev, fbit, #fbit, dbit) | |
2847 | ||
2848 | static bool virtnet_validate_features(struct virtio_device *vdev) | |
2849 | { | |
2850 | if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) && | |
2851 | (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX, | |
2852 | "VIRTIO_NET_F_CTRL_VQ") || | |
2853 | VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN, | |
2854 | "VIRTIO_NET_F_CTRL_VQ") || | |
2855 | VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE, | |
2856 | "VIRTIO_NET_F_CTRL_VQ") || | |
2857 | VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") || | |
2858 | VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR, | |
2859 | "VIRTIO_NET_F_CTRL_VQ"))) { | |
2860 | return false; | |
2861 | } | |
2862 | ||
2863 | return true; | |
2864 | } | |
2865 | ||
d0c2c997 JW |
2866 | #define MIN_MTU ETH_MIN_MTU |
2867 | #define MAX_MTU ETH_MAX_MTU | |
2868 | ||
static int virtnet_validate(struct virtio_device *vdev)
{
	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	if (!virtnet_validate_features(vdev))
		return -EINVAL;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
		int mtu = virtio_cread16(vdev,
					 offsetof(struct virtio_net_config,
						  mtu));
		if (mtu < MIN_MTU)
			__virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
	}

	return 0;
}

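/* Probe: allocate the net_device, translate negotiated virtio features into
 * netdev features and offloads, set up the virtqueues, and register the
 * interface with the networking core.
 */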
static int virtnet_probe(struct virtio_device *vdev)
{
	int i, err = -ENOMEM;
	struct net_device *dev;
	struct virtnet_info *vi;
	u16 max_queue_pairs;
	int mtu;

	/* Find if host supports multiqueue virtio_net device */
	err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ,
				   struct virtio_net_config,
				   max_virtqueue_pairs, &max_queue_pairs);

	/* Fall back to a single queue pair unless the device advertises a
	 * sane multiqueue configuration together with a control virtqueue.
	 */
	if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
	    max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
	    !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
		max_queue_pairs = 1;

	/* Allocate ourselves a network device with room for our info */
	dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs);
	if (!dev)
		return -ENOMEM;

	/* Set up network device as normal. */
	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
	dev->netdev_ops = &virtnet_netdev;
	dev->features = NETIF_F_HIGHDMA;

	dev->ethtool_ops = &virtnet_ethtool_ops;
	SET_NETDEV_DEV(dev, &vdev->dev);

	/* Do we support "hardware" checksums? */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
		/* This opens up the world of extra features. */
		dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
		if (csum)
			dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;

		if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
			dev->hw_features |= NETIF_F_TSO
				| NETIF_F_TSO_ECN | NETIF_F_TSO6;
		}
		/* Individual feature bits: what can host handle? */
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
			dev->hw_features |= NETIF_F_TSO;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
			dev->hw_features |= NETIF_F_TSO6;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
			dev->hw_features |= NETIF_F_TSO_ECN;

		dev->features |= NETIF_F_GSO_ROBUST;

		if (gso)
			dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
		/* (!csum && gso) case will be fixed by register_netdev() */
	}
	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
		dev->features |= NETIF_F_RXCSUM;

	dev->vlan_features = dev->features;

	/* MTU range: 68 - 65535 */
	dev->min_mtu = MIN_MTU;
	dev->max_mtu = MAX_MTU;

	/* Configuration may specify what MAC to use. Otherwise random. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
		virtio_cread_bytes(vdev,
				   offsetof(struct virtio_net_config, mac),
				   dev->dev_addr, dev->addr_len);
	else
		eth_hw_addr_random(dev);

	/* Set up our device-specific information */
	vi = netdev_priv(dev);
	vi->dev = dev;
	vi->vdev = vdev;
	vdev->priv = vi;

	INIT_WORK(&vi->config_work, virtnet_config_changed_work);

	/* If we can receive ANY GSO packets, we must allocate large ones. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
		vi->big_packets = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
		vi->mergeable_rx_bufs = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) ||
	    virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
		vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	else
		vi->hdr_len = sizeof(struct virtio_net_hdr);

	if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) ||
	    virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
		vi->any_header_sg = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
		vi->has_cvq = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
		mtu = virtio_cread16(vdev,
				     offsetof(struct virtio_net_config,
					      mtu));
		if (mtu < dev->min_mtu) {
			/* Should never trigger: MTU was previously validated
			 * in virtnet_validate.
			 */
			dev_err(&vdev->dev,
				"device MTU appears to have changed, it is now %d < %d",
				mtu, dev->min_mtu);
			goto free;
		}

		dev->mtu = mtu;
		dev->max_mtu = mtu;

		/* TODO: size buffers correctly in this case. */
		if (dev->mtu > ETH_DATA_LEN)
			vi->big_packets = true;
	}

	if (vi->any_header_sg)
		dev->needed_headroom = vi->hdr_len;

	/* Enable multiqueue by default */
	if (num_online_cpus() >= max_queue_pairs)
		vi->curr_queue_pairs = max_queue_pairs;
	else
		vi->curr_queue_pairs = num_online_cpus();
	vi->max_queue_pairs = max_queue_pairs;

	/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
	err = init_vqs(vi);
	if (err)
		goto free;

#ifdef CONFIG_SYSFS
	if (vi->mergeable_rx_bufs)
		dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group;
#endif
	netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
	netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);

	virtnet_init_settings(dev);

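	/* VIRTIO_NET_F_STANDBY: this device may act as standby for a primary
	 * device (e.g. a passthrough VF) with the same MAC; create the
	 * net_failover master that pairs the two.
	 */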
	if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY)) {
		vi->failover = net_failover_create(vi->dev);
		if (IS_ERR(vi->failover)) {
			err = PTR_ERR(vi->failover);
			goto free_vqs;
		}
	}

	err = register_netdev(dev);
	if (err) {
		pr_debug("virtio_net: registering device failed\n");
		goto free_failover;
	}

	virtio_device_ready(vdev);

	err = virtnet_cpu_notif_add(vi);
	if (err) {
		pr_debug("virtio_net: registering cpu notifier failed\n");
		goto free_unregister_netdev;
	}

	virtnet_set_queues(vi, vi->curr_queue_pairs);

	/* Assume link up if device can't report link status,
	 * otherwise get link status from config.
	 */
	netif_carrier_off(dev);
	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
		schedule_work(&vi->config_work);
	} else {
		vi->status = VIRTIO_NET_S_LINK_UP;
		virtnet_update_settings(vi);
		netif_carrier_on(dev);
	}

	for (i = 0; i < ARRAY_SIZE(guest_offloads); i++)
		if (virtio_has_feature(vi->vdev, guest_offloads[i]))
			set_bit(guest_offloads[i], &vi->guest_offloads);

	pr_debug("virtnet: registered device %s with %d RX and TX vq's\n",
		 dev->name, max_queue_pairs);

	return 0;

free_unregister_netdev:
	vi->vdev->config->reset(vdev);

	unregister_netdev(dev);
free_failover:
	net_failover_destroy(vi->failover);
free_vqs:
	cancel_delayed_work_sync(&vi->refill);
	free_receive_page_frags(vi);
	virtnet_del_vqs(vi);
free:
	free_netdev(dev);
	return err;
}

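/* Common teardown of the data path, shared by remove and freeze: reset the
 * device so it stops using the rings, then free pending buffers and the
 * virtqueues themselves.
 */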
static void remove_vq_common(struct virtnet_info *vi)
{
	vi->vdev->config->reset(vi->vdev);

	/* Free unused buffers in both send and recv, if any. */
	free_unused_bufs(vi);

	free_receive_bufs(vi);

	free_receive_page_frags(vi);

	virtnet_del_vqs(vi);
}

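/* Device removal: stop the CPU hotplug callbacks and the config-change worker
 * before unregistering the netdev and tearing down the virtqueues.
 */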
static void virtnet_remove(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	virtnet_cpu_notif_remove(vi);

	/* Make sure no work handler is accessing the device. */
	flush_work(&vi->config_work);

	unregister_netdev(vi->dev);

	net_failover_destroy(vi->failover);

	remove_vq_common(vi);

	free_netdev(vi->dev);
}

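/* Power management: freeze resets the device and tears down the virtqueues;
 * restore rebuilds them and re-applies the configured number of queue pairs.
 */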
static __maybe_unused int virtnet_freeze(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	virtnet_cpu_notif_remove(vi);
	virtnet_freeze_down(vdev);
	remove_vq_common(vi);

	return 0;
}

static __maybe_unused int virtnet_restore(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;
	int err;

	err = virtnet_restore_up(vdev);
	if (err)
		return err;
	virtnet_set_queues(vi, vi->curr_queue_pairs);

	err = virtnet_cpu_notif_add(vi);
	if (err)
		return err;

	return 0;
}

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

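/* Feature tables: the legacy table additionally offers VIRTIO_NET_F_GSO and
 * VIRTIO_F_ANY_LAYOUT, which are only negotiated with pre-1.0 (legacy)
 * devices.
 */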
#define VIRTNET_FEATURES \
	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \
	VIRTIO_NET_F_MAC, \
	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \
	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \
	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \
	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \
	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \
	VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
	VIRTIO_NET_F_CTRL_MAC_ADDR, \
	VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \
	VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY

static unsigned int features[] = {
	VIRTNET_FEATURES,
};

static unsigned int features_legacy[] = {
	VIRTNET_FEATURES,
	VIRTIO_NET_F_GSO,
	VIRTIO_F_ANY_LAYOUT,
};

static struct virtio_driver virtio_net_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.feature_table_legacy = features_legacy,
	.feature_table_size_legacy = ARRAY_SIZE(features_legacy),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.validate = virtnet_validate,
	.probe = virtnet_probe,
	.remove = virtnet_remove,
	.config_changed = virtnet_config_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze = virtnet_freeze,
	.restore = virtnet_restore,
#endif
};

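/* Module init: register the two CPU hotplug callback states used to rebind
 * queue affinity before registering the virtio driver itself, and unwind in
 * reverse order on failure.
 */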
static __init int virtio_net_driver_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "virtio/net:online",
				      virtnet_cpu_online,
				      virtnet_cpu_down_prep);
	if (ret < 0)
		goto out;
	virtionet_online = ret;
	ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "virtio/net:dead",
				      NULL, virtnet_cpu_dead);
	if (ret)
		goto err_dead;

	ret = register_virtio_driver(&virtio_net_driver);
	if (ret)
		goto err_virtio;
	return 0;
err_virtio:
	cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
err_dead:
	cpuhp_remove_multi_state(virtionet_online);
out:
	return ret;
}
module_init(virtio_net_driver_init);

static __exit void virtio_net_driver_exit(void)
{
	unregister_virtio_driver(&virtio_net_driver);
	cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
	cpuhp_remove_multi_state(virtionet_online);
}
module_exit(virtio_net_driver_exit);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio network driver");
MODULE_LICENSE("GPL");