/*
 * Virtio Network Device
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori <[email protected]>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/atomic.h"
#include "qemu/iov.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "hw/virtio/virtio.h"
#include "net/net.h"
#include "net/checksum.h"
#include "net/tap.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "qemu/option.h"
#include "qemu/option_int.h"
#include "qemu/config-file.h"
#include "qapi/qmp/qdict.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "net/announce.h"
#include "hw/virtio/virtio-bus.h"
#include "qapi/error.h"
#include "qapi/qapi-events-net.h"
#include "hw/qdev-properties.h"
#include "qapi/qapi-types-migration.h"
#include "qapi/qapi-events-migration.h"
#include "hw/virtio/virtio-access.h"
#include "migration/misc.h"
#include "standard-headers/linux/ethtool.h"
#include "sysemu/sysemu.h"
#include "trace.h"
#include "monitor/qdev.h"
#include "hw/pci/pci.h"

#define VIRTIO_NET_VM_VERSION    11

#define MAC_TABLE_ENTRIES    64
#define MAX_VLAN    (1 << 12)   /* Per 802.1Q definition */

/* previously fixed value */
#define VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE 256
#define VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE 256

/* for now, only allow larger queues; with virtio-1, guest can downsize */
#define VIRTIO_NET_RX_QUEUE_MIN_SIZE VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE
#define VIRTIO_NET_TX_QUEUE_MIN_SIZE VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE

#define VIRTIO_NET_IP4_ADDR_SIZE   8        /* ipv4 saddr + daddr */

#define VIRTIO_NET_TCP_FLAG         0x3F
#define VIRTIO_NET_TCP_HDR_LENGTH   0xF000

/* IPv4 max payload, 16 bits in the header */
#define VIRTIO_NET_MAX_IP4_PAYLOAD (65535 - sizeof(struct ip_header))
#define VIRTIO_NET_MAX_TCP_PAYLOAD 65535

/* header length value in ip header without option */
#define VIRTIO_NET_IP4_HEADER_LENGTH 5

#define VIRTIO_NET_IP6_ADDR_SIZE   32      /* ipv6 saddr + daddr */
#define VIRTIO_NET_MAX_IP6_PAYLOAD VIRTIO_NET_MAX_TCP_PAYLOAD

/* Purge coalesced packets timer interval.  This value affects performance
   a lot and should be tuned carefully: '300000' (300us) is the recommended
   value to pass the WHQL test, while '50000' can gain 2x netperf throughput
   with tso/gso/gro 'off'. */
#define VIRTIO_NET_RSC_DEFAULT_INTERVAL 300000

/* temporary until the standard header includes it */
#if !defined(VIRTIO_NET_HDR_F_RSC_INFO)

#define VIRTIO_NET_HDR_F_RSC_INFO  4 /* rsc_ext data in csum_ fields */
#define VIRTIO_NET_F_RSC_EXT       61

static inline __virtio16 *virtio_net_rsc_ext_num_packets(
    struct virtio_net_hdr *hdr)
{
    return &hdr->csum_start;
}

static inline __virtio16 *virtio_net_rsc_ext_num_dupacks(
    struct virtio_net_hdr *hdr)
{
    return &hdr->csum_offset;
}

#endif

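/*
 * Each entry pairs a feature bit with the end offset of the
 * virtio_net_config field it exposes, so the visible config space size can
 * be derived from the negotiated features; the empty entry terminates the
 * list.
 */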
static VirtIOFeature feature_sizes[] = {
    {.flags = 1ULL << VIRTIO_NET_F_MAC,
     .end = endof(struct virtio_net_config, mac)},
    {.flags = 1ULL << VIRTIO_NET_F_STATUS,
     .end = endof(struct virtio_net_config, status)},
    {.flags = 1ULL << VIRTIO_NET_F_MQ,
     .end = endof(struct virtio_net_config, max_virtqueue_pairs)},
    {.flags = 1ULL << VIRTIO_NET_F_MTU,
     .end = endof(struct virtio_net_config, mtu)},
    {.flags = 1ULL << VIRTIO_NET_F_SPEED_DUPLEX,
     .end = endof(struct virtio_net_config, duplex)},
    {}
};

static VirtIONetQueue *virtio_net_get_subqueue(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    return &n->vqs[nc->queue_index];
}

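/*
 * Virtqueues are laid out as rx0/tx0, rx1/tx1, ...; dividing the virtqueue
 * index by two therefore yields the queue pair it belongs to.
 */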
static int vq2q(int queue_index)
{
    return queue_index / 2;
}

/* TODO
 * - we could suppress RX interrupt if we were so inclined.
 */

static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_config netcfg;

    virtio_stw_p(vdev, &netcfg.status, n->status);
    virtio_stw_p(vdev, &netcfg.max_virtqueue_pairs, n->max_queues);
    virtio_stw_p(vdev, &netcfg.mtu, n->net_conf.mtu);
    memcpy(netcfg.mac, n->mac, ETH_ALEN);
    virtio_stl_p(vdev, &netcfg.speed, n->net_conf.speed);
    netcfg.duplex = n->net_conf.duplex;
    memcpy(config, &netcfg, n->config_size);
}

static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_config netcfg = {};

    memcpy(&netcfg, config, n->config_size);

    if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR) &&
        !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1) &&
        memcmp(netcfg.mac, n->mac, ETH_ALEN)) {
        memcpy(n->mac, netcfg.mac, ETH_ALEN);
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
    }
}

static bool virtio_net_started(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    return (status & VIRTIO_CONFIG_S_DRIVER_OK) &&
        (n->status & VIRTIO_NET_S_LINK_UP) && vdev->vm_running;
}

static void virtio_net_announce_notify(VirtIONet *net)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(net);
    trace_virtio_net_announce_notify();

    net->status |= VIRTIO_NET_S_ANNOUNCE;
    virtio_notify_config(vdev);
}

static void virtio_net_announce_timer(void *opaque)
{
    VirtIONet *n = opaque;
    trace_virtio_net_announce_timer(n->announce_timer.round);

    n->announce_timer.round--;
    virtio_net_announce_notify(n);
}

static void virtio_net_announce(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    /*
     * Make sure the virtio migration announcement timer isn't running.
     * If it is, let it trigger the announcement so that we do not cause
     * confusion.
     */
    if (n->announce_timer.round) {
        return;
    }

    if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE) &&
        virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
        virtio_net_announce_notify(n);
    }
}

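/*
 * Start or stop the vhost backend to follow the device status: vhost is
 * started once the guest driver is ready and the link is up, and is stopped
 * (falling back to the userspace datapath) otherwise or on error.
 */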
static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    NetClientState *nc = qemu_get_queue(n->nic);
    int queues = n->multiqueue ? n->max_queues : 1;

    if (!get_vhost_net(nc->peer)) {
        return;
    }

    if ((virtio_net_started(n, status) && !nc->peer->link_down) ==
        !!n->vhost_started) {
        return;
    }
    if (!n->vhost_started) {
        int r, i;

        if (n->needs_vnet_hdr_swap) {
            error_report("backend does not support %s vnet headers; "
                         "falling back on userspace virtio",
                         virtio_is_big_endian(vdev) ? "BE" : "LE");
            return;
        }

        /* Any packets outstanding? Purge them to avoid touching rings
         * when vhost is running.
         */
        for (i = 0; i < queues; i++) {
            NetClientState *qnc = qemu_get_subqueue(n->nic, i);

            /* Purge both directions: TX and RX. */
            qemu_net_queue_purge(qnc->peer->incoming_queue, qnc);
            qemu_net_queue_purge(qnc->incoming_queue, qnc->peer);
        }

        if (virtio_has_feature(vdev->guest_features, VIRTIO_NET_F_MTU)) {
            r = vhost_net_set_mtu(get_vhost_net(nc->peer), n->net_conf.mtu);
            if (r < 0) {
                error_report("%uBytes MTU not supported by the backend",
                             n->net_conf.mtu);

                return;
            }
        }

        n->vhost_started = 1;
        r = vhost_net_start(vdev, n->nic->ncs, queues);
        if (r < 0) {
            error_report("unable to start vhost net: %d: "
                         "falling back on userspace virtio", -r);
            n->vhost_started = 0;
        }
    } else {
        vhost_net_stop(vdev, n->nic->ncs, queues);
        n->vhost_started = 0;
    }
}

static int virtio_net_set_vnet_endian_one(VirtIODevice *vdev,
                                          NetClientState *peer,
                                          bool enable)
{
    if (virtio_is_big_endian(vdev)) {
        return qemu_set_vnet_be(peer, enable);
    } else {
        return qemu_set_vnet_le(peer, enable);
    }
}

static bool virtio_net_set_vnet_endian(VirtIODevice *vdev, NetClientState *ncs,
                                       int queues, bool enable)
{
    int i;

    for (i = 0; i < queues; i++) {
        if (virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, enable) < 0 &&
            enable) {
            while (--i >= 0) {
                virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, false);
            }

            return true;
        }
    }

    return false;
}

static void virtio_net_vnet_endian_status(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int queues = n->multiqueue ? n->max_queues : 1;

    if (virtio_net_started(n, status)) {
        /* Before using the device, we tell the network backend about the
         * endianness to use when parsing vnet headers. If the backend
         * can't do it, we fall back on fixing the headers in the core
         * virtio-net code.
         */
        n->needs_vnet_hdr_swap = virtio_net_set_vnet_endian(vdev, n->nic->ncs,
                                                            queues, true);
    } else if (virtio_net_started(n, vdev->status)) {
        /* After using the device, we need to reset the network backend to
         * the default (guest native endianness), otherwise the guest may
         * lose network connectivity if it is rebooted into a different
         * endianness.
         */
        virtio_net_set_vnet_endian(vdev, n->nic->ncs, queues, false);
    }
}

static void virtio_net_drop_tx_queue_data(VirtIODevice *vdev, VirtQueue *vq)
{
    unsigned int dropped = virtqueue_drop_all(vq);
    if (dropped) {
        virtio_notify(vdev, vq);
    }
}

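/*
 * Propagate a device status change to every queue pair: update the vnet
 * header endianness and the vhost backend first, then flush, (re)arm or
 * cancel each queue's TX timer/bottom half to match the new state.
 */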
static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q;
    int i;
    uint8_t queue_status;

    virtio_net_vnet_endian_status(n, status);
    virtio_net_vhost_status(n, status);

    for (i = 0; i < n->max_queues; i++) {
        NetClientState *ncs = qemu_get_subqueue(n->nic, i);
        bool queue_started;
        q = &n->vqs[i];

        if ((!n->multiqueue && i != 0) || i >= n->curr_queues) {
            queue_status = 0;
        } else {
            queue_status = status;
        }
        queue_started =
            virtio_net_started(n, queue_status) && !n->vhost_started;

        if (queue_started) {
            qemu_flush_queued_packets(ncs);
        }

        if (!q->tx_waiting) {
            continue;
        }

        if (queue_started) {
            if (q->tx_timer) {
                timer_mod(q->tx_timer,
                          qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
            } else {
                qemu_bh_schedule(q->tx_bh);
            }
        } else {
            if (q->tx_timer) {
                timer_del(q->tx_timer);
            } else {
                qemu_bh_cancel(q->tx_bh);
            }
            if ((n->status & VIRTIO_NET_S_LINK_UP) == 0 &&
                (queue_status & VIRTIO_CONFIG_S_DRIVER_OK) &&
                vdev->vm_running) {
                /* If tx is waiting, we likely have some packets in the tx
                 * queue and notification disabled */
                q->tx_waiting = 0;
                virtio_queue_set_notification(q->tx_vq, 1);
                virtio_net_drop_tx_queue_data(vdev, q->tx_vq);
            }
        }
    }
}

static void virtio_net_set_link_status(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint16_t old_status = n->status;

    if (nc->link_down)
        n->status &= ~VIRTIO_NET_S_LINK_UP;
    else
        n->status |= VIRTIO_NET_S_LINK_UP;

    if (n->status != old_status)
        virtio_notify_config(vdev);

    virtio_net_set_status(vdev, vdev->status);
}

static void rxfilter_notify(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    if (nc->rxfilter_notify_enabled) {
        gchar *path = object_get_canonical_path(OBJECT(n->qdev));
        qapi_event_send_nic_rx_filter_changed(!!n->netclient_name,
                                              n->netclient_name, path);
        g_free(path);

        /* disable event notification to avoid events flooding */
        nc->rxfilter_notify_enabled = 0;
    }
}

static intList *get_vlan_table(VirtIONet *n)
{
    intList *list, *entry;
    int i, j;

    list = NULL;
    for (i = 0; i < MAX_VLAN >> 5; i++) {
        for (j = 0; n->vlans[i] && j <= 0x1f; j++) {
            if (n->vlans[i] & (1U << j)) {
                entry = g_malloc0(sizeof(*entry));
                entry->value = (i << 5) + j;
                entry->next = list;
                list = entry;
            }
        }
    }

    return list;
}

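/*
 * Build the RxFilterInfo snapshot returned by the rx-filter query from the
 * current promiscuous/unicast/multicast state, the MAC filter table and the
 * VLAN table, then re-enable rx-filter change notifications.
 */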
static RxFilterInfo *virtio_net_query_rxfilter(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    RxFilterInfo *info;
    strList *str_list, *entry;
    int i;

    info = g_malloc0(sizeof(*info));
    info->name = g_strdup(nc->name);
    info->promiscuous = n->promisc;

    if (n->nouni) {
        info->unicast = RX_STATE_NONE;
    } else if (n->alluni) {
        info->unicast = RX_STATE_ALL;
    } else {
        info->unicast = RX_STATE_NORMAL;
    }

    if (n->nomulti) {
        info->multicast = RX_STATE_NONE;
    } else if (n->allmulti) {
        info->multicast = RX_STATE_ALL;
    } else {
        info->multicast = RX_STATE_NORMAL;
    }

    info->broadcast_allowed = n->nobcast;
    info->multicast_overflow = n->mac_table.multi_overflow;
    info->unicast_overflow = n->mac_table.uni_overflow;

    info->main_mac = qemu_mac_strdup_printf(n->mac);

    str_list = NULL;
    for (i = 0; i < n->mac_table.first_multi; i++) {
        entry = g_malloc0(sizeof(*entry));
        entry->value = qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
        entry->next = str_list;
        str_list = entry;
    }
    info->unicast_table = str_list;

    str_list = NULL;
    for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
        entry = g_malloc0(sizeof(*entry));
        entry->value = qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
        entry->next = str_list;
        str_list = entry;
    }
    info->multicast_table = str_list;
    info->vlan_table = get_vlan_table(n);

    if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VLAN)) {
        info->vlan = RX_STATE_ALL;
    } else if (!info->vlan_table) {
        info->vlan = RX_STATE_NONE;
    } else {
        info->vlan = RX_STATE_NORMAL;
    }

    /* enable event notification after query */
    nc->rxfilter_notify_enabled = 1;

    return info;
}

static void virtio_net_reset(VirtIODevice *vdev)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int i;

    /* Reset back to compatibility mode */
    n->promisc = 1;
    n->allmulti = 0;
    n->alluni = 0;
    n->nomulti = 0;
    n->nouni = 0;
    n->nobcast = 0;
    /* multiqueue is disabled by default */
    n->curr_queues = 1;
    timer_del(n->announce_timer.tm);
    n->announce_timer.round = 0;
    n->status &= ~VIRTIO_NET_S_ANNOUNCE;

    /* Flush any MAC and VLAN filter table state */
    n->mac_table.in_use = 0;
    n->mac_table.first_multi = 0;
    n->mac_table.multi_overflow = 0;
    n->mac_table.uni_overflow = 0;
    memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN);
    memcpy(&n->mac[0], &n->nic->conf->macaddr, sizeof(n->mac));
    qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
    memset(n->vlans, 0, MAX_VLAN >> 3);

    /* Flush any async TX */
    for (i = 0; i < n->max_queues; i++) {
        NetClientState *nc = qemu_get_subqueue(n->nic, i);

        if (nc->peer) {
            qemu_flush_or_purge_queued_packets(nc->peer, true);
            assert(!virtio_net_get_subqueue(nc)->async_tx.elem);
        }
    }
}

static void peer_test_vnet_hdr(VirtIONet *n)
{
    NetClientState *nc = qemu_get_queue(n->nic);
    if (!nc->peer) {
        return;
    }

    n->has_vnet_hdr = qemu_has_vnet_hdr(nc->peer);
}

static int peer_has_vnet_hdr(VirtIONet *n)
{
    return n->has_vnet_hdr;
}

static int peer_has_ufo(VirtIONet *n)
{
    if (!peer_has_vnet_hdr(n))
        return 0;

    n->has_ufo = qemu_has_ufo(qemu_get_queue(n->nic)->peer);

    return n->has_ufo;
}

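/*
 * Recompute the guest header length from the negotiated features (virtio-1
 * and mergeable RX buffers both imply the larger virtio_net_hdr_mrg_rxbuf)
 * and, when the peer supports that header length, make the backend use it
 * too so host and guest headers match.
 */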
static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs,
                                       int version_1)
{
    int i;
    NetClientState *nc;

    n->mergeable_rx_bufs = mergeable_rx_bufs;

    if (version_1) {
        n->guest_hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
    } else {
        n->guest_hdr_len = n->mergeable_rx_bufs ?
            sizeof(struct virtio_net_hdr_mrg_rxbuf) :
            sizeof(struct virtio_net_hdr);
    }

    for (i = 0; i < n->max_queues; i++) {
        nc = qemu_get_subqueue(n->nic, i);

        if (peer_has_vnet_hdr(n) &&
            qemu_has_vnet_hdr_len(nc->peer, n->guest_hdr_len)) {
            qemu_set_vnet_hdr_len(nc->peer, n->guest_hdr_len);
            n->host_hdr_len = n->guest_hdr_len;
        }
    }
}

static int virtio_net_max_tx_queue_size(VirtIONet *n)
{
    NetClientState *peer = n->nic_conf.peers.ncs[0];

    /*
     * Backends other than vhost-user don't support max queue size.
     */
    if (!peer) {
        return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
    }

    if (peer->info->type != NET_CLIENT_DRIVER_VHOST_USER) {
        return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
    }

    return VIRTQUEUE_MAX_SIZE;
}

static int peer_attach(VirtIONet *n, int index)
{
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    if (!nc->peer) {
        return 0;
    }

    if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
        vhost_set_vring_enable(nc->peer, 1);
    }

    if (nc->peer->info->type != NET_CLIENT_DRIVER_TAP) {
        return 0;
    }

    if (n->max_queues == 1) {
        return 0;
    }

    return tap_enable(nc->peer);
}

static int peer_detach(VirtIONet *n, int index)
{
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    if (!nc->peer) {
        return 0;
    }

    if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
        vhost_set_vring_enable(nc->peer, 0);
    }

    if (nc->peer->info->type != NET_CLIENT_DRIVER_TAP) {
        return 0;
    }

    return tap_disable(nc->peer);
}

static void virtio_net_set_queues(VirtIONet *n)
{
    int i;
    int r;

    if (n->nic->peer_deleted) {
        return;
    }

    for (i = 0; i < n->max_queues; i++) {
        if (i < n->curr_queues) {
            r = peer_attach(n, i);
            assert(!r);
        } else {
            r = peer_detach(n, i);
            assert(!r);
        }
    }
}

static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue);

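/*
 * Advertise the host feature set, masked by what the peer (backend) can
 * actually provide: without a vnet-header-capable peer the checksum, TSO
 * and UFO offloads are stripped, and a vhost backend further filters the
 * result.
 */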
static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
                                        Error **errp)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_queue(n->nic);

    /* Firstly sync all virtio-net possible supported features */
    features |= n->host_features;

    virtio_add_feature(&features, VIRTIO_NET_F_MAC);

    if (!peer_has_vnet_hdr(n)) {
        virtio_clear_feature(&features, VIRTIO_NET_F_CSUM);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO4);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO6);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_ECN);

        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_CSUM);
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO4);
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO6);
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ECN);
    }

    if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) {
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_UFO);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_UFO);
    }

    if (!get_vhost_net(nc->peer)) {
        return features;
    }

    features = vhost_net_get_features(get_vhost_net(nc->peer), features);
    vdev->backend_features = features;

    if (n->mtu_bypass_backend &&
            (n->host_features & 1ULL << VIRTIO_NET_F_MTU)) {
        features |= (1ULL << VIRTIO_NET_F_MTU);
    }

    return features;
}

static uint64_t virtio_net_bad_features(VirtIODevice *vdev)
{
    uint64_t features = 0;

    /* Linux kernel 2.6.25.  It understood MAC (as everyone must),
     * but also these: */
    virtio_add_feature(&features, VIRTIO_NET_F_MAC);
    virtio_add_feature(&features, VIRTIO_NET_F_CSUM);
    virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO4);
    virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO6);
    virtio_add_feature(&features, VIRTIO_NET_F_HOST_ECN);

    return features;
}

static void virtio_net_apply_guest_offloads(VirtIONet *n)
{
    qemu_set_offload(qemu_get_queue(n->nic)->peer,
                     !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_CSUM)),
                     !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO4)),
                     !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO6)),
                     !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_ECN)),
                     !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_UFO)));
}

static uint64_t virtio_net_guest_offloads_by_features(uint32_t features)
{
    static const uint64_t guest_offloads_mask =
        (1ULL << VIRTIO_NET_F_GUEST_CSUM) |
        (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
        (1ULL << VIRTIO_NET_F_GUEST_TSO6) |
        (1ULL << VIRTIO_NET_F_GUEST_ECN)  |
        (1ULL << VIRTIO_NET_F_GUEST_UFO);

    return guest_offloads_mask & features;
}

static inline uint64_t virtio_net_supported_guest_offloads(VirtIONet *n)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    return virtio_net_guest_offloads_by_features(vdev->guest_features);
}

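/*
 * Failover support: locate the primary device whose failover_pair_id
 * option names this virtio-net device and add it; this is triggered once
 * the guest negotiates VIRTIO_NET_F_STANDBY (see virtio_net_set_features).
 */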
static void failover_add_primary(VirtIONet *n, Error **errp)
{
    Error *err = NULL;

    if (n->primary_dev) {
        return;
    }

    n->primary_device_opts = qemu_opts_find(qemu_find_opts("device"),
                                            n->primary_device_id);
    if (n->primary_device_opts) {
        n->primary_dev = qdev_device_add(n->primary_device_opts, &err);
        if (err) {
            qemu_opts_del(n->primary_device_opts);
        }
        if (n->primary_dev) {
            n->primary_bus = n->primary_dev->parent_bus;
            if (err) {
                qdev_unplug(n->primary_dev, &err);
                qdev_set_id(n->primary_dev, "");
            }
        }
    } else {
        error_setg(errp, "Primary device not found");
        error_append_hint(errp, "Virtio-net failover will not work. Make "
                          "sure primary device has parameter"
                          " failover_pair_id=<virtio-net-id>\n");
    }
    if (err) {
        error_propagate(errp, err);
    }
}

static int is_my_primary(void *opaque, QemuOpts *opts, Error **errp)
{
    VirtIONet *n = opaque;
    int ret = 0;

    const char *standby_id = qemu_opt_get(opts, "failover_pair_id");

    if (standby_id != NULL && (g_strcmp0(standby_id, n->netclient_name) == 0)) {
        n->primary_device_id = g_strdup(opts->id);
        ret = 1;
    }

    return ret;
}

static DeviceState *virtio_net_find_primary(VirtIONet *n, Error **errp)
{
    DeviceState *dev = NULL;
    Error *err = NULL;

    if (qemu_opts_foreach(qemu_find_opts("device"),
                          is_my_primary, n, &err)) {
        if (err) {
            error_propagate(errp, err);
            return NULL;
        }
        if (n->primary_device_id) {
            dev = qdev_find_recursive(sysbus_get_default(),
                                      n->primary_device_id);
        } else {
            error_setg(errp, "Primary device id not found");
            return NULL;
        }
    }
    return dev;
}

static DeviceState *virtio_connect_failover_devices(VirtIONet *n,
                                                    DeviceState *dev,
                                                    Error **errp)
{
    DeviceState *prim_dev = NULL;
    Error *err = NULL;

    prim_dev = virtio_net_find_primary(n, &err);
    if (prim_dev) {
        n->primary_device_id = g_strdup(prim_dev->id);
        n->primary_device_opts = prim_dev->opts;
    } else {
        if (err) {
            error_propagate(errp, err);
        }
    }

    return prim_dev;
}

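/*
 * Called when the guest acks its feature set: reconfigure multiqueue, the
 * header layout, RSC and guest offloads accordingly, propagate the features
 * to any vhost backend, and kick off failover primary hot-add when
 * VIRTIO_NET_F_STANDBY was negotiated.
 */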
static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    Error *err = NULL;
    int i;

    if (n->mtu_bypass_backend &&
            !virtio_has_feature(vdev->backend_features, VIRTIO_NET_F_MTU)) {
        features &= ~(1ULL << VIRTIO_NET_F_MTU);
    }

    virtio_net_set_multiqueue(n,
                              virtio_has_feature(features, VIRTIO_NET_F_MQ));

    virtio_net_set_mrg_rx_bufs(n,
                               virtio_has_feature(features,
                                                  VIRTIO_NET_F_MRG_RXBUF),
                               virtio_has_feature(features,
                                                  VIRTIO_F_VERSION_1));

    n->rsc4_enabled = virtio_has_feature(features, VIRTIO_NET_F_RSC_EXT) &&
        virtio_has_feature(features, VIRTIO_NET_F_GUEST_TSO4);
    n->rsc6_enabled = virtio_has_feature(features, VIRTIO_NET_F_RSC_EXT) &&
        virtio_has_feature(features, VIRTIO_NET_F_GUEST_TSO6);

    if (n->has_vnet_hdr) {
        n->curr_guest_offloads =
            virtio_net_guest_offloads_by_features(features);
        virtio_net_apply_guest_offloads(n);
    }

    for (i = 0; i < n->max_queues; i++) {
        NetClientState *nc = qemu_get_subqueue(n->nic, i);

        if (!get_vhost_net(nc->peer)) {
            continue;
        }
        vhost_net_ack_features(get_vhost_net(nc->peer), features);
    }

    if (virtio_has_feature(features, VIRTIO_NET_F_CTRL_VLAN)) {
        memset(n->vlans, 0, MAX_VLAN >> 3);
    } else {
        memset(n->vlans, 0xff, MAX_VLAN >> 3);
    }

    if (virtio_has_feature(features, VIRTIO_NET_F_STANDBY)) {
        qapi_event_send_failover_negotiated(n->netclient_name);
        atomic_set(&n->primary_should_be_hidden, false);
        failover_add_primary(n, &err);
        if (err) {
            n->primary_dev = virtio_connect_failover_devices(n, n->qdev, &err);
            if (err) {
                goto out_err;
            }
            failover_add_primary(n, &err);
            if (err) {
                goto out_err;
            }
        }
    }
    return;

out_err:
    if (err) {
        warn_report_err(err);
    }
}

static int virtio_net_handle_rx_mode(VirtIONet *n, uint8_t cmd,
                                     struct iovec *iov, unsigned int iov_cnt)
{
    uint8_t on;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    s = iov_to_buf(iov, iov_cnt, 0, &on, sizeof(on));
    if (s != sizeof(on)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd == VIRTIO_NET_CTRL_RX_PROMISC) {
        n->promisc = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_ALLMULTI) {
        n->allmulti = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_ALLUNI) {
        n->alluni = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOMULTI) {
        n->nomulti = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOUNI) {
        n->nouni = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOBCAST) {
        n->nobcast = on;
    } else {
        return VIRTIO_NET_ERR;
    }

    rxfilter_notify(nc);

    return VIRTIO_NET_OK;
}

static int virtio_net_handle_offloads(VirtIONet *n, uint8_t cmd,
                                      struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint64_t offloads;
    size_t s;

    if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
        return VIRTIO_NET_ERR;
    }

    s = iov_to_buf(iov, iov_cnt, 0, &offloads, sizeof(offloads));
    if (s != sizeof(offloads)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd == VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET) {
        uint64_t supported_offloads;

        offloads = virtio_ldq_p(vdev, &offloads);

        if (!n->has_vnet_hdr) {
            return VIRTIO_NET_ERR;
        }

        n->rsc4_enabled = virtio_has_feature(offloads, VIRTIO_NET_F_RSC_EXT) &&
            virtio_has_feature(offloads, VIRTIO_NET_F_GUEST_TSO4);
        n->rsc6_enabled = virtio_has_feature(offloads, VIRTIO_NET_F_RSC_EXT) &&
            virtio_has_feature(offloads, VIRTIO_NET_F_GUEST_TSO6);
        virtio_clear_feature(&offloads, VIRTIO_NET_F_RSC_EXT);

        supported_offloads = virtio_net_supported_guest_offloads(n);
        if (offloads & ~supported_offloads) {
            return VIRTIO_NET_ERR;
        }

        n->curr_guest_offloads = offloads;
        virtio_net_apply_guest_offloads(n);

        return VIRTIO_NET_OK;
    } else {
        return VIRTIO_NET_ERR;
    }
}

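/*
 * VIRTIO_NET_CTRL_MAC_TABLE_SET carries two virtio_net_ctrl_mac blocks in a
 * row (unicast addresses first, then multicast), each an entry count
 * followed by that many 6-byte MACs; the filter table is rebuilt from them
 * and overflow is flagged when a block does not fit in MAC_TABLE_ENTRIES.
 */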
static int virtio_net_handle_mac(VirtIONet *n, uint8_t cmd,
                                 struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct virtio_net_ctrl_mac mac_data;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    if (cmd == VIRTIO_NET_CTRL_MAC_ADDR_SET) {
        if (iov_size(iov, iov_cnt) != sizeof(n->mac)) {
            return VIRTIO_NET_ERR;
        }
        s = iov_to_buf(iov, iov_cnt, 0, &n->mac, sizeof(n->mac));
        assert(s == sizeof(n->mac));
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
        rxfilter_notify(nc);

        return VIRTIO_NET_OK;
    }

    if (cmd != VIRTIO_NET_CTRL_MAC_TABLE_SET) {
        return VIRTIO_NET_ERR;
    }

    int in_use = 0;
    int first_multi = 0;
    uint8_t uni_overflow = 0;
    uint8_t multi_overflow = 0;
    uint8_t *macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);

    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
    mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
    if (s != sizeof(mac_data.entries)) {
        goto error;
    }
    iov_discard_front(&iov, &iov_cnt, s);

    if (mac_data.entries * ETH_ALEN > iov_size(iov, iov_cnt)) {
        goto error;
    }

    if (mac_data.entries <= MAC_TABLE_ENTRIES) {
        s = iov_to_buf(iov, iov_cnt, 0, macs,
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
            goto error;
        }
        in_use += mac_data.entries;
    } else {
        uni_overflow = 1;
    }

    iov_discard_front(&iov, &iov_cnt, mac_data.entries * ETH_ALEN);

    first_multi = in_use;

    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
    mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
    if (s != sizeof(mac_data.entries)) {
        goto error;
    }

    iov_discard_front(&iov, &iov_cnt, s);

    if (mac_data.entries * ETH_ALEN != iov_size(iov, iov_cnt)) {
        goto error;
    }

    if (mac_data.entries <= MAC_TABLE_ENTRIES - in_use) {
        s = iov_to_buf(iov, iov_cnt, 0, &macs[in_use * ETH_ALEN],
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
            goto error;
        }
        in_use += mac_data.entries;
    } else {
        multi_overflow = 1;
    }

    n->mac_table.in_use = in_use;
    n->mac_table.first_multi = first_multi;
    n->mac_table.uni_overflow = uni_overflow;
    n->mac_table.multi_overflow = multi_overflow;
    memcpy(n->mac_table.macs, macs, MAC_TABLE_ENTRIES * ETH_ALEN);
    g_free(macs);
    rxfilter_notify(nc);

    return VIRTIO_NET_OK;

error:
    g_free(macs);
    return VIRTIO_NET_ERR;
}

static int virtio_net_handle_vlan_table(VirtIONet *n, uint8_t cmd,
                                        struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint16_t vid;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    s = iov_to_buf(iov, iov_cnt, 0, &vid, sizeof(vid));
    vid = virtio_lduw_p(vdev, &vid);
    if (s != sizeof(vid)) {
        return VIRTIO_NET_ERR;
    }

    if (vid >= MAX_VLAN)
        return VIRTIO_NET_ERR;

    if (cmd == VIRTIO_NET_CTRL_VLAN_ADD)
        n->vlans[vid >> 5] |= (1U << (vid & 0x1f));
    else if (cmd == VIRTIO_NET_CTRL_VLAN_DEL)
        n->vlans[vid >> 5] &= ~(1U << (vid & 0x1f));
    else
        return VIRTIO_NET_ERR;

    rxfilter_notify(nc);

    return VIRTIO_NET_OK;
}

static int virtio_net_handle_announce(VirtIONet *n, uint8_t cmd,
                                      struct iovec *iov, unsigned int iov_cnt)
{
    trace_virtio_net_handle_announce(n->announce_timer.round);
    if (cmd == VIRTIO_NET_CTRL_ANNOUNCE_ACK &&
        n->status & VIRTIO_NET_S_ANNOUNCE) {
        n->status &= ~VIRTIO_NET_S_ANNOUNCE;
        if (n->announce_timer.round) {
            qemu_announce_timer_step(&n->announce_timer);
        }
        return VIRTIO_NET_OK;
    } else {
        return VIRTIO_NET_ERR;
    }
}

static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
                                struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct virtio_net_ctrl_mq mq;
    size_t s;
    uint16_t queues;

    s = iov_to_buf(iov, iov_cnt, 0, &mq, sizeof(mq));
    if (s != sizeof(mq)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd != VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
        return VIRTIO_NET_ERR;
    }

    queues = virtio_lduw_p(vdev, &mq.virtqueue_pairs);

    if (queues < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
        queues > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
        queues > n->max_queues ||
        !n->multiqueue) {
        return VIRTIO_NET_ERR;
    }

    n->curr_queues = queues;
    /* stop the backend before changing the number of queues to avoid
     * handling a disabled queue */
    virtio_net_set_status(vdev, vdev->status);
    virtio_net_set_queues(n);

    return VIRTIO_NET_OK;
}

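/*
 * Control virtqueue handler: each request begins with a virtio_net_ctrl_hdr
 * selecting the command class (rx mode, MAC, VLAN, announce, MQ, guest
 * offloads); the one-byte ack status is written back into the request's
 * in-buffer before the element is pushed and the guest is notified.
 */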
static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_ctrl_hdr ctrl;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    VirtQueueElement *elem;
    size_t s;
    struct iovec *iov, *iov2;
    unsigned int iov_cnt;

    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }
        if (iov_size(elem->in_sg, elem->in_num) < sizeof(status) ||
            iov_size(elem->out_sg, elem->out_num) < sizeof(ctrl)) {
            virtio_error(vdev, "virtio-net ctrl missing headers");
            virtqueue_detach_element(vq, elem, 0);
            g_free(elem);
            break;
        }

        iov_cnt = elem->out_num;
        iov2 = iov = g_memdup(elem->out_sg, sizeof(struct iovec) * elem->out_num);
        s = iov_to_buf(iov, iov_cnt, 0, &ctrl, sizeof(ctrl));
        iov_discard_front(&iov, &iov_cnt, sizeof(ctrl));
        if (s != sizeof(ctrl)) {
            status = VIRTIO_NET_ERR;
        } else if (ctrl.class == VIRTIO_NET_CTRL_RX) {
            status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_MAC) {
            status = virtio_net_handle_mac(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) {
            status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_ANNOUNCE) {
            status = virtio_net_handle_announce(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_MQ) {
            status = virtio_net_handle_mq(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) {
            status = virtio_net_handle_offloads(n, ctrl.cmd, iov, iov_cnt);
        }

        s = iov_from_buf(elem->in_sg, elem->in_num, 0, &status, sizeof(status));
        assert(s == sizeof(status));

        virtqueue_push(vq, elem, sizeof(status));
        virtio_notify(vdev, vq);
        g_free(iov2);
        g_free(elem);
    }
}

/* RX */

static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int queue_index = vq2q(virtio_get_queue_index(vq));

    qemu_flush_queued_packets(qemu_get_subqueue(n->nic, queue_index));
}

static bool virtio_net_can_receive(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);

    if (!vdev->vm_running) {
        return false;
    }

    if (nc->queue_index >= n->curr_queues) {
        return false;
    }

    if (!virtio_queue_ready(q->rx_vq) ||
        !(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return false;
    }

    return true;
}

static int virtio_net_has_buffers(VirtIONetQueue *q, int bufsize)
{
    VirtIONet *n = q->n;
    if (virtio_queue_empty(q->rx_vq) ||
        (n->mergeable_rx_bufs &&
         !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
        virtio_queue_set_notification(q->rx_vq, 1);

        /* To avoid a race condition where the guest has made some buffers
         * available after the above check but before notification was
         * enabled, check for available buffers again.
         */
        if (virtio_queue_empty(q->rx_vq) ||
            (n->mergeable_rx_bufs &&
             !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
            return 0;
        }
    }

    virtio_queue_set_notification(q->rx_vq, 0);
    return 1;
}

static void virtio_net_hdr_swap(VirtIODevice *vdev, struct virtio_net_hdr *hdr)
{
    virtio_tswap16s(vdev, &hdr->hdr_len);
    virtio_tswap16s(vdev, &hdr->gso_size);
    virtio_tswap16s(vdev, &hdr->csum_start);
    virtio_tswap16s(vdev, &hdr->csum_offset);
}

/* dhclient uses AF_PACKET but doesn't pass auxdata to the kernel so
 * it never finds out that the packets don't have valid checksums.  This
 * causes dhclient to get upset.  Fedora's carried a patch for ages to
 * fix this with Xen but it hasn't appeared in an upstream release of
 * dhclient yet.
 *
 * To avoid breaking existing guests, we catch udp packets and add
 * checksums.  This is terrible but it's better than hacking the guest
 * kernels.
 *
 * N.B. if we introduce a zero-copy API, this operation is no longer free so
 * we should provide a mechanism to disable it to avoid polluting the host
 * cache.
 */
static void work_around_broken_dhclient(struct virtio_net_hdr *hdr,
                                        uint8_t *buf, size_t size)
{
    if ((hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && /* missing csum */
        (size > 27 && size < 1500) && /* normal sized MTU */
        (buf[12] == 0x08 && buf[13] == 0x00) && /* ethertype == IPv4 */
        (buf[23] == 17) && /* ip.protocol == UDP */
        (buf[34] == 0 && buf[35] == 67)) { /* udp.srcport == bootps */
        net_checksum_calculate(buf, size);
        hdr->flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM;
    }
}

static void receive_header(VirtIONet *n, const struct iovec *iov, int iov_cnt,
                           const void *buf, size_t size)
{
    if (n->has_vnet_hdr) {
        /* FIXME this cast is evil */
        void *wbuf = (void *)buf;
        work_around_broken_dhclient(wbuf, wbuf + n->host_hdr_len,
                                    size - n->host_hdr_len);

        if (n->needs_vnet_hdr_swap) {
            virtio_net_hdr_swap(VIRTIO_DEVICE(n), wbuf);
        }
        iov_from_buf(iov, iov_cnt, 0, buf, sizeof(struct virtio_net_hdr));
    } else {
        struct virtio_net_hdr hdr = {
            .flags = 0,
            .gso_type = VIRTIO_NET_HDR_GSO_NONE
        };
        iov_from_buf(iov, iov_cnt, 0, &hdr, sizeof hdr);
    }
}

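/*
 * Apply the software RX filter: check the VLAN table first, then the
 * broadcast/multicast/unicast policy bits and the MAC filter table.
 * Returns 1 if the frame should be delivered to the guest, 0 to drop it.
 */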
static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
{
    static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
    static const uint8_t vlan[] = {0x81, 0x00};
    uint8_t *ptr = (uint8_t *)buf;
    int i;

    if (n->promisc)
        return 1;

    ptr += n->host_hdr_len;

    if (!memcmp(&ptr[12], vlan, sizeof(vlan))) {
        int vid = lduw_be_p(ptr + 14) & 0xfff;
        if (!(n->vlans[vid >> 5] & (1U << (vid & 0x1f))))
            return 0;
    }

    if (ptr[0] & 1) { // multicast
        if (!memcmp(ptr, bcast, sizeof(bcast))) {
            return !n->nobcast;
        } else if (n->nomulti) {
            return 0;
        } else if (n->allmulti || n->mac_table.multi_overflow) {
            return 1;
        }

        for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
            if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
                return 1;
            }
        }
    } else { // unicast
        if (n->nouni) {
            return 0;
        } else if (n->alluni || n->mac_table.uni_overflow) {
            return 1;
        } else if (!memcmp(ptr, n->mac, ETH_ALEN)) {
            return 1;
        }

        for (i = 0; i < n->mac_table.first_multi; i++) {
            if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
                return 1;
            }
        }
    }

    return 0;
}

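/*
 * Core RX path: pop as many RX virtqueue elements as needed for the packet,
 * prepend the virtio-net header to the first element, copy the payload in,
 * and for mergeable buffers patch num_buffers once the count is known.
 */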
97cd965c PB |
1390 | static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf, |
1391 | size_t size) | |
fbe78f4f | 1392 | { |
cc1f0f45 | 1393 | VirtIONet *n = qemu_get_nic_opaque(nc); |
fed699f9 | 1394 | VirtIONetQueue *q = virtio_net_get_subqueue(nc); |
17a0ca55 | 1395 | VirtIODevice *vdev = VIRTIO_DEVICE(n); |
63c58728 MT |
1396 | struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE]; |
1397 | struct virtio_net_hdr_mrg_rxbuf mhdr; | |
1398 | unsigned mhdr_cnt = 0; | |
22cc84db | 1399 | size_t offset, i, guest_offset; |
fbe78f4f | 1400 | |
fed699f9 | 1401 | if (!virtio_net_can_receive(nc)) { |
cdd5cc12 | 1402 | return -1; |
b356f76d | 1403 | } |
cdd5cc12 | 1404 | |
940cda94 | 1405 | /* hdr_len refers to the header we supply to the guest */ |
0c87e93e | 1406 | if (!virtio_net_has_buffers(q, size + n->guest_hdr_len - n->host_hdr_len)) { |
8aeff62d | 1407 | return 0; |
0c87e93e | 1408 | } |
fbe78f4f | 1409 | |
3831ab20 | 1410 | if (!receive_filter(n, buf, size)) |
4f1c942b | 1411 | return size; |
3831ab20 | 1412 | |
fbe78f4f AL |
1413 | offset = i = 0; |
1414 | ||
1415 | while (offset < size) { | |
51b19ebe | 1416 | VirtQueueElement *elem; |
fbe78f4f | 1417 | int len, total; |
51b19ebe | 1418 | const struct iovec *sg; |
fbe78f4f | 1419 | |
22c253d9 | 1420 | total = 0; |
fbe78f4f | 1421 | |
51b19ebe PB |
1422 | elem = virtqueue_pop(q->rx_vq, sizeof(VirtQueueElement)); |
1423 | if (!elem) { | |
ba10b9c0 GK |
1424 | if (i) { |
1425 | virtio_error(vdev, "virtio-net unexpected empty queue: " | |
1426 | "i %zd mergeable %d offset %zd, size %zd, " | |
1427 | "guest hdr len %zd, host hdr len %zd " | |
1428 | "guest features 0x%" PRIx64, | |
1429 | i, n->mergeable_rx_bufs, offset, size, | |
1430 | n->guest_hdr_len, n->host_hdr_len, | |
1431 | vdev->guest_features); | |
1432 | } | |
1433 | return -1; | |
fbe78f4f AL |
1434 | } |
1435 | ||
51b19ebe | 1436 | if (elem->in_num < 1) { |
ba10b9c0 GK |
1437 | virtio_error(vdev, |
1438 | "virtio-net receive queue contains no in buffers"); | |
1439 | virtqueue_detach_element(q->rx_vq, elem, 0); | |
1440 | g_free(elem); | |
1441 | return -1; | |
fbe78f4f AL |
1442 | } |
1443 | ||
51b19ebe | 1444 | sg = elem->in_sg; |
fbe78f4f | 1445 | if (i == 0) { |
c8d28e7e | 1446 | assert(offset == 0); |
63c58728 MT |
1447 | if (n->mergeable_rx_bufs) { |
1448 | mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg), | |
51b19ebe | 1449 | sg, elem->in_num, |
63c58728 MT |
1450 | offsetof(typeof(mhdr), num_buffers), |
1451 | sizeof(mhdr.num_buffers)); | |
1452 | } | |
fbe78f4f | 1453 | |
51b19ebe | 1454 | receive_header(n, sg, elem->in_num, buf, size); |
c8d28e7e | 1455 | offset = n->host_hdr_len; |
e35e23f6 | 1456 | total += n->guest_hdr_len; |
22cc84db MT |
1457 | guest_offset = n->guest_hdr_len; |
1458 | } else { | |
1459 | guest_offset = 0; | |
fbe78f4f AL |
1460 | } |
1461 | ||
1462 | /* copy in packet. ugh */ | |
51b19ebe | 1463 | len = iov_from_buf(sg, elem->in_num, guest_offset, |
dcf6f5e1 | 1464 | buf + offset, size - offset); |
fbe78f4f | 1465 | total += len; |
279a4253 MT |
1466 | offset += len; |
1467 | /* If buffers can't be merged, at this point we | |
1468 | * must have consumed the complete packet. | |
1469 | * Otherwise, drop it. */ | |
1470 | if (!n->mergeable_rx_bufs && offset < size) { | |
27e57efe | 1471 | virtqueue_unpop(q->rx_vq, elem, total); |
51b19ebe | 1472 | g_free(elem); |
279a4253 MT |
1473 | return size; |
1474 | } | |
fbe78f4f AL |
1475 | |
1476 | /* signal other side */ | |
51b19ebe PB |
1477 | virtqueue_fill(q->rx_vq, elem, total, i++); |
1478 | g_free(elem); | |
fbe78f4f AL |
1479 | } |
1480 | ||
63c58728 | 1481 | if (mhdr_cnt) { |
1399c60d | 1482 | virtio_stw_p(vdev, &mhdr.num_buffers, i); |
63c58728 MT |
1483 | iov_from_buf(mhdr_sg, mhdr_cnt, |
1484 | 0, | |
1485 | &mhdr.num_buffers, sizeof mhdr.num_buffers); | |
44b15bc5 | 1486 | } |
fbe78f4f | 1487 | |
0c87e93e | 1488 | virtqueue_flush(q->rx_vq, i); |
17a0ca55 | 1489 | virtio_notify(vdev, q->rx_vq); |
4f1c942b MM |
1490 | |
1491 | return size; | |
fbe78f4f AL |
1492 | } |
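/*
 * Illustrative note on the mergeable-rx-buffers path above: num_buffers in
 * struct virtio_net_hdr_mrg_rxbuf cannot be known until the whole packet has
 * been scattered, so the iov_copy() at i == 0 only records which iovec slice
 * of the first buffer covers that 2-byte field (mhdr_sg/mhdr_cnt).  Once the
 * loop has filled i buffers, the final count is stored with the right
 * endianness (virtio_stw_p) and written back into guest memory via
 * iov_from_buf().
 */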
1493 | ||
2974e916 | 1494 | static ssize_t virtio_net_do_receive(NetClientState *nc, const uint8_t *buf, |
97cd965c PB |
1495 | size_t size) |
1496 | { | |
068ddfa9 | 1497 | RCU_READ_LOCK_GUARD(); |
97cd965c | 1498 | |
068ddfa9 | 1499 | return virtio_net_receive_rcu(nc, buf, size); |
97cd965c PB |
1500 | } |
1501 | ||
2974e916 YB |
1502 | static void virtio_net_rsc_extract_unit4(VirtioNetRscChain *chain, |
1503 | const uint8_t *buf, | |
1504 | VirtioNetRscUnit *unit) | |
1505 | { | |
1506 | uint16_t ip_hdrlen; | |
1507 | struct ip_header *ip; | |
1508 | ||
1509 | ip = (struct ip_header *)(buf + chain->n->guest_hdr_len | |
1510 | + sizeof(struct eth_header)); | |
1511 | unit->ip = (void *)ip; | |
1512 | ip_hdrlen = (ip->ip_ver_len & 0xF) << 2; | |
1513 | unit->ip_plen = &ip->ip_len; | |
1514 | unit->tcp = (struct tcp_header *)(((uint8_t *)unit->ip) + ip_hdrlen); | |
1515 | unit->tcp_hdrlen = (htons(unit->tcp->th_offset_flags) & 0xF000) >> 10; | |
1516 | unit->payload = htons(*unit->ip_plen) - ip_hdrlen - unit->tcp_hdrlen; | |
1517 | } | |
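/*
 * Worked example for the header-length arithmetic above: the TCP data offset
 * sits in the top four bits of th_offset_flags and is counted in 32-bit
 * words, so ((flags & 0xF000) >> 12) * 4 == (flags & 0xF000) >> 10 yields the
 * header length in bytes; with no options the offset is 5, i.e.
 * 0x5000 >> 10 == 20 bytes.  Likewise the low nibble of ip_ver_len shifted
 * left by 2 gives the IPv4 header length, 5 << 2 == 20 for an option-less
 * header.
 */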
1518 | ||
1519 | static void virtio_net_rsc_extract_unit6(VirtioNetRscChain *chain, | |
1520 | const uint8_t *buf, | |
1521 | VirtioNetRscUnit *unit) | |
1522 | { | |
1523 | struct ip6_header *ip6; | |
1524 | ||
1525 | ip6 = (struct ip6_header *)(buf + chain->n->guest_hdr_len | |
1526 | + sizeof(struct eth_header)); | |
1527 | unit->ip = ip6; | |
1528 | unit->ip_plen = &(ip6->ip6_ctlun.ip6_un1.ip6_un1_plen); | |
78ee6bd0 | 1529 | unit->tcp = (struct tcp_header *)(((uint8_t *)unit->ip) |
2974e916 YB |
1530 | + sizeof(struct ip6_header)); |
1531 | unit->tcp_hdrlen = (htons(unit->tcp->th_offset_flags) & 0xF000) >> 10; | |
1532 | ||
1533 | /* There is a difference between the payload length in IPv4 and IPv6:
1534 | the IP header is excluded in IPv6 */
1535 | unit->payload = htons(*unit->ip_plen) - unit->tcp_hdrlen; | |
1536 | } | |
1537 | ||
1538 | static size_t virtio_net_rsc_drain_seg(VirtioNetRscChain *chain, | |
1539 | VirtioNetRscSeg *seg) | |
1540 | { | |
1541 | int ret; | |
1542 | struct virtio_net_hdr *h; | |
1543 | ||
1544 | h = (struct virtio_net_hdr *)seg->buf; | |
1545 | h->flags = 0; | |
1546 | h->gso_type = VIRTIO_NET_HDR_GSO_NONE; | |
1547 | ||
1548 | if (seg->is_coalesced) { | |
1549 | *virtio_net_rsc_ext_num_packets(h) = seg->packets; | |
1550 | *virtio_net_rsc_ext_num_dupacks(h) = seg->dup_ack; | |
1551 | h->flags = VIRTIO_NET_HDR_F_RSC_INFO; | |
1552 | if (chain->proto == ETH_P_IP) { | |
1553 | h->gso_type = VIRTIO_NET_HDR_GSO_TCPV4; | |
1554 | } else { | |
1555 | h->gso_type = VIRTIO_NET_HDR_GSO_TCPV6; | |
1556 | } | |
1557 | } | |
1558 | ||
1559 | ret = virtio_net_do_receive(seg->nc, seg->buf, seg->size); | |
1560 | QTAILQ_REMOVE(&chain->buffers, seg, next); | |
1561 | g_free(seg->buf); | |
1562 | g_free(seg); | |
1563 | ||
1564 | return ret; | |
1565 | } | |
1566 | ||
1567 | static void virtio_net_rsc_purge(void *opq) | |
1568 | { | |
1569 | VirtioNetRscSeg *seg, *rn; | |
1570 | VirtioNetRscChain *chain = (VirtioNetRscChain *)opq; | |
1571 | ||
1572 | QTAILQ_FOREACH_SAFE(seg, &chain->buffers, next, rn) { | |
1573 | if (virtio_net_rsc_drain_seg(chain, seg) == 0) { | |
1574 | chain->stat.purge_failed++; | |
1575 | continue; | |
1576 | } | |
1577 | } | |
1578 | ||
1579 | chain->stat.timer++; | |
1580 | if (!QTAILQ_EMPTY(&chain->buffers)) { | |
1581 | timer_mod(chain->drain_timer, | |
1582 | qemu_clock_get_ns(QEMU_CLOCK_HOST) + chain->n->rsc_timeout); | |
1583 | } | |
1584 | } | |
1585 | ||
1586 | static void virtio_net_rsc_cleanup(VirtIONet *n) | |
1587 | { | |
1588 | VirtioNetRscChain *chain, *rn_chain; | |
1589 | VirtioNetRscSeg *seg, *rn_seg; | |
1590 | ||
1591 | QTAILQ_FOREACH_SAFE(chain, &n->rsc_chains, next, rn_chain) { | |
1592 | QTAILQ_FOREACH_SAFE(seg, &chain->buffers, next, rn_seg) { | |
1593 | QTAILQ_REMOVE(&chain->buffers, seg, next); | |
1594 | g_free(seg->buf); | |
1595 | g_free(seg); | |
1596 | } | |
1597 | ||
1598 | timer_del(chain->drain_timer); | |
1599 | timer_free(chain->drain_timer); | |
1600 | QTAILQ_REMOVE(&n->rsc_chains, chain, next); | |
1601 | g_free(chain); | |
1602 | } | |
1603 | } | |
1604 | ||
1605 | static void virtio_net_rsc_cache_buf(VirtioNetRscChain *chain, | |
1606 | NetClientState *nc, | |
1607 | const uint8_t *buf, size_t size) | |
1608 | { | |
1609 | uint16_t hdr_len; | |
1610 | VirtioNetRscSeg *seg; | |
1611 | ||
1612 | hdr_len = chain->n->guest_hdr_len; | |
1613 | seg = g_malloc(sizeof(VirtioNetRscSeg)); | |
1614 | seg->buf = g_malloc(hdr_len + sizeof(struct eth_header) | |
1615 | + sizeof(struct ip6_header) + VIRTIO_NET_MAX_TCP_PAYLOAD); | |
1616 | memcpy(seg->buf, buf, size); | |
1617 | seg->size = size; | |
1618 | seg->packets = 1; | |
1619 | seg->dup_ack = 0; | |
1620 | seg->is_coalesced = 0; | |
1621 | seg->nc = nc; | |
1622 | ||
1623 | QTAILQ_INSERT_TAIL(&chain->buffers, seg, next); | |
1624 | chain->stat.cache++; | |
1625 | ||
1626 | switch (chain->proto) { | |
1627 | case ETH_P_IP: | |
1628 | virtio_net_rsc_extract_unit4(chain, seg->buf, &seg->unit); | |
1629 | break; | |
1630 | case ETH_P_IPV6: | |
1631 | virtio_net_rsc_extract_unit6(chain, seg->buf, &seg->unit); | |
1632 | break; | |
1633 | default: | |
1634 | g_assert_not_reached(); | |
1635 | } | |
1636 | } | |
1637 | ||
1638 | static int32_t virtio_net_rsc_handle_ack(VirtioNetRscChain *chain, | |
1639 | VirtioNetRscSeg *seg, | |
1640 | const uint8_t *buf, | |
1641 | struct tcp_header *n_tcp, | |
1642 | struct tcp_header *o_tcp) | |
1643 | { | |
1644 | uint32_t nack, oack; | |
1645 | uint16_t nwin, owin; | |
1646 | ||
1647 | nack = htonl(n_tcp->th_ack); | |
1648 | nwin = htons(n_tcp->th_win); | |
1649 | oack = htonl(o_tcp->th_ack); | |
1650 | owin = htons(o_tcp->th_win); | |
1651 | ||
1652 | if ((nack - oack) >= VIRTIO_NET_MAX_TCP_PAYLOAD) { | |
1653 | chain->stat.ack_out_of_win++; | |
1654 | return RSC_FINAL; | |
1655 | } else if (nack == oack) { | |
1656 | /* duplicated ack or window probe */ | |
1657 | if (nwin == owin) { | |
1658 | /* duplicated ack; count it as a dup ack (the WHQL test expects at most 1) */
1659 | chain->stat.dup_ack++; | |
1660 | return RSC_FINAL; | |
1661 | } else { | |
1662 | /* Coalesce window update */ | |
1663 | o_tcp->th_win = n_tcp->th_win; | |
1664 | chain->stat.win_update++; | |
1665 | return RSC_COALESCE; | |
1666 | } | |
1667 | } else { | |
1668 | /* pure ack, go to 'C', finalize */
1669 | chain->stat.pure_ack++; | |
1670 | return RSC_FINAL; | |
1671 | } | |
1672 | } | |
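/*
 * Illustrative note: the (nack - oack) >= VIRTIO_NET_MAX_TCP_PAYLOAD test
 * above relies on unsigned wrap-around.  If the new ack is older than the
 * cached one (e.g. oack == 1000, nack == 900), nack - oack wraps to a value
 * far above 65535 and the segment finalizes the chain; a small forward step
 * such as nack == oack + 3000 stays below the limit and is examined further.
 */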
1673 | ||
1674 | static int32_t virtio_net_rsc_coalesce_data(VirtioNetRscChain *chain, | |
1675 | VirtioNetRscSeg *seg, | |
1676 | const uint8_t *buf, | |
1677 | VirtioNetRscUnit *n_unit) | |
1678 | { | |
1679 | void *data; | |
1680 | uint16_t o_ip_len; | |
1681 | uint32_t nseq, oseq; | |
1682 | VirtioNetRscUnit *o_unit; | |
1683 | ||
1684 | o_unit = &seg->unit; | |
1685 | o_ip_len = htons(*o_unit->ip_plen); | |
1686 | nseq = htonl(n_unit->tcp->th_seq); | |
1687 | oseq = htonl(o_unit->tcp->th_seq); | |
1688 | ||
1689 | /* out of order or retransmitted. */ | |
1690 | if ((nseq - oseq) > VIRTIO_NET_MAX_TCP_PAYLOAD) { | |
1691 | chain->stat.data_out_of_win++; | |
1692 | return RSC_FINAL; | |
1693 | } | |
1694 | ||
1695 | data = ((uint8_t *)n_unit->tcp) + n_unit->tcp_hdrlen; | |
1696 | if (nseq == oseq) { | |
1697 | if ((o_unit->payload == 0) && n_unit->payload) { | |
1698 | /* From no payload to payload: the normal case, not a dup ack etc. */
1699 | chain->stat.data_after_pure_ack++; | |
1700 | goto coalesce; | |
1701 | } else { | |
1702 | return virtio_net_rsc_handle_ack(chain, seg, buf, | |
1703 | n_unit->tcp, o_unit->tcp); | |
1704 | } | |
1705 | } else if ((nseq - oseq) != o_unit->payload) { | |
1706 | /* Not a consistent packet, out of order */ | |
1707 | chain->stat.data_out_of_order++; | |
1708 | return RSC_FINAL; | |
1709 | } else { | |
1710 | coalesce: | |
1711 | if ((o_ip_len + n_unit->payload) > chain->max_payload) { | |
1712 | chain->stat.over_size++; | |
1713 | return RSC_FINAL; | |
1714 | } | |
1715 | ||
1716 | /* The data is contiguous; the payload length field differs between v4
1717 | and v6, so use the field value to update and record the new data len */
1718 | o_unit->payload += n_unit->payload; /* update new data len */ | |
1719 | ||
1720 | /* update field in ip header */ | |
1721 | *o_unit->ip_plen = htons(o_ip_len + n_unit->payload); | |
1722 | ||
1723 | /* Carry the new segment's flags (including 'PUSH') over: the WHQL test
1724 | guide says 'PUSH' can be coalesced for a Windows guest, while this may
1725 | change the behavior for a Linux guest (only if it uses the RSC feature). */
1726 | o_unit->tcp->th_offset_flags = n_unit->tcp->th_offset_flags; | |
1727 | ||
1728 | o_unit->tcp->th_ack = n_unit->tcp->th_ack; | |
1729 | o_unit->tcp->th_win = n_unit->tcp->th_win; | |
1730 | ||
1731 | memmove(seg->buf + seg->size, data, n_unit->payload); | |
1732 | seg->size += n_unit->payload; | |
1733 | seg->packets++; | |
1734 | chain->stat.coalesced++; | |
1735 | return RSC_COALESCE; | |
1736 | } | |
1737 | } | |
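/*
 * Worked example for the contiguity check above: a new segment extends the
 * cached one only when nseq == oseq + o_unit->payload, i.e. (nseq - oseq)
 * equals the cached payload.  With oseq == 1000 and a cached payload of
 * 1460 bytes, a contiguous follow-up arrives with nseq == 2460; any other
 * value counts as data_out_of_order and finalizes the chain.  The window
 * test uses the same unsigned wrap-around trick as the ack check.
 */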
1738 | ||
1739 | static int32_t virtio_net_rsc_coalesce4(VirtioNetRscChain *chain, | |
1740 | VirtioNetRscSeg *seg, | |
1741 | const uint8_t *buf, size_t size, | |
1742 | VirtioNetRscUnit *unit) | |
1743 | { | |
1744 | struct ip_header *ip1, *ip2; | |
1745 | ||
1746 | ip1 = (struct ip_header *)(unit->ip); | |
1747 | ip2 = (struct ip_header *)(seg->unit.ip); | |
1748 | if ((ip1->ip_src ^ ip2->ip_src) || (ip1->ip_dst ^ ip2->ip_dst) | |
1749 | || (unit->tcp->th_sport ^ seg->unit.tcp->th_sport) | |
1750 | || (unit->tcp->th_dport ^ seg->unit.tcp->th_dport)) { | |
1751 | chain->stat.no_match++; | |
1752 | return RSC_NO_MATCH; | |
1753 | } | |
1754 | ||
1755 | return virtio_net_rsc_coalesce_data(chain, seg, buf, unit); | |
1756 | } | |
1757 | ||
1758 | static int32_t virtio_net_rsc_coalesce6(VirtioNetRscChain *chain, | |
1759 | VirtioNetRscSeg *seg, | |
1760 | const uint8_t *buf, size_t size, | |
1761 | VirtioNetRscUnit *unit) | |
1762 | { | |
1763 | struct ip6_header *ip1, *ip2; | |
1764 | ||
1765 | ip1 = (struct ip6_header *)(unit->ip); | |
1766 | ip2 = (struct ip6_header *)(seg->unit.ip); | |
1767 | if (memcmp(&ip1->ip6_src, &ip2->ip6_src, sizeof(struct in6_address)) | |
1768 | || memcmp(&ip1->ip6_dst, &ip2->ip6_dst, sizeof(struct in6_address)) | |
1769 | || (unit->tcp->th_sport ^ seg->unit.tcp->th_sport) | |
1770 | || (unit->tcp->th_dport ^ seg->unit.tcp->th_dport)) { | |
1771 | chain->stat.no_match++; | |
1772 | return RSC_NO_MATCH; | |
1773 | } | |
1774 | ||
1775 | return virtio_net_rsc_coalesce_data(chain, seg, buf, unit); | |
1776 | } | |
1777 | ||
1778 | /* Packets with 'SYN' should bypass; segments with other control flags
1779 | * should only be sent after the chain is drained, to prevent reordering */
1780 | static int virtio_net_rsc_tcp_ctrl_check(VirtioNetRscChain *chain, | |
1781 | struct tcp_header *tcp) | |
1782 | { | |
1783 | uint16_t tcp_hdr; | |
1784 | uint16_t tcp_flag; | |
1785 | ||
1786 | tcp_flag = htons(tcp->th_offset_flags); | |
1787 | tcp_hdr = (tcp_flag & VIRTIO_NET_TCP_HDR_LENGTH) >> 10; | |
1788 | tcp_flag &= VIRTIO_NET_TCP_FLAG;
1790 | if (tcp_flag & TH_SYN) { | |
1791 | chain->stat.tcp_syn++; | |
1792 | return RSC_BYPASS; | |
1793 | } | |
1794 | ||
1795 | if (tcp_flag & (TH_FIN | TH_URG | TH_RST | TH_ECE | TH_CWR)) { | |
1796 | chain->stat.tcp_ctrl_drain++; | |
1797 | return RSC_FINAL; | |
1798 | } | |
1799 | ||
1800 | if (tcp_hdr > sizeof(struct tcp_header)) { | |
1801 | chain->stat.tcp_all_opt++; | |
1802 | return RSC_FINAL; | |
1803 | } | |
1804 | ||
1805 | return RSC_CANDIDATE; | |
1806 | } | |
1807 | ||
1808 | static size_t virtio_net_rsc_do_coalesce(VirtioNetRscChain *chain, | |
1809 | NetClientState *nc, | |
1810 | const uint8_t *buf, size_t size, | |
1811 | VirtioNetRscUnit *unit) | |
1812 | { | |
1813 | int ret; | |
1814 | VirtioNetRscSeg *seg, *nseg; | |
1815 | ||
1816 | if (QTAILQ_EMPTY(&chain->buffers)) { | |
1817 | chain->stat.empty_cache++; | |
1818 | virtio_net_rsc_cache_buf(chain, nc, buf, size); | |
1819 | timer_mod(chain->drain_timer, | |
1820 | qemu_clock_get_ns(QEMU_CLOCK_HOST) + chain->n->rsc_timeout); | |
1821 | return size; | |
1822 | } | |
1823 | ||
1824 | QTAILQ_FOREACH_SAFE(seg, &chain->buffers, next, nseg) { | |
1825 | if (chain->proto == ETH_P_IP) { | |
1826 | ret = virtio_net_rsc_coalesce4(chain, seg, buf, size, unit); | |
1827 | } else { | |
1828 | ret = virtio_net_rsc_coalesce6(chain, seg, buf, size, unit); | |
1829 | } | |
1830 | ||
1831 | if (ret == RSC_FINAL) { | |
1832 | if (virtio_net_rsc_drain_seg(chain, seg) == 0) { | |
1833 | /* Send failed */ | |
1834 | chain->stat.final_failed++; | |
1835 | return 0; | |
1836 | } | |
1837 | ||
1838 | /* Send current packet */ | |
1839 | return virtio_net_do_receive(nc, buf, size); | |
1840 | } else if (ret == RSC_NO_MATCH) { | |
1841 | continue; | |
1842 | } else { | |
1843 | /* Coalesced; set the coalesced flag so the IPv4 checksum is recalculated */
1844 | seg->is_coalesced = 1; | |
1845 | return size; | |
1846 | } | |
1847 | } | |
1848 | ||
1849 | chain->stat.no_match_cache++; | |
1850 | virtio_net_rsc_cache_buf(chain, nc, buf, size); | |
1851 | return size; | |
1852 | } | |
1853 | ||
1854 | /* Drain a connection's buffered data; this avoids out-of-order segments */
1855 | static size_t virtio_net_rsc_drain_flow(VirtioNetRscChain *chain, | |
1856 | NetClientState *nc, | |
1857 | const uint8_t *buf, size_t size, | |
1858 | uint16_t ip_start, uint16_t ip_size, | |
1859 | uint16_t tcp_port) | |
1860 | { | |
1861 | VirtioNetRscSeg *seg, *nseg; | |
1862 | uint32_t ppair1, ppair2; | |
1863 | ||
1864 | ppair1 = *(uint32_t *)(buf + tcp_port); | |
1865 | QTAILQ_FOREACH_SAFE(seg, &chain->buffers, next, nseg) { | |
1866 | ppair2 = *(uint32_t *)(seg->buf + tcp_port); | |
1867 | if (memcmp(buf + ip_start, seg->buf + ip_start, ip_size) | |
1868 | || (ppair1 != ppair2)) { | |
1869 | continue; | |
1870 | } | |
1871 | if (virtio_net_rsc_drain_seg(chain, seg) == 0) { | |
1872 | chain->stat.drain_failed++; | |
1873 | } | |
1874 | ||
1875 | break; | |
1876 | } | |
1877 | ||
1878 | return virtio_net_do_receive(nc, buf, size); | |
1879 | } | |
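/*
 * Illustrative note: ppair1/ppair2 above read the four bytes at 'tcp_port',
 * which are the 16-bit source and destination ports at the start of the TCP
 * header, so a single 32-bit compare matches both ports at once; no byte
 * swapping is needed because both sides are read in the same (network)
 * order.  Together with the memcmp() over [ip_start, ip_start + ip_size)
 * this matches the full address/port tuple of the flow.
 */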
1880 | ||
1881 | static int32_t virtio_net_rsc_sanity_check4(VirtioNetRscChain *chain, | |
1882 | struct ip_header *ip, | |
1883 | const uint8_t *buf, size_t size) | |
1884 | { | |
1885 | uint16_t ip_len; | |
1886 | ||
1887 | /* Not an ipv4 packet */ | |
1888 | if (((ip->ip_ver_len & 0xF0) >> 4) != IP_HEADER_VERSION_4) { | |
1889 | chain->stat.ip_option++; | |
1890 | return RSC_BYPASS; | |
1891 | } | |
1892 | ||
1893 | /* Don't handle packets with ip option */ | |
1894 | if ((ip->ip_ver_len & 0xF) != VIRTIO_NET_IP4_HEADER_LENGTH) { | |
1895 | chain->stat.ip_option++; | |
1896 | return RSC_BYPASS; | |
1897 | } | |
1898 | ||
1899 | if (ip->ip_p != IPPROTO_TCP) { | |
1900 | chain->stat.bypass_not_tcp++; | |
1901 | return RSC_BYPASS; | |
1902 | } | |
1903 | ||
1904 | /* Don't handle packets with ip fragment */ | |
1905 | if (!(htons(ip->ip_off) & IP_DF)) { | |
1906 | chain->stat.ip_frag++; | |
1907 | return RSC_BYPASS; | |
1908 | } | |
1909 | ||
1910 | /* Don't handle packets with ecn flag */ | |
1911 | if (IPTOS_ECN(ip->ip_tos)) { | |
1912 | chain->stat.ip_ecn++; | |
1913 | return RSC_BYPASS; | |
1914 | } | |
1915 | ||
1916 | ip_len = htons(ip->ip_len); | |
1917 | if (ip_len < (sizeof(struct ip_header) + sizeof(struct tcp_header)) | |
1918 | || ip_len > (size - chain->n->guest_hdr_len - | |
1919 | sizeof(struct eth_header))) { | |
1920 | chain->stat.ip_hacked++; | |
1921 | return RSC_BYPASS; | |
1922 | } | |
1923 | ||
1924 | return RSC_CANDIDATE; | |
1925 | } | |
1926 | ||
1927 | static size_t virtio_net_rsc_receive4(VirtioNetRscChain *chain, | |
1928 | NetClientState *nc, | |
1929 | const uint8_t *buf, size_t size) | |
1930 | { | |
1931 | int32_t ret; | |
1932 | uint16_t hdr_len; | |
1933 | VirtioNetRscUnit unit; | |
1934 | ||
1935 | hdr_len = ((VirtIONet *)(chain->n))->guest_hdr_len; | |
1936 | ||
1937 | if (size < (hdr_len + sizeof(struct eth_header) + sizeof(struct ip_header) | |
1938 | + sizeof(struct tcp_header))) { | |
1939 | chain->stat.bypass_not_tcp++; | |
1940 | return virtio_net_do_receive(nc, buf, size); | |
1941 | } | |
1942 | ||
1943 | virtio_net_rsc_extract_unit4(chain, buf, &unit); | |
1944 | if (virtio_net_rsc_sanity_check4(chain, unit.ip, buf, size) | |
1945 | != RSC_CANDIDATE) { | |
1946 | return virtio_net_do_receive(nc, buf, size); | |
1947 | } | |
1948 | ||
1949 | ret = virtio_net_rsc_tcp_ctrl_check(chain, unit.tcp); | |
1950 | if (ret == RSC_BYPASS) { | |
1951 | return virtio_net_do_receive(nc, buf, size); | |
1952 | } else if (ret == RSC_FINAL) { | |
1953 | return virtio_net_rsc_drain_flow(chain, nc, buf, size, | |
1954 | ((hdr_len + sizeof(struct eth_header)) + 12), | |
1955 | VIRTIO_NET_IP4_ADDR_SIZE, | |
1956 | hdr_len + sizeof(struct eth_header) + sizeof(struct ip_header)); | |
1957 | } | |
1958 | ||
1959 | return virtio_net_rsc_do_coalesce(chain, nc, buf, size, &unit); | |
1960 | } | |
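/*
 * Worked example for the offsets passed to virtio_net_rsc_drain_flow()
 * above: with a 14-byte Ethernet header, ip_start points 12 bytes into the
 * IPv4 header, i.e. at ip_src, and ip_size (VIRTIO_NET_IP4_ADDR_SIZE == 8)
 * covers ip_src plus ip_dst.  tcp_port is hdr_len + sizeof(struct eth_header)
 * + sizeof(struct ip_header), the start of the TCP header; this is exact
 * because packets carrying IP options were already bypassed by the sanity
 * check.
 */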
1961 | ||
1962 | static int32_t virtio_net_rsc_sanity_check6(VirtioNetRscChain *chain, | |
1963 | struct ip6_header *ip6, | |
1964 | const uint8_t *buf, size_t size) | |
1965 | { | |
1966 | uint16_t ip_len; | |
1967 | ||
1968 | if (((ip6->ip6_ctlun.ip6_un1.ip6_un1_flow & 0xF0) >> 4) | |
1969 | != IP_HEADER_VERSION_6) { | |
1970 | return RSC_BYPASS; | |
1971 | } | |
1972 | ||
1973 | /* This check covers both extension headers (options) and the protocol */
1974 | if (ip6->ip6_ctlun.ip6_un1.ip6_un1_nxt != IPPROTO_TCP) { | |
1975 | chain->stat.bypass_not_tcp++; | |
1976 | return RSC_BYPASS; | |
1977 | } | |
1978 | ||
1979 | ip_len = htons(ip6->ip6_ctlun.ip6_un1.ip6_un1_plen); | |
1980 | if (ip_len < sizeof(struct tcp_header) || | |
1981 | ip_len > (size - chain->n->guest_hdr_len - sizeof(struct eth_header) | |
1982 | - sizeof(struct ip6_header))) { | |
1983 | chain->stat.ip_hacked++; | |
1984 | return RSC_BYPASS; | |
1985 | } | |
1986 | ||
1987 | /* Don't handle packets with ecn flag */ | |
1988 | if (IP6_ECN(ip6->ip6_ctlun.ip6_un3.ip6_un3_ecn)) { | |
1989 | chain->stat.ip_ecn++; | |
1990 | return RSC_BYPASS; | |
1991 | } | |
1992 | ||
1993 | return RSC_CANDIDATE; | |
1994 | } | |
1995 | ||
1996 | static size_t virtio_net_rsc_receive6(void *opq, NetClientState *nc, | |
1997 | const uint8_t *buf, size_t size) | |
1998 | { | |
1999 | int32_t ret; | |
2000 | uint16_t hdr_len; | |
2001 | VirtioNetRscChain *chain; | |
2002 | VirtioNetRscUnit unit; | |
2003 | ||
2004 | chain = (VirtioNetRscChain *)opq; | |
2005 | hdr_len = ((VirtIONet *)(chain->n))->guest_hdr_len; | |
2006 | ||
2007 | if (size < (hdr_len + sizeof(struct eth_header) + sizeof(struct ip6_header) | |
2008 | + sizeof(tcp_header))) { | |
2009 | return virtio_net_do_receive(nc, buf, size); | |
2010 | } | |
2011 | ||
2012 | virtio_net_rsc_extract_unit6(chain, buf, &unit); | |
2013 | if (RSC_CANDIDATE != virtio_net_rsc_sanity_check6(chain, | |
2014 | unit.ip, buf, size)) { | |
2015 | return virtio_net_do_receive(nc, buf, size); | |
2016 | } | |
2017 | ||
2018 | ret = virtio_net_rsc_tcp_ctrl_check(chain, unit.tcp); | |
2019 | if (ret == RSC_BYPASS) { | |
2020 | return virtio_net_do_receive(nc, buf, size); | |
2021 | } else if (ret == RSC_FINAL) { | |
2022 | return virtio_net_rsc_drain_flow(chain, nc, buf, size, | |
2023 | ((hdr_len + sizeof(struct eth_header)) + 8), | |
2024 | VIRTIO_NET_IP6_ADDR_SIZE, | |
2025 | hdr_len + sizeof(struct eth_header) | |
2026 | + sizeof(struct ip6_header)); | |
2027 | } | |
2028 | ||
2029 | return virtio_net_rsc_do_coalesce(chain, nc, buf, size, &unit); | |
2030 | } | |
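/*
 * Illustrative note: for IPv6 the drain offsets work the same way, except
 * that ip_start is hdr_len + sizeof(struct eth_header) + 8 (the source
 * address starts 8 bytes into the fixed IPv6 header), ip_size covers the
 * 32 bytes of source plus destination address, and tcp_port follows the
 * fixed IPv6 header directly, since the sanity check only admits packets
 * whose next header is TCP (no extension headers).
 */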
2031 | ||
2032 | static VirtioNetRscChain *virtio_net_rsc_lookup_chain(VirtIONet *n, | |
2033 | NetClientState *nc, | |
2034 | uint16_t proto) | |
2035 | { | |
2036 | VirtioNetRscChain *chain; | |
2037 | ||
2038 | if ((proto != (uint16_t)ETH_P_IP) && (proto != (uint16_t)ETH_P_IPV6)) { | |
2039 | return NULL; | |
2040 | } | |
2041 | ||
2042 | QTAILQ_FOREACH(chain, &n->rsc_chains, next) { | |
2043 | if (chain->proto == proto) { | |
2044 | return chain; | |
2045 | } | |
2046 | } | |
2047 | ||
2048 | chain = g_malloc(sizeof(*chain)); | |
2049 | chain->n = n; | |
2050 | chain->proto = proto; | |
2051 | if (proto == (uint16_t)ETH_P_IP) { | |
2052 | chain->max_payload = VIRTIO_NET_MAX_IP4_PAYLOAD; | |
2053 | chain->gso_type = VIRTIO_NET_HDR_GSO_TCPV4; | |
2054 | } else { | |
2055 | chain->max_payload = VIRTIO_NET_MAX_IP6_PAYLOAD; | |
2056 | chain->gso_type = VIRTIO_NET_HDR_GSO_TCPV6; | |
2057 | } | |
2058 | chain->drain_timer = timer_new_ns(QEMU_CLOCK_HOST, | |
2059 | virtio_net_rsc_purge, chain); | |
2060 | memset(&chain->stat, 0, sizeof(chain->stat)); | |
2061 | ||
2062 | QTAILQ_INIT(&chain->buffers); | |
2063 | QTAILQ_INSERT_TAIL(&n->rsc_chains, chain, next); | |
2064 | ||
2065 | return chain; | |
2066 | } | |
2067 | ||
2068 | static ssize_t virtio_net_rsc_receive(NetClientState *nc, | |
2069 | const uint8_t *buf, | |
2070 | size_t size) | |
2071 | { | |
2072 | uint16_t proto; | |
2073 | VirtioNetRscChain *chain; | |
2074 | struct eth_header *eth; | |
2075 | VirtIONet *n; | |
2076 | ||
2077 | n = qemu_get_nic_opaque(nc); | |
2078 | if (size < (n->host_hdr_len + sizeof(struct eth_header))) { | |
2079 | return virtio_net_do_receive(nc, buf, size); | |
2080 | } | |
2081 | ||
2082 | eth = (struct eth_header *)(buf + n->guest_hdr_len); | |
2083 | proto = htons(eth->h_proto); | |
2084 | ||
2085 | chain = virtio_net_rsc_lookup_chain(n, nc, proto); | |
2086 | if (chain) { | |
2087 | chain->stat.received++; | |
2088 | if (proto == (uint16_t)ETH_P_IP && n->rsc4_enabled) { | |
2089 | return virtio_net_rsc_receive4(chain, nc, buf, size); | |
2090 | } else if (proto == (uint16_t)ETH_P_IPV6 && n->rsc6_enabled) { | |
2091 | return virtio_net_rsc_receive6(chain, nc, buf, size); | |
2092 | } | |
2093 | } | |
2094 | return virtio_net_do_receive(nc, buf, size); | |
2095 | } | |
2096 | ||
2097 | static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf, | |
2098 | size_t size) | |
2099 | { | |
2100 | VirtIONet *n = qemu_get_nic_opaque(nc); | |
2101 | if ((n->rsc4_enabled || n->rsc6_enabled)) { | |
2102 | return virtio_net_rsc_receive(nc, buf, size); | |
2103 | } else { | |
2104 | return virtio_net_do_receive(nc, buf, size); | |
2105 | } | |
2106 | } | |
2107 | ||
0c87e93e | 2108 | static int32_t virtio_net_flush_tx(VirtIONetQueue *q); |
6243375f | 2109 | |
4e68f7a0 | 2110 | static void virtio_net_tx_complete(NetClientState *nc, ssize_t len) |
6243375f | 2111 | { |
cc1f0f45 | 2112 | VirtIONet *n = qemu_get_nic_opaque(nc); |
fed699f9 | 2113 | VirtIONetQueue *q = virtio_net_get_subqueue(nc); |
17a0ca55 | 2114 | VirtIODevice *vdev = VIRTIO_DEVICE(n); |
6243375f | 2115 | |
51b19ebe | 2116 | virtqueue_push(q->tx_vq, q->async_tx.elem, 0); |
17a0ca55 | 2117 | virtio_notify(vdev, q->tx_vq); |
6243375f | 2118 | |
51b19ebe PB |
2119 | g_free(q->async_tx.elem); |
2120 | q->async_tx.elem = NULL; | |
6243375f | 2121 | |
0c87e93e JW |
2122 | virtio_queue_set_notification(q->tx_vq, 1); |
2123 | virtio_net_flush_tx(q); | |
6243375f MM |
2124 | } |
2125 | ||
fbe78f4f | 2126 | /* TX */ |
0c87e93e | 2127 | static int32_t virtio_net_flush_tx(VirtIONetQueue *q) |
fbe78f4f | 2128 | { |
0c87e93e | 2129 | VirtIONet *n = q->n; |
17a0ca55 | 2130 | VirtIODevice *vdev = VIRTIO_DEVICE(n); |
51b19ebe | 2131 | VirtQueueElement *elem; |
e3f30488 | 2132 | int32_t num_packets = 0; |
fed699f9 | 2133 | int queue_index = vq2q(virtio_get_queue_index(q->tx_vq)); |
17a0ca55 | 2134 | if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) { |
e3f30488 AW |
2135 | return num_packets; |
2136 | } | |
fbe78f4f | 2137 | |
51b19ebe | 2138 | if (q->async_tx.elem) { |
0c87e93e | 2139 | virtio_queue_set_notification(q->tx_vq, 0); |
e3f30488 | 2140 | return num_packets; |
6243375f MM |
2141 | } |
2142 | ||
51b19ebe | 2143 | for (;;) { |
bd89dd98 | 2144 | ssize_t ret; |
51b19ebe PB |
2145 | unsigned int out_num; |
2146 | struct iovec sg[VIRTQUEUE_MAX_SIZE], sg2[VIRTQUEUE_MAX_SIZE + 1], *out_sg; | |
feb93f36 | 2147 | struct virtio_net_hdr_mrg_rxbuf mhdr; |
fbe78f4f | 2148 | |
51b19ebe PB |
2149 | elem = virtqueue_pop(q->tx_vq, sizeof(VirtQueueElement)); |
2150 | if (!elem) { | |
2151 | break; | |
2152 | } | |
2153 | ||
2154 | out_num = elem->out_num; | |
2155 | out_sg = elem->out_sg; | |
7b80d08e | 2156 | if (out_num < 1) { |
fa5e56c2 GK |
2157 | virtio_error(vdev, "virtio-net header not in first element"); |
2158 | virtqueue_detach_element(q->tx_vq, elem, 0); | |
2159 | g_free(elem); | |
2160 | return -EINVAL; | |
fbe78f4f AL |
2161 | } |
2162 | ||
032a74a1 | 2163 | if (n->has_vnet_hdr) { |
feb93f36 JW |
2164 | if (iov_to_buf(out_sg, out_num, 0, &mhdr, n->guest_hdr_len) < |
2165 | n->guest_hdr_len) { | |
fa5e56c2 GK |
2166 | virtio_error(vdev, "virtio-net header incorrect"); |
2167 | virtqueue_detach_element(q->tx_vq, elem, 0); | |
2168 | g_free(elem); | |
2169 | return -EINVAL; | |
032a74a1 | 2170 | } |
1bfa316c | 2171 | if (n->needs_vnet_hdr_swap) { |
feb93f36 JW |
2172 | virtio_net_hdr_swap(vdev, (void *) &mhdr); |
2173 | sg2[0].iov_base = &mhdr; | |
2174 | sg2[0].iov_len = n->guest_hdr_len; | |
2175 | out_num = iov_copy(&sg2[1], ARRAY_SIZE(sg2) - 1, | |
2176 | out_sg, out_num, | |
2177 | n->guest_hdr_len, -1); | |
2178 | if (out_num == VIRTQUEUE_MAX_SIZE) { | |
2179 | goto drop; | |
7d37435b | 2180 | } |
feb93f36 JW |
2181 | out_num += 1; |
2182 | out_sg = sg2; | |
7d37435b | 2183 | } |
032a74a1 | 2184 | } |
14761f9c MT |
2185 | /* |
2186 | * If host wants to see the guest header as is, we can | |
2187 | * pass it on unchanged. Otherwise, copy just the parts | |
2188 | * that host is interested in. | |
2189 | */ | |
2190 | assert(n->host_hdr_len <= n->guest_hdr_len); | |
2191 | if (n->host_hdr_len != n->guest_hdr_len) { | |
2192 | unsigned sg_num = iov_copy(sg, ARRAY_SIZE(sg), | |
2193 | out_sg, out_num, | |
2194 | 0, n->host_hdr_len); | |
2195 | sg_num += iov_copy(sg + sg_num, ARRAY_SIZE(sg) - sg_num, | |
2196 | out_sg, out_num, | |
2197 | n->guest_hdr_len, -1); | |
2198 | out_num = sg_num; | |
2199 | out_sg = sg; | |
fbe78f4f AL |
2200 | } |
2201 | ||
fed699f9 JW |
2202 | ret = qemu_sendv_packet_async(qemu_get_subqueue(n->nic, queue_index), |
2203 | out_sg, out_num, virtio_net_tx_complete); | |
6243375f | 2204 | if (ret == 0) { |
0c87e93e JW |
2205 | virtio_queue_set_notification(q->tx_vq, 0); |
2206 | q->async_tx.elem = elem; | |
e3f30488 | 2207 | return -EBUSY; |
6243375f MM |
2208 | } |
2209 | ||
feb93f36 | 2210 | drop: |
51b19ebe | 2211 | virtqueue_push(q->tx_vq, elem, 0); |
17a0ca55 | 2212 | virtio_notify(vdev, q->tx_vq); |
51b19ebe | 2213 | g_free(elem); |
e3f30488 AW |
2214 | |
2215 | if (++num_packets >= n->tx_burst) { | |
2216 | break; | |
2217 | } | |
fbe78f4f | 2218 | } |
e3f30488 | 2219 | return num_packets; |
fbe78f4f AL |
2220 | } |
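/*
 * Worked example for the header handling in virtio_net_flush_tx() above:
 * when the guest header is longer than what the backend expects (e.g. a
 * 12-byte virtio_net_hdr_mrg_rxbuf from the guest vs. a 10-byte
 * virtio_net_hdr for the host), the two iov_copy() calls build an iovec
 * holding bytes [0, host_hdr_len) followed by [guest_hdr_len, end),
 * dropping the pad in between; if the lengths match, the guest buffers are
 * sent as-is.
 */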
2221 | ||
a697a334 | 2222 | static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq) |
fbe78f4f | 2223 | { |
17a0ca55 | 2224 | VirtIONet *n = VIRTIO_NET(vdev); |
fed699f9 | 2225 | VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))]; |
fbe78f4f | 2226 | |
283e2c2a YB |
2227 | if (unlikely((n->status & VIRTIO_NET_S_LINK_UP) == 0)) { |
2228 | virtio_net_drop_tx_queue_data(vdev, vq); | |
2229 | return; | |
2230 | } | |
2231 | ||
783e7706 | 2232 | /* This happens when device was stopped but VCPU wasn't. */ |
17a0ca55 | 2233 | if (!vdev->vm_running) { |
0c87e93e | 2234 | q->tx_waiting = 1; |
783e7706 MT |
2235 | return; |
2236 | } | |
2237 | ||
0c87e93e | 2238 | if (q->tx_waiting) { |
fbe78f4f | 2239 | virtio_queue_set_notification(vq, 1); |
bc72ad67 | 2240 | timer_del(q->tx_timer); |
0c87e93e | 2241 | q->tx_waiting = 0; |
fa5e56c2 GK |
2242 | if (virtio_net_flush_tx(q) == -EINVAL) { |
2243 | return; | |
2244 | } | |
fbe78f4f | 2245 | } else { |
bc72ad67 AB |
2246 | timer_mod(q->tx_timer, |
2247 | qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout); | |
0c87e93e | 2248 | q->tx_waiting = 1; |
fbe78f4f AL |
2249 | virtio_queue_set_notification(vq, 0); |
2250 | } | |
2251 | } | |
2252 | ||
a697a334 AW |
2253 | static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq) |
2254 | { | |
17a0ca55 | 2255 | VirtIONet *n = VIRTIO_NET(vdev); |
fed699f9 | 2256 | VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))]; |
a697a334 | 2257 | |
283e2c2a YB |
2258 | if (unlikely((n->status & VIRTIO_NET_S_LINK_UP) == 0)) { |
2259 | virtio_net_drop_tx_queue_data(vdev, vq); | |
2260 | return; | |
2261 | } | |
2262 | ||
0c87e93e | 2263 | if (unlikely(q->tx_waiting)) { |
a697a334 AW |
2264 | return; |
2265 | } | |
0c87e93e | 2266 | q->tx_waiting = 1; |
783e7706 | 2267 | /* This happens when device was stopped but VCPU wasn't. */ |
17a0ca55 | 2268 | if (!vdev->vm_running) { |
783e7706 MT |
2269 | return; |
2270 | } | |
a697a334 | 2271 | virtio_queue_set_notification(vq, 0); |
0c87e93e | 2272 | qemu_bh_schedule(q->tx_bh); |
a697a334 AW |
2273 | } |
2274 | ||
fbe78f4f AL |
2275 | static void virtio_net_tx_timer(void *opaque) |
2276 | { | |
0c87e93e JW |
2277 | VirtIONetQueue *q = opaque; |
2278 | VirtIONet *n = q->n; | |
17a0ca55 | 2279 | VirtIODevice *vdev = VIRTIO_DEVICE(n); |
e8bcf842 MT |
2280 | /* This happens when device was stopped but BH wasn't. */ |
2281 | if (!vdev->vm_running) { | |
2282 | /* Make sure tx waiting is set, so we'll run when restarted. */ | |
2283 | assert(q->tx_waiting); | |
2284 | return; | |
2285 | } | |
fbe78f4f | 2286 | |
0c87e93e | 2287 | q->tx_waiting = 0; |
fbe78f4f AL |
2288 | |
2289 | /* Just in case the driver is not ready any more */
17a0ca55 | 2290 | if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) { |
fbe78f4f | 2291 | return; |
17a0ca55 | 2292 | } |
fbe78f4f | 2293 | |
0c87e93e JW |
2294 | virtio_queue_set_notification(q->tx_vq, 1); |
2295 | virtio_net_flush_tx(q); | |
fbe78f4f AL |
2296 | } |
2297 | ||
a697a334 AW |
2298 | static void virtio_net_tx_bh(void *opaque) |
2299 | { | |
0c87e93e JW |
2300 | VirtIONetQueue *q = opaque; |
2301 | VirtIONet *n = q->n; | |
17a0ca55 | 2302 | VirtIODevice *vdev = VIRTIO_DEVICE(n); |
a697a334 AW |
2303 | int32_t ret; |
2304 | ||
e8bcf842 MT |
2305 | /* This happens when device was stopped but BH wasn't. */ |
2306 | if (!vdev->vm_running) { | |
2307 | /* Make sure tx waiting is set, so we'll run when restarted. */ | |
2308 | assert(q->tx_waiting); | |
2309 | return; | |
2310 | } | |
783e7706 | 2311 | |
0c87e93e | 2312 | q->tx_waiting = 0; |
a697a334 AW |
2313 | |
2314 | /* Just in case the driver is not ready any more */
17a0ca55 | 2315 | if (unlikely(!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))) { |
a697a334 | 2316 | return; |
17a0ca55 | 2317 | } |
a697a334 | 2318 | |
0c87e93e | 2319 | ret = virtio_net_flush_tx(q); |
fa5e56c2 GK |
2320 | if (ret == -EBUSY || ret == -EINVAL) { |
2321 | return; /* Notification re-enable handled by tx_complete or device | |
2322 | * broken */ | |
a697a334 AW |
2323 | } |
2324 | ||
2325 | /* If we flush a full burst of packets, assume there are | |
2326 | * more coming and immediately reschedule */ | |
2327 | if (ret >= n->tx_burst) { | |
0c87e93e JW |
2328 | qemu_bh_schedule(q->tx_bh); |
2329 | q->tx_waiting = 1; | |
a697a334 AW |
2330 | return; |
2331 | } | |
2332 | ||
2333 | /* If less than a full burst, re-enable notification and flush | |
2334 | * anything that may have come in while we weren't looking. If | |
2335 | * we find something, assume the guest is still active and reschedule */ | |
0c87e93e | 2336 | virtio_queue_set_notification(q->tx_vq, 1); |
fa5e56c2 GK |
2337 | ret = virtio_net_flush_tx(q); |
2338 | if (ret == -EINVAL) { | |
2339 | return; | |
2340 | } else if (ret > 0) { | |
0c87e93e JW |
2341 | virtio_queue_set_notification(q->tx_vq, 0); |
2342 | qemu_bh_schedule(q->tx_bh); | |
2343 | q->tx_waiting = 1; | |
a697a334 AW |
2344 | } |
2345 | } | |
2346 | ||
f9d6dbf0 WC |
2347 | static void virtio_net_add_queue(VirtIONet *n, int index) |
2348 | { | |
2349 | VirtIODevice *vdev = VIRTIO_DEVICE(n); | |
2350 | ||
1c0fbfa3 MT |
2351 | n->vqs[index].rx_vq = virtio_add_queue(vdev, n->net_conf.rx_queue_size, |
2352 | virtio_net_handle_rx); | |
9b02e161 | 2353 | |
f9d6dbf0 WC |
2354 | if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) { |
2355 | n->vqs[index].tx_vq = | |
9b02e161 WW |
2356 | virtio_add_queue(vdev, n->net_conf.tx_queue_size, |
2357 | virtio_net_handle_tx_timer); | |
f9d6dbf0 WC |
2358 | n->vqs[index].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, |
2359 | virtio_net_tx_timer, | |
2360 | &n->vqs[index]); | |
2361 | } else { | |
2362 | n->vqs[index].tx_vq = | |
9b02e161 WW |
2363 | virtio_add_queue(vdev, n->net_conf.tx_queue_size, |
2364 | virtio_net_handle_tx_bh); | |
f9d6dbf0 WC |
2365 | n->vqs[index].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[index]); |
2366 | } | |
2367 | ||
2368 | n->vqs[index].tx_waiting = 0; | |
2369 | n->vqs[index].n = n; | |
2370 | } | |
2371 | ||
2372 | static void virtio_net_del_queue(VirtIONet *n, int index) | |
2373 | { | |
2374 | VirtIODevice *vdev = VIRTIO_DEVICE(n); | |
2375 | VirtIONetQueue *q = &n->vqs[index]; | |
2376 | NetClientState *nc = qemu_get_subqueue(n->nic, index); | |
2377 | ||
2378 | qemu_purge_queued_packets(nc); | |
2379 | ||
2380 | virtio_del_queue(vdev, index * 2); | |
2381 | if (q->tx_timer) { | |
2382 | timer_del(q->tx_timer); | |
2383 | timer_free(q->tx_timer); | |
f989c30c | 2384 | q->tx_timer = NULL; |
f9d6dbf0 WC |
2385 | } else { |
2386 | qemu_bh_delete(q->tx_bh); | |
f989c30c | 2387 | q->tx_bh = NULL; |
f9d6dbf0 | 2388 | } |
f989c30c | 2389 | q->tx_waiting = 0; |
f9d6dbf0 WC |
2390 | virtio_del_queue(vdev, index * 2 + 1); |
2391 | } | |
2392 | ||
2393 | static void virtio_net_change_num_queues(VirtIONet *n, int new_max_queues) | |
2394 | { | |
2395 | VirtIODevice *vdev = VIRTIO_DEVICE(n); | |
2396 | int old_num_queues = virtio_get_num_queues(vdev); | |
2397 | int new_num_queues = new_max_queues * 2 + 1; | |
2398 | int i; | |
2399 | ||
2400 | assert(old_num_queues >= 3); | |
2401 | assert(old_num_queues % 2 == 1); | |
2402 | ||
2403 | if (old_num_queues == new_num_queues) { | |
2404 | return; | |
2405 | } | |
2406 | ||
2407 | /* | |
2408 | * We always need to remove and add ctrl vq if | |
2409 | * old_num_queues != new_num_queues. Remove ctrl_vq first, | |
20f86a75 | 2410 | * and then we only enter one of the following two loops. |
f9d6dbf0 WC |
2411 | */ |
2412 | virtio_del_queue(vdev, old_num_queues - 1); | |
2413 | ||
2414 | for (i = new_num_queues - 1; i < old_num_queues - 1; i += 2) { | |
2415 | /* new_num_queues < old_num_queues */ | |
2416 | virtio_net_del_queue(n, i / 2); | |
2417 | } | |
2418 | ||
2419 | for (i = old_num_queues - 1; i < new_num_queues - 1; i += 2) { | |
2420 | /* new_num_queues > old_num_queues */ | |
2421 | virtio_net_add_queue(n, i / 2); | |
2422 | } | |
2423 | ||
2424 | /* add ctrl_vq last */ | |
2425 | n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl); | |
2426 | } | |
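/*
 * Illustrative note on the virtqueue layout assumed above: queue pair i uses
 * virtqueue index 2 * i for rx and 2 * i + 1 for tx (which is why vq2q()
 * maps a virtqueue index back to its pair elsewhere in this file), and the
 * control virtqueue always sits last.  That is why resizing deletes the ctrl
 * vq first, grows or shrinks the pairs, and then re-adds the ctrl vq at the
 * new end.
 */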
2427 | ||
ec57db16 | 2428 | static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue) |
fed699f9 | 2429 | { |
f9d6dbf0 WC |
2430 | int max = multiqueue ? n->max_queues : 1; |
2431 | ||
fed699f9 | 2432 | n->multiqueue = multiqueue; |
f9d6dbf0 | 2433 | virtio_net_change_num_queues(n, max); |
fed699f9 | 2434 | |
fed699f9 JW |
2435 | virtio_net_set_queues(n); |
2436 | } | |
2437 | ||
982b78c5 | 2438 | static int virtio_net_post_load_device(void *opaque, int version_id) |
037dab2f | 2439 | { |
982b78c5 DDAG |
2440 | VirtIONet *n = opaque; |
2441 | VirtIODevice *vdev = VIRTIO_DEVICE(n); | |
037dab2f | 2442 | int i, link_down; |
fbe78f4f | 2443 | |
9d8c6a25 | 2444 | trace_virtio_net_post_load_device(); |
982b78c5 | 2445 | virtio_net_set_mrg_rx_bufs(n, n->mergeable_rx_bufs, |
95129d6f CH |
2446 | virtio_vdev_has_feature(vdev, |
2447 | VIRTIO_F_VERSION_1)); | |
fbe78f4f | 2448 | |
76010cb3 | 2449 | /* MAC_TABLE_ENTRIES may be different from the saved image */ |
982b78c5 | 2450 | if (n->mac_table.in_use > MAC_TABLE_ENTRIES) { |
76010cb3 | 2451 | n->mac_table.in_use = 0; |
b6503ed9 | 2452 | } |
0ce0e8f4 | 2453 | |
982b78c5 | 2454 | if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) { |
6c666823 MT |
2455 | n->curr_guest_offloads = virtio_net_supported_guest_offloads(n); |
2456 | } | |
2457 | ||
7788c3f2 MS |
2458 | /* |
2459 | * curr_guest_offloads will be later overwritten by the | |
2460 | * virtio_set_features_nocheck call done from the virtio_load. | |
2461 | * Here we make sure it is preserved and restored accordingly | |
2462 | * in the virtio_net_post_load_virtio callback. | |
2463 | */ | |
2464 | n->saved_guest_offloads = n->curr_guest_offloads; | |
6c666823 | 2465 | |
5f800801 JW |
2466 | virtio_net_set_queues(n); |
2467 | ||
2d9aba39 AW |
2468 | /* Find the first multicast entry in the saved MAC filter */ |
2469 | for (i = 0; i < n->mac_table.in_use; i++) { | |
2470 | if (n->mac_table.macs[i * ETH_ALEN] & 1) { | |
2471 | break; | |
2472 | } | |
2473 | } | |
2474 | n->mac_table.first_multi = i; | |
98991481 AK |
2475 | |
2476 | /* nc.link_down can't be migrated, so infer link_down according | |
2477 | * to link status bit in n->status */ | |
5f800801 JW |
2478 | link_down = (n->status & VIRTIO_NET_S_LINK_UP) == 0; |
2479 | for (i = 0; i < n->max_queues; i++) { | |
2480 | qemu_get_subqueue(n->nic, i)->link_down = link_down; | |
2481 | } | |
98991481 | 2482 | |
6c666823 MT |
2483 | if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE) && |
2484 | virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) { | |
9d8c6a25 DDAG |
2485 | qemu_announce_timer_reset(&n->announce_timer, migrate_announce_params(), |
2486 | QEMU_CLOCK_VIRTUAL, | |
2487 | virtio_net_announce_timer, n); | |
2488 | if (n->announce_timer.round) { | |
2489 | timer_mod(n->announce_timer.tm, | |
2490 | qemu_clock_get_ms(n->announce_timer.type)); | |
2491 | } else { | |
944458b6 | 2492 | qemu_announce_timer_del(&n->announce_timer, false); |
9d8c6a25 | 2493 | } |
6c666823 MT |
2494 | } |
2495 | ||
fbe78f4f AL |
2496 | return 0; |
2497 | } | |
2498 | ||
7788c3f2 MS |
2499 | static int virtio_net_post_load_virtio(VirtIODevice *vdev) |
2500 | { | |
2501 | VirtIONet *n = VIRTIO_NET(vdev); | |
2502 | /* | |
2503 | * The actual needed state is now in saved_guest_offloads, | |
2504 | * see virtio_net_post_load_device for detail. | |
2505 | * Restore it back and apply the desired offloads. | |
2506 | */ | |
2507 | n->curr_guest_offloads = n->saved_guest_offloads; | |
2508 | if (peer_has_vnet_hdr(n)) { | |
2509 | virtio_net_apply_guest_offloads(n); | |
2510 | } | |
2511 | ||
2512 | return 0; | |
2513 | } | |
2514 | ||
982b78c5 DDAG |
2515 | /* tx_waiting field of a VirtIONetQueue */ |
2516 | static const VMStateDescription vmstate_virtio_net_queue_tx_waiting = { | |
2517 | .name = "virtio-net-queue-tx_waiting", | |
2518 | .fields = (VMStateField[]) { | |
2519 | VMSTATE_UINT32(tx_waiting, VirtIONetQueue), | |
2520 | VMSTATE_END_OF_LIST() | |
2521 | }, | |
2522 | }; | |
2523 | ||
2524 | static bool max_queues_gt_1(void *opaque, int version_id) | |
2525 | { | |
2526 | return VIRTIO_NET(opaque)->max_queues > 1; | |
2527 | } | |
2528 | ||
2529 | static bool has_ctrl_guest_offloads(void *opaque, int version_id) | |
2530 | { | |
2531 | return virtio_vdev_has_feature(VIRTIO_DEVICE(opaque), | |
2532 | VIRTIO_NET_F_CTRL_GUEST_OFFLOADS); | |
2533 | } | |
2534 | ||
2535 | static bool mac_table_fits(void *opaque, int version_id) | |
2536 | { | |
2537 | return VIRTIO_NET(opaque)->mac_table.in_use <= MAC_TABLE_ENTRIES; | |
2538 | } | |
2539 | ||
2540 | static bool mac_table_doesnt_fit(void *opaque, int version_id) | |
2541 | { | |
2542 | return !mac_table_fits(opaque, version_id); | |
2543 | } | |
2544 | ||
2545 | /* This temporary type is shared by all the WITH_TMP methods | |
2546 | * although only some fields are used by each. | |
2547 | */ | |
2548 | struct VirtIONetMigTmp { | |
2549 | VirtIONet *parent; | |
2550 | VirtIONetQueue *vqs_1; | |
2551 | uint16_t curr_queues_1; | |
2552 | uint8_t has_ufo; | |
2553 | uint32_t has_vnet_hdr; | |
2554 | }; | |
2555 | ||
2556 | /* The 2nd and subsequent tx_waiting flags are loaded later than | |
2557 | * the 1st entry in the queues and only if there's more than one | |
2558 | * entry. We use the tmp mechanism to calculate a temporary | |
2559 | * pointer and count and also validate the count. | |
2560 | */ | |
2561 | ||
44b1ff31 | 2562 | static int virtio_net_tx_waiting_pre_save(void *opaque) |
982b78c5 DDAG |
2563 | { |
2564 | struct VirtIONetMigTmp *tmp = opaque; | |
2565 | ||
2566 | tmp->vqs_1 = tmp->parent->vqs + 1; | |
2567 | tmp->curr_queues_1 = tmp->parent->curr_queues - 1; | |
2568 | if (tmp->parent->curr_queues == 0) { | |
2569 | tmp->curr_queues_1 = 0; | |
2570 | } | |
44b1ff31 DDAG |
2571 | |
2572 | return 0; | |
982b78c5 DDAG |
2573 | } |
2574 | ||
2575 | static int virtio_net_tx_waiting_pre_load(void *opaque) | |
2576 | { | |
2577 | struct VirtIONetMigTmp *tmp = opaque; | |
2578 | ||
2579 | /* Reuse the pointer setup from save */ | |
2580 | virtio_net_tx_waiting_pre_save(opaque); | |
2581 | ||
2582 | if (tmp->parent->curr_queues > tmp->parent->max_queues) { | |
2583 | error_report("virtio-net: curr_queues %x > max_queues %x", | |
2584 | tmp->parent->curr_queues, tmp->parent->max_queues); | |
2585 | ||
2586 | return -EINVAL; | |
2587 | } | |
2588 | ||
2589 | return 0; /* all good */ | |
2590 | } | |
2591 | ||
2592 | static const VMStateDescription vmstate_virtio_net_tx_waiting = { | |
2593 | .name = "virtio-net-tx_waiting", | |
2594 | .pre_load = virtio_net_tx_waiting_pre_load, | |
2595 | .pre_save = virtio_net_tx_waiting_pre_save, | |
2596 | .fields = (VMStateField[]) { | |
2597 | VMSTATE_STRUCT_VARRAY_POINTER_UINT16(vqs_1, struct VirtIONetMigTmp, | |
2598 | curr_queues_1, | |
2599 | vmstate_virtio_net_queue_tx_waiting, | |
2600 | struct VirtIONetQueue), | |
2601 | VMSTATE_END_OF_LIST() | |
2602 | }, | |
2603 | }; | |
2604 | ||
2605 | /* the 'has_ufo' flag is just tested; if the incoming stream has the | |
2606 | * flag set we need to check that we have it | |
2607 | */ | |
2608 | static int virtio_net_ufo_post_load(void *opaque, int version_id) | |
2609 | { | |
2610 | struct VirtIONetMigTmp *tmp = opaque; | |
2611 | ||
2612 | if (tmp->has_ufo && !peer_has_ufo(tmp->parent)) { | |
2613 | error_report("virtio-net: saved image requires TUN_F_UFO support"); | |
2614 | return -EINVAL; | |
2615 | } | |
2616 | ||
2617 | return 0; | |
2618 | } | |
2619 | ||
44b1ff31 | 2620 | static int virtio_net_ufo_pre_save(void *opaque) |
982b78c5 DDAG |
2621 | { |
2622 | struct VirtIONetMigTmp *tmp = opaque; | |
2623 | ||
2624 | tmp->has_ufo = tmp->parent->has_ufo; | |
44b1ff31 DDAG |
2625 | |
2626 | return 0; | |
982b78c5 DDAG |
2627 | } |
2628 | ||
2629 | static const VMStateDescription vmstate_virtio_net_has_ufo = { | |
2630 | .name = "virtio-net-ufo", | |
2631 | .post_load = virtio_net_ufo_post_load, | |
2632 | .pre_save = virtio_net_ufo_pre_save, | |
2633 | .fields = (VMStateField[]) { | |
2634 | VMSTATE_UINT8(has_ufo, struct VirtIONetMigTmp), | |
2635 | VMSTATE_END_OF_LIST() | |
2636 | }, | |
2637 | }; | |
2638 | ||
2639 | /* the 'has_vnet_hdr' flag is just tested; if the incoming stream has the | |
2640 | * flag set we need to check that we have it | |
2641 | */ | |
2642 | static int virtio_net_vnet_post_load(void *opaque, int version_id) | |
2643 | { | |
2644 | struct VirtIONetMigTmp *tmp = opaque; | |
2645 | ||
2646 | if (tmp->has_vnet_hdr && !peer_has_vnet_hdr(tmp->parent)) { | |
2647 | error_report("virtio-net: saved image requires vnet_hdr=on"); | |
2648 | return -EINVAL; | |
2649 | } | |
2650 | ||
2651 | return 0; | |
2652 | } | |
2653 | ||
44b1ff31 | 2654 | static int virtio_net_vnet_pre_save(void *opaque) |
982b78c5 DDAG |
2655 | { |
2656 | struct VirtIONetMigTmp *tmp = opaque; | |
2657 | ||
2658 | tmp->has_vnet_hdr = tmp->parent->has_vnet_hdr; | |
44b1ff31 DDAG |
2659 | |
2660 | return 0; | |
982b78c5 DDAG |
2661 | } |
2662 | ||
2663 | static const VMStateDescription vmstate_virtio_net_has_vnet = { | |
2664 | .name = "virtio-net-vnet", | |
2665 | .post_load = virtio_net_vnet_post_load, | |
2666 | .pre_save = virtio_net_vnet_pre_save, | |
2667 | .fields = (VMStateField[]) { | |
2668 | VMSTATE_UINT32(has_vnet_hdr, struct VirtIONetMigTmp), | |
2669 | VMSTATE_END_OF_LIST() | |
2670 | }, | |
2671 | }; | |
2672 | ||
2673 | static const VMStateDescription vmstate_virtio_net_device = { | |
2674 | .name = "virtio-net-device", | |
2675 | .version_id = VIRTIO_NET_VM_VERSION, | |
2676 | .minimum_version_id = VIRTIO_NET_VM_VERSION, | |
2677 | .post_load = virtio_net_post_load_device, | |
2678 | .fields = (VMStateField[]) { | |
2679 | VMSTATE_UINT8_ARRAY(mac, VirtIONet, ETH_ALEN), | |
2680 | VMSTATE_STRUCT_POINTER(vqs, VirtIONet, | |
2681 | vmstate_virtio_net_queue_tx_waiting, | |
2682 | VirtIONetQueue), | |
2683 | VMSTATE_UINT32(mergeable_rx_bufs, VirtIONet), | |
2684 | VMSTATE_UINT16(status, VirtIONet), | |
2685 | VMSTATE_UINT8(promisc, VirtIONet), | |
2686 | VMSTATE_UINT8(allmulti, VirtIONet), | |
2687 | VMSTATE_UINT32(mac_table.in_use, VirtIONet), | |
2688 | ||
2689 | /* Guarded pair: If it fits we load it, else we throw it away | |
2690 | * - can happen if the source has a larger MAC table; post-load
2691 | * sets flags in this case. | |
2692 | */ | |
2693 | VMSTATE_VBUFFER_MULTIPLY(mac_table.macs, VirtIONet, | |
2694 | 0, mac_table_fits, mac_table.in_use, | |
2695 | ETH_ALEN), | |
2696 | VMSTATE_UNUSED_VARRAY_UINT32(VirtIONet, mac_table_doesnt_fit, 0, | |
2697 | mac_table.in_use, ETH_ALEN), | |
2698 | ||
2699 | /* Note: This is an array of uint32's that's always been saved as a | |
2700 | * buffer; hold onto your endiannesses; it's actually used as a bitmap | |
2701 | * but based on the uint. | |
2702 | */ | |
2703 | VMSTATE_BUFFER_POINTER_UNSAFE(vlans, VirtIONet, 0, MAX_VLAN >> 3), | |
2704 | VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp, | |
2705 | vmstate_virtio_net_has_vnet), | |
2706 | VMSTATE_UINT8(mac_table.multi_overflow, VirtIONet), | |
2707 | VMSTATE_UINT8(mac_table.uni_overflow, VirtIONet), | |
2708 | VMSTATE_UINT8(alluni, VirtIONet), | |
2709 | VMSTATE_UINT8(nomulti, VirtIONet), | |
2710 | VMSTATE_UINT8(nouni, VirtIONet), | |
2711 | VMSTATE_UINT8(nobcast, VirtIONet), | |
2712 | VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp, | |
2713 | vmstate_virtio_net_has_ufo), | |
2714 | VMSTATE_SINGLE_TEST(max_queues, VirtIONet, max_queues_gt_1, 0, | |
2715 | vmstate_info_uint16_equal, uint16_t), | |
2716 | VMSTATE_UINT16_TEST(curr_queues, VirtIONet, max_queues_gt_1), | |
2717 | VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp, | |
2718 | vmstate_virtio_net_tx_waiting), | |
2719 | VMSTATE_UINT64_TEST(curr_guest_offloads, VirtIONet, | |
2720 | has_ctrl_guest_offloads), | |
2721 | VMSTATE_END_OF_LIST() | |
2722 | }, | |
2723 | }; | |
2724 | ||
eb6b6c12 | 2725 | static NetClientInfo net_virtio_info = { |
f394b2e2 | 2726 | .type = NET_CLIENT_DRIVER_NIC, |
eb6b6c12 MM |
2727 | .size = sizeof(NICState), |
2728 | .can_receive = virtio_net_can_receive, | |
2729 | .receive = virtio_net_receive, | |
eb6b6c12 | 2730 | .link_status_changed = virtio_net_set_link_status, |
b1be4280 | 2731 | .query_rx_filter = virtio_net_query_rxfilter, |
b2c929f0 | 2732 | .announce = virtio_net_announce, |
eb6b6c12 MM |
2733 | }; |
2734 | ||
f56a1247 MT |
2735 | static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx) |
2736 | { | |
17a0ca55 | 2737 | VirtIONet *n = VIRTIO_NET(vdev); |
fed699f9 | 2738 | NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx)); |
f56a1247 | 2739 | assert(n->vhost_started); |
ed8b4afe | 2740 | return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx); |
f56a1247 MT |
2741 | } |
2742 | ||
2743 | static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx, | |
2744 | bool mask) | |
2745 | { | |
17a0ca55 | 2746 | VirtIONet *n = VIRTIO_NET(vdev); |
fed699f9 | 2747 | NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx)); |
f56a1247 | 2748 | assert(n->vhost_started); |
ed8b4afe | 2749 | vhost_net_virtqueue_mask(get_vhost_net(nc->peer), |
f56a1247 MT |
2750 | vdev, idx, mask); |
2751 | } | |
2752 | ||
019a3edb | 2753 | static void virtio_net_set_config_size(VirtIONet *n, uint64_t host_features) |
fbe78f4f | 2754 | { |
0cd09c3a | 2755 | virtio_add_feature(&host_features, VIRTIO_NET_F_MAC); |
a93e599d | 2756 | |
ba550851 SG |
2757 | n->config_size = virtio_feature_get_config_size(feature_sizes, |
2758 | host_features); | |
17ec5a86 FK |
2759 | } |
2760 | ||
8a253ec2 FK |
2761 | void virtio_net_set_netclient_name(VirtIONet *n, const char *name, |
2762 | const char *type) | |
2763 | { | |
2764 | /* | |
2765 | * The name can be NULL; in that case the netclient name will be type.x.
2766 | */ | |
2767 | assert(type != NULL); | |
2768 | ||
9e288406 | 2769 | g_free(n->netclient_name); |
9e288406 | 2770 | g_free(n->netclient_type); |
80e0090a | 2771 | n->netclient_name = g_strdup(name); |
8a253ec2 FK |
2772 | n->netclient_type = g_strdup(type); |
2773 | } | |
2774 | ||
9711cd0d JF |
2775 | static bool failover_unplug_primary(VirtIONet *n) |
2776 | { | |
2777 | HotplugHandler *hotplug_ctrl; | |
2778 | PCIDevice *pci_dev; | |
2779 | Error *err = NULL; | |
2780 | ||
2781 | hotplug_ctrl = qdev_get_hotplug_handler(n->primary_dev); | |
2782 | if (hotplug_ctrl) { | |
2783 | pci_dev = PCI_DEVICE(n->primary_dev); | |
2784 | pci_dev->partially_hotplugged = true; | |
2785 | hotplug_handler_unplug_request(hotplug_ctrl, n->primary_dev, &err); | |
2786 | if (err) { | |
2787 | error_report_err(err); | |
2788 | return false; | |
2789 | } | |
2790 | } else { | |
2791 | return false; | |
2792 | } | |
2793 | return true; | |
2794 | } | |
2795 | ||
2796 | static bool failover_replug_primary(VirtIONet *n, Error **errp) | |
2797 | { | |
5a0948d3 | 2798 | Error *err = NULL; |
9711cd0d JF |
2799 | HotplugHandler *hotplug_ctrl; |
2800 | PCIDevice *pdev = PCI_DEVICE(n->primary_dev); | |
2801 | ||
2802 | if (!pdev->partially_hotplugged) { | |
2803 | return true; | |
2804 | } | |
2805 | if (!n->primary_device_opts) { | |
2806 | n->primary_device_opts = qemu_opts_from_qdict( | |
2807 | qemu_find_opts("device"), | |
2808 | n->primary_device_dict, errp); | |
150ab54a | 2809 | if (!n->primary_device_opts) { |
5a0948d3 | 2810 | return false; |
9711cd0d | 2811 | } |
150ab54a | 2812 | } |
150ab54a JF |
2813 | n->primary_bus = n->primary_dev->parent_bus; |
2814 | if (!n->primary_bus) { | |
2815 | error_setg(errp, "virtio_net: couldn't find primary bus"); | |
5a0948d3 | 2816 | return false; |
9711cd0d | 2817 | } |
150ab54a JF |
2818 | qdev_set_parent_bus(n->primary_dev, n->primary_bus); |
2819 | n->primary_should_be_hidden = false; | |
2820 | qemu_opt_set_bool(n->primary_device_opts, | |
5a0948d3 MA |
2821 | "partially_hotplugged", true, &err); |
2822 | if (err) { | |
2823 | goto out; | |
2824 | } | |
150ab54a JF |
2825 | hotplug_ctrl = qdev_get_hotplug_handler(n->primary_dev); |
2826 | if (hotplug_ctrl) { | |
5a0948d3 MA |
2827 | hotplug_handler_pre_plug(hotplug_ctrl, n->primary_dev, &err); |
2828 | if (err) { | |
2829 | goto out; | |
2830 | } | |
150ab54a JF |
2831 | hotplug_handler_plug(hotplug_ctrl, n->primary_dev, errp); |
2832 | } | |
2833 | ||
2834 | out: | |
5a0948d3 MA |
2835 | error_propagate(errp, err); |
2836 | return !err; | |
9711cd0d JF |
2837 | } |
2838 | ||
2839 | static void virtio_net_handle_migration_primary(VirtIONet *n, | |
2840 | MigrationState *s) | |
2841 | { | |
2842 | bool should_be_hidden; | |
2843 | Error *err = NULL; | |
2844 | ||
2845 | should_be_hidden = atomic_read(&n->primary_should_be_hidden); | |
2846 | ||
2847 | if (!n->primary_dev) { | |
2848 | n->primary_dev = virtio_connect_failover_devices(n, n->qdev, &err); | |
2849 | if (!n->primary_dev) { | |
2850 | return; | |
2851 | } | |
2852 | } | |
2853 | ||
4dbac1ae | 2854 | if (migration_in_setup(s) && !should_be_hidden) { |
9711cd0d | 2855 | if (failover_unplug_primary(n)) { |
3cad405b MAL |
2856 | vmstate_unregister(VMSTATE_IF(n->primary_dev), |
2857 | qdev_get_vmsd(n->primary_dev), | |
9711cd0d JF |
2858 | n->primary_dev); |
2859 | qapi_event_send_unplug_primary(n->primary_device_id); | |
2860 | atomic_set(&n->primary_should_be_hidden, true); | |
2861 | } else { | |
2862 | warn_report("couldn't unplug primary device"); | |
2863 | } | |
2864 | } else if (migration_has_failed(s)) { | |
150ab54a | 2865 | /* We already unplugged the device; let's plug it back */
9711cd0d JF |
2866 | if (!failover_replug_primary(n, &err)) { |
2867 | if (err) { | |
2868 | error_report_err(err); | |
2869 | } | |
2870 | } | |
2871 | } | |
2872 | } | |
2873 | ||
2874 | static void virtio_net_migration_state_notifier(Notifier *notifier, void *data) | |
2875 | { | |
2876 | MigrationState *s = data; | |
2877 | VirtIONet *n = container_of(notifier, VirtIONet, migration_state); | |
2878 | virtio_net_handle_migration_primary(n, s); | |
2879 | } | |
2880 | ||
2881 | static int virtio_net_primary_should_be_hidden(DeviceListener *listener, | |
2882 | QemuOpts *device_opts) | |
2883 | { | |
2884 | VirtIONet *n = container_of(listener, VirtIONet, primary_listener); | |
4d0e59ac JF |
2885 | bool match_found = false; |
2886 | bool hide = false; | |
9711cd0d | 2887 | |
4d0e59ac JF |
2888 | if (!device_opts) { |
2889 | return -1; | |
2890 | } | |
9711cd0d JF |
2891 | n->primary_device_dict = qemu_opts_to_qdict(device_opts, |
2892 | n->primary_device_dict); | |
2893 | if (n->primary_device_dict) { | |
2894 | g_free(n->standby_id); | |
2895 | n->standby_id = g_strdup(qdict_get_try_str(n->primary_device_dict, | |
2896 | "failover_pair_id")); | |
2897 | } | |
4d0e59ac | 2898 | if (g_strcmp0(n->standby_id, n->netclient_name) == 0) { |
9711cd0d JF |
2899 | match_found = true; |
2900 | } else { | |
2901 | match_found = false; | |
2902 | hide = false; | |
2903 | g_free(n->standby_id); | |
2904 | n->primary_device_dict = NULL; | |
2905 | goto out; | |
2906 | } | |
2907 | ||
2908 | n->primary_device_opts = device_opts; | |
2909 | ||
2910 | /* primary_should_be_hidden is set during feature negotiation */ | |
2911 | hide = atomic_read(&n->primary_should_be_hidden); | |
2912 | ||
2913 | if (n->primary_device_dict) { | |
2914 | g_free(n->primary_device_id); | |
2915 | n->primary_device_id = g_strdup(qdict_get_try_str( | |
2916 | n->primary_device_dict, "id")); | |
2917 | if (!n->primary_device_id) { | |
2918 | warn_report("primary_device_id not set"); | |
2919 | } | |
2920 | } | |
2921 | ||
2922 | out: | |
2923 | if (match_found && hide) { | |
2924 | return 1; | |
2925 | } else if (match_found && !hide) { | |
2926 | return 0; | |
2927 | } else { | |
2928 | return -1; | |
2929 | } | |
2930 | } | |
2931 | ||
static void virtio_net_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIONet *n = VIRTIO_NET(dev);
    NetClientState *nc;
    int i;

    if (n->net_conf.mtu) {
        n->host_features |= (1ULL << VIRTIO_NET_F_MTU);
    }

    if (n->net_conf.duplex_str) {
        if (strncmp(n->net_conf.duplex_str, "half", 5) == 0) {
            n->net_conf.duplex = DUPLEX_HALF;
        } else if (strncmp(n->net_conf.duplex_str, "full", 5) == 0) {
            n->net_conf.duplex = DUPLEX_FULL;
        } else {
            error_setg(errp, "'duplex' must be 'half' or 'full'");
            return;
        }
        n->host_features |= (1ULL << VIRTIO_NET_F_SPEED_DUPLEX);
    } else {
        n->net_conf.duplex = DUPLEX_UNKNOWN;
    }

    if (n->net_conf.speed < SPEED_UNKNOWN) {
        error_setg(errp, "'speed' must be between 0 and INT_MAX");
        return;
    }
    if (n->net_conf.speed >= 0) {
        n->host_features |= (1ULL << VIRTIO_NET_F_SPEED_DUPLEX);
    }

    if (n->failover) {
        n->primary_listener.should_be_hidden =
            virtio_net_primary_should_be_hidden;
        atomic_set(&n->primary_should_be_hidden, true);
        device_listener_register(&n->primary_listener);
        n->migration_state.notify = virtio_net_migration_state_notifier;
        add_migration_state_change_notifier(&n->migration_state);
        n->host_features |= (1ULL << VIRTIO_NET_F_STANDBY);
    }

    virtio_net_set_config_size(n, n->host_features);
    virtio_init(vdev, "virtio-net", VIRTIO_ID_NET, n->config_size);

    /*
     * We set a lower limit on RX queue size to what it always was.
     * Guests that want a smaller ring can always resize it without
     * help from us (using virtio 1 and up).
     */
    if (n->net_conf.rx_queue_size < VIRTIO_NET_RX_QUEUE_MIN_SIZE ||
        n->net_conf.rx_queue_size > VIRTQUEUE_MAX_SIZE ||
        !is_power_of_2(n->net_conf.rx_queue_size)) {
        error_setg(errp, "Invalid rx_queue_size (= %" PRIu16 "), "
                   "must be a power of 2 between %d and %d.",
                   n->net_conf.rx_queue_size, VIRTIO_NET_RX_QUEUE_MIN_SIZE,
                   VIRTQUEUE_MAX_SIZE);
        virtio_cleanup(vdev);
        return;
    }

    if (n->net_conf.tx_queue_size < VIRTIO_NET_TX_QUEUE_MIN_SIZE ||
        n->net_conf.tx_queue_size > VIRTQUEUE_MAX_SIZE ||
        !is_power_of_2(n->net_conf.tx_queue_size)) {
        error_setg(errp, "Invalid tx_queue_size (= %" PRIu16 "), "
                   "must be a power of 2 between %d and %d",
                   n->net_conf.tx_queue_size, VIRTIO_NET_TX_QUEUE_MIN_SIZE,
                   VIRTQUEUE_MAX_SIZE);
        virtio_cleanup(vdev);
        return;
    }

    n->max_queues = MAX(n->nic_conf.peers.queues, 1);
    if (n->max_queues * 2 + 1 > VIRTIO_QUEUE_MAX) {
        error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
                   "must be a positive integer less than %d.",
                   n->max_queues, (VIRTIO_QUEUE_MAX - 1) / 2);
        virtio_cleanup(vdev);
        return;
    }
    n->vqs = g_malloc0(sizeof(VirtIONetQueue) * n->max_queues);
    n->curr_queues = 1;
    n->tx_timeout = n->net_conf.txtimer;

    if (n->net_conf.tx && strcmp(n->net_conf.tx, "timer")
                       && strcmp(n->net_conf.tx, "bh")) {
        warn_report("virtio-net: "
                    "Unknown option tx=%s, valid options: \"timer\" \"bh\"",
                    n->net_conf.tx);
        error_printf("Defaulting to \"bh\"");
    }

    n->net_conf.tx_queue_size = MIN(virtio_net_max_tx_queue_size(n),
                                    n->net_conf.tx_queue_size);

    for (i = 0; i < n->max_queues; i++) {
        virtio_net_add_queue(n, i);
    }

    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
    qemu_macaddr_default_if_unset(&n->nic_conf.macaddr);
    memcpy(&n->mac[0], &n->nic_conf.macaddr, sizeof(n->mac));
    n->status = VIRTIO_NET_S_LINK_UP;
    qemu_announce_timer_reset(&n->announce_timer, migrate_announce_params(),
                              QEMU_CLOCK_VIRTUAL,
                              virtio_net_announce_timer, n);
    n->announce_timer.round = 0;

    if (n->netclient_type) {
        /*
         * Happens when virtio_net_set_netclient_name has been called.
         */
        n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
                              n->netclient_type, n->netclient_name, n);
    } else {
        n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
                              object_get_typename(OBJECT(dev)), dev->id, n);
    }

    peer_test_vnet_hdr(n);
    if (peer_has_vnet_hdr(n)) {
        for (i = 0; i < n->max_queues; i++) {
            qemu_using_vnet_hdr(qemu_get_subqueue(n->nic, i)->peer, true);
        }
        n->host_hdr_len = sizeof(struct virtio_net_hdr);
    } else {
        n->host_hdr_len = 0;
    }

    qemu_format_nic_info_str(qemu_get_queue(n->nic), n->nic_conf.macaddr.a);

    n->vqs[0].tx_waiting = 0;
    n->tx_burst = n->net_conf.txburst;
    virtio_net_set_mrg_rx_bufs(n, 0, 0);
    n->promisc = 1; /* for compatibility */

    n->mac_table.macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);

    n->vlans = g_malloc0(MAX_VLAN >> 3);

    nc = qemu_get_queue(n->nic);
    nc->rxfilter_notify_enabled = 1;

    QTAILQ_INIT(&n->rsc_chains);
    n->qdev = dev;
}

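/*
 * Tear-down mirrors realize in reverse: stop the backend via
 * virtio_net_set_status(vdev, 0), release the filter tables and failover
 * state, delete the per-queue and control virtqueues, then free the NIC
 * and the common virtio state.
 */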
static void virtio_net_device_unrealize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIONet *n = VIRTIO_NET(dev);
    int i, max_queues;

    /* This will stop the vhost backend if appropriate. */
    virtio_net_set_status(vdev, 0);

    g_free(n->netclient_name);
    n->netclient_name = NULL;
    g_free(n->netclient_type);
    n->netclient_type = NULL;

    g_free(n->mac_table.macs);
    g_free(n->vlans);

    if (n->failover) {
        g_free(n->primary_device_id);
        g_free(n->standby_id);
        qobject_unref(n->primary_device_dict);
        n->primary_device_dict = NULL;
    }

    max_queues = n->multiqueue ? n->max_queues : 1;
    for (i = 0; i < max_queues; i++) {
        virtio_net_del_queue(n, i);
    }
    /* also delete the control vq */
    virtio_del_queue(vdev, max_queues * 2);
    qemu_announce_timer_del(&n->announce_timer, false);
    g_free(n->vqs);
    qemu_del_nic(n->nic);
    virtio_net_rsc_cleanup(n);
    virtio_cleanup(vdev);
}

static void virtio_net_instance_init(Object *obj)
{
    VirtIONet *n = VIRTIO_NET(obj);

    /*
     * The default config_size is sizeof(struct virtio_net_config).
     * Can be overridden with virtio_net_set_config_size.
     */
    n->config_size = sizeof(struct virtio_net_config);
    device_add_bootindex_property(obj, &n->nic_conf.bootindex,
                                  "bootindex", "/ethernet-phy@0",
                                  DEVICE(n), NULL);
}

static int virtio_net_pre_save(void *opaque)
{
    VirtIONet *n = opaque;

    /* At this point, backend must be stopped, otherwise
     * it might keep writing to memory. */
    assert(!n->vhost_started);

    return 0;
}

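/*
 * Failover support for migration: dev_unplug_pending() is installed as
 * .dev_unplug_pending of vmstate_virtio_net below and forwards to the
 * device class hook, which for virtio-net is primary_unplug_pending().
 * It reports whether the primary device still has a guest-visible
 * hot-unplug outstanding, so migration can wait for the unplug to finish
 * before saving device state.
 */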
static bool primary_unplug_pending(void *opaque)
{
    DeviceState *dev = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIONet *n = VIRTIO_NET(vdev);

    if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_STANDBY)) {
        return false;
    }
    return n->primary_dev ? n->primary_dev->pending_deleted_event : false;
}

static bool dev_unplug_pending(void *opaque)
{
    DeviceState *dev = opaque;
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);

    return vdc->primary_unplug_pending(dev);
}

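/*
 * Outer vmstate wrapper: VMSTATE_VIRTIO_DEVICE hands off to the generic
 * virtio save/load code, which in turn uses the per-device
 * vmstate_virtio_net_device installed as vdc->vmsd in class_init below.
 */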
static const VMStateDescription vmstate_virtio_net = {
    .name = "virtio-net",
    .minimum_version_id = VIRTIO_NET_VM_VERSION,
    .version_id = VIRTIO_NET_VM_VERSION,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE,
        VMSTATE_END_OF_LIST()
    },
    .pre_save = virtio_net_pre_save,
    .dev_unplug_pending = dev_unplug_pending,
};

static Property virtio_net_properties[] = {
    DEFINE_PROP_BIT64("csum", VirtIONet, host_features,
                      VIRTIO_NET_F_CSUM, true),
    DEFINE_PROP_BIT64("guest_csum", VirtIONet, host_features,
                      VIRTIO_NET_F_GUEST_CSUM, true),
    DEFINE_PROP_BIT64("gso", VirtIONet, host_features, VIRTIO_NET_F_GSO, true),
    DEFINE_PROP_BIT64("guest_tso4", VirtIONet, host_features,
                      VIRTIO_NET_F_GUEST_TSO4, true),
    DEFINE_PROP_BIT64("guest_tso6", VirtIONet, host_features,
                      VIRTIO_NET_F_GUEST_TSO6, true),
    DEFINE_PROP_BIT64("guest_ecn", VirtIONet, host_features,
                      VIRTIO_NET_F_GUEST_ECN, true),
    DEFINE_PROP_BIT64("guest_ufo", VirtIONet, host_features,
                      VIRTIO_NET_F_GUEST_UFO, true),
    DEFINE_PROP_BIT64("guest_announce", VirtIONet, host_features,
                      VIRTIO_NET_F_GUEST_ANNOUNCE, true),
    DEFINE_PROP_BIT64("host_tso4", VirtIONet, host_features,
                      VIRTIO_NET_F_HOST_TSO4, true),
    DEFINE_PROP_BIT64("host_tso6", VirtIONet, host_features,
                      VIRTIO_NET_F_HOST_TSO6, true),
    DEFINE_PROP_BIT64("host_ecn", VirtIONet, host_features,
                      VIRTIO_NET_F_HOST_ECN, true),
    DEFINE_PROP_BIT64("host_ufo", VirtIONet, host_features,
                      VIRTIO_NET_F_HOST_UFO, true),
    DEFINE_PROP_BIT64("mrg_rxbuf", VirtIONet, host_features,
                      VIRTIO_NET_F_MRG_RXBUF, true),
    DEFINE_PROP_BIT64("status", VirtIONet, host_features,
                      VIRTIO_NET_F_STATUS, true),
    DEFINE_PROP_BIT64("ctrl_vq", VirtIONet, host_features,
                      VIRTIO_NET_F_CTRL_VQ, true),
    DEFINE_PROP_BIT64("ctrl_rx", VirtIONet, host_features,
                      VIRTIO_NET_F_CTRL_RX, true),
    DEFINE_PROP_BIT64("ctrl_vlan", VirtIONet, host_features,
                      VIRTIO_NET_F_CTRL_VLAN, true),
    DEFINE_PROP_BIT64("ctrl_rx_extra", VirtIONet, host_features,
                      VIRTIO_NET_F_CTRL_RX_EXTRA, true),
    DEFINE_PROP_BIT64("ctrl_mac_addr", VirtIONet, host_features,
                      VIRTIO_NET_F_CTRL_MAC_ADDR, true),
    DEFINE_PROP_BIT64("ctrl_guest_offloads", VirtIONet, host_features,
                      VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, true),
    DEFINE_PROP_BIT64("mq", VirtIONet, host_features, VIRTIO_NET_F_MQ, false),
    DEFINE_PROP_BIT64("guest_rsc_ext", VirtIONet, host_features,
                      VIRTIO_NET_F_RSC_EXT, false),
    DEFINE_PROP_UINT32("rsc_interval", VirtIONet, rsc_timeout,
                       VIRTIO_NET_RSC_DEFAULT_INTERVAL),
    DEFINE_NIC_PROPERTIES(VirtIONet, nic_conf),
    DEFINE_PROP_UINT32("x-txtimer", VirtIONet, net_conf.txtimer,
                       TX_TIMER_INTERVAL),
    DEFINE_PROP_INT32("x-txburst", VirtIONet, net_conf.txburst, TX_BURST),
    DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx),
    DEFINE_PROP_UINT16("rx_queue_size", VirtIONet, net_conf.rx_queue_size,
                       VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE),
    DEFINE_PROP_UINT16("tx_queue_size", VirtIONet, net_conf.tx_queue_size,
                       VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE),
    DEFINE_PROP_UINT16("host_mtu", VirtIONet, net_conf.mtu, 0),
    DEFINE_PROP_BOOL("x-mtu-bypass-backend", VirtIONet, mtu_bypass_backend,
                     true),
    DEFINE_PROP_INT32("speed", VirtIONet, net_conf.speed, SPEED_UNKNOWN),
    DEFINE_PROP_STRING("duplex", VirtIONet, net_conf.duplex_str),
    DEFINE_PROP_BOOL("failover", VirtIONet, failover, false),
    DEFINE_PROP_END_OF_LIST(),
};

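/*
 * The properties above are set per device on the command line (through
 * the virtio-net-pci proxy); possible invocations, with illustrative
 * netdev IDs and values:
 *
 *   -device virtio-net-pci,netdev=hostnet0,mq=on,rx_queue_size=1024
 *   -device virtio-net-pci,netdev=hostnet1,speed=10000,duplex=full
 */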
static void virtio_net_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    device_class_set_props(dc, virtio_net_properties);
    dc->vmsd = &vmstate_virtio_net;
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
    vdc->realize = virtio_net_device_realize;
    vdc->unrealize = virtio_net_device_unrealize;
    vdc->get_config = virtio_net_get_config;
    vdc->set_config = virtio_net_set_config;
    vdc->get_features = virtio_net_get_features;
    vdc->set_features = virtio_net_set_features;
    vdc->bad_features = virtio_net_bad_features;
    vdc->reset = virtio_net_reset;
    vdc->set_status = virtio_net_set_status;
    vdc->guest_notifier_mask = virtio_net_guest_notifier_mask;
    vdc->guest_notifier_pending = virtio_net_guest_notifier_pending;
    vdc->legacy_features |= (0x1 << VIRTIO_NET_F_GSO);
    vdc->post_load = virtio_net_post_load_virtio;
    vdc->vmsd = &vmstate_virtio_net_device;
    vdc->primary_unplug_pending = primary_unplug_pending;
}

static const TypeInfo virtio_net_info = {
    .name = TYPE_VIRTIO_NET,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIONet),
    .instance_init = virtio_net_instance_init,
    .class_init = virtio_net_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_net_info);
}

type_init(virtio_register_types)