/*
 * xen paravirt network card backend
 *
 * (c) Gerd Hoffmann <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; under version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/wait.h>

#include "net/net.h"
#include "net/checksum.h"
#include "net/util.h"
#include "hw/xen/xen-legacy-backend.h"

#include "hw/xen/interface/io/netif.h"

/* ------------------------------------------------------------- */

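/* Per-instance state of one Xen paravirt network backend device. */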
struct XenNetDev {
    struct XenLegacyDevice xendev;  /* must be first */
    char *mac;
    int tx_work;
    int tx_ring_ref;
    int rx_ring_ref;
    struct netif_tx_sring *txs;
    struct netif_rx_sring *rxs;
    netif_tx_back_ring_t tx_ring;
    netif_rx_back_ring_t rx_ring;
    NICConf conf;
    NICState *nic;
};

/* ------------------------------------------------------------- */

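/*
 * Queue a single response on the tx ring and notify the frontend if
 * needed.  If the response index catches up with req_cons, check whether
 * the frontend queued more requests in the meantime and flag tx_work so
 * net_tx_packets() makes another pass.
 */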
static void net_tx_response(struct XenNetDev *netdev, netif_tx_request_t *txp, int8_t st)
{
    RING_IDX i = netdev->tx_ring.rsp_prod_pvt;
    netif_tx_response_t *resp;
    int notify;

    resp = RING_GET_RESPONSE(&netdev->tx_ring, i);
    resp->id = txp->id;
    resp->status = st;

#if 0
    if (txp->flags & NETTXF_extra_info) {
        RING_GET_RESPONSE(&netdev->tx_ring, ++i)->status = NETIF_RSP_NULL;
    }
#endif

    netdev->tx_ring.rsp_prod_pvt = ++i;
    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netdev->tx_ring, notify);
    if (notify) {
        xen_pv_send_notify(&netdev->xendev);
    }

    if (i == netdev->tx_ring.req_cons) {
        int more_to_do;
        RING_FINAL_CHECK_FOR_REQUESTS(&netdev->tx_ring, more_to_do);
        if (more_to_do) {
            netdev->tx_work++;
        }
    }
}

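/* Report a failed tx request back to the frontend. */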
static void net_tx_error(struct XenNetDev *netdev, netif_tx_request_t *txp, RING_IDX end)
{
#if 0
    /*
     * Hmm, why does netback fail everything in the ring?
     * Should we do that even when not supporting SG and TSO?
     */
    RING_IDX cons = netdev->tx_ring.req_cons;

    do {
        make_tx_response(netif, txp, NETIF_RSP_ERROR);
        if (cons >= end) {
            break;
        }
        txp = RING_GET_REQUEST(&netdev->tx_ring, cons++);
    } while (1);
    netdev->tx_ring.req_cons = cons;
    netif_schedule_work(netif);
    netif_put(netif);
#else
    net_tx_response(netdev, txp, NETIF_RSP_ERROR);
#endif
}

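/*
 * Drain the tx ring: copy each request out of the shared ring, map the
 * granted page read-only, hand the frame to the qemu network layer and
 * acknowledge the request.  Packets with a blank checksum are copied to
 * a temporary buffer first so the checksum can be filled in.
 */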
static void net_tx_packets(struct XenNetDev *netdev)
{
    netif_tx_request_t txreq;
    RING_IDX rc, rp;
    void *page;
    void *tmpbuf = NULL;

    for (;;) {
        rc = netdev->tx_ring.req_cons;
        rp = netdev->tx_ring.sring->req_prod;
        xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

        while (rc != rp) {
            if (RING_REQUEST_CONS_OVERFLOW(&netdev->tx_ring, rc)) {
                break;
            }
            memcpy(&txreq, RING_GET_REQUEST(&netdev->tx_ring, rc), sizeof(txreq));
            netdev->tx_ring.req_cons = ++rc;

#if 1
            /* should not happen in theory, we don't announce the *
             * feature-{sg,gso,whatelse} flags in xenstore (yet?) */
            if (txreq.flags & NETTXF_extra_info) {
                xen_pv_printf(&netdev->xendev, 0, "FIXME: extra info flag\n");
                net_tx_error(netdev, &txreq, rc);
                continue;
            }
            if (txreq.flags & NETTXF_more_data) {
                xen_pv_printf(&netdev->xendev, 0, "FIXME: more data flag\n");
                net_tx_error(netdev, &txreq, rc);
                continue;
            }
#endif

            if (txreq.size < 14) {
                xen_pv_printf(&netdev->xendev, 0, "bad packet size: %d\n",
                              txreq.size);
                net_tx_error(netdev, &txreq, rc);
                continue;
            }

            if ((txreq.offset + txreq.size) > XC_PAGE_SIZE) {
                xen_pv_printf(&netdev->xendev, 0, "error: page crossing\n");
                net_tx_error(netdev, &txreq, rc);
                continue;
            }

            xen_pv_printf(&netdev->xendev, 3,
                          "tx packet ref %d, off %d, len %d, flags 0x%x%s%s%s%s\n",
                          txreq.gref, txreq.offset, txreq.size, txreq.flags,
                          (txreq.flags & NETTXF_csum_blank)     ? " csum_blank"     : "",
                          (txreq.flags & NETTXF_data_validated) ? " data_validated" : "",
                          (txreq.flags & NETTXF_more_data)      ? " more_data"      : "",
                          (txreq.flags & NETTXF_extra_info)     ? " extra_info"     : "");

            page = xen_be_map_grant_ref(&netdev->xendev, txreq.gref,
                                        PROT_READ);
            if (page == NULL) {
                xen_pv_printf(&netdev->xendev, 0,
                              "error: tx gref dereference failed (%d)\n",
                              txreq.gref);
                net_tx_error(netdev, &txreq, rc);
                continue;
            }
            if (txreq.flags & NETTXF_csum_blank) {
                /* have read-only mapping -> can't fill checksum in-place */
                if (!tmpbuf) {
                    tmpbuf = g_malloc(XC_PAGE_SIZE);
                }
                memcpy(tmpbuf, page + txreq.offset, txreq.size);
                net_checksum_calculate(tmpbuf, txreq.size);
                qemu_send_packet(qemu_get_queue(netdev->nic), tmpbuf,
                                 txreq.size);
            } else {
                qemu_send_packet(qemu_get_queue(netdev->nic),
                                 page + txreq.offset, txreq.size);
            }
            xen_be_unmap_grant_ref(&netdev->xendev, page);
            net_tx_response(netdev, &txreq, NETIF_RSP_OKAY);
        }
        if (!netdev->tx_work) {
            break;
        }
        netdev->tx_work = 0;
    }
    g_free(tmpbuf);
}

/* ------------------------------------------------------------- */

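/* Queue a single rx response and notify the frontend if needed. */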
static void net_rx_response(struct XenNetDev *netdev,
                            netif_rx_request_t *req, int8_t st,
                            uint16_t offset, uint16_t size,
                            uint16_t flags)
{
    RING_IDX i = netdev->rx_ring.rsp_prod_pvt;
    netif_rx_response_t *resp;
    int notify;

    resp = RING_GET_RESPONSE(&netdev->rx_ring, i);
    resp->offset = offset;
    resp->flags = flags;
    resp->id = req->id;
    resp->status = (int16_t)size;
    if (st < 0) {
        resp->status = (int16_t)st;
    }

    xen_pv_printf(&netdev->xendev, 3,
                  "rx response: idx %d, status %d, flags 0x%x\n",
                  i, resp->status, resp->flags);

    netdev->rx_ring.rsp_prod_pvt = ++i;
    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netdev->rx_ring, notify);
    if (notify) {
        xen_pv_send_notify(&netdev->xendev);
    }
}

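/*
 * Write incoming frames at a two byte offset into the granted page so
 * the IP header behind the 14 byte Ethernet header ends up 32-bit
 * aligned (same idea as Linux's NET_IP_ALIGN).
 */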
#define NET_IP_ALIGN 2

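/*
 * Receive path: called by the qemu network layer for each frame destined
 * to the guest.  Copies the frame into the page granted by the next rx
 * request and queues a response.  Returns 0 while no rx request is
 * available, so the packet stays queued until the next event.
 */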
static ssize_t net_rx_packet(NetClientState *nc, const uint8_t *buf, size_t size)
{
    struct XenNetDev *netdev = qemu_get_nic_opaque(nc);
    netif_rx_request_t rxreq;
    RING_IDX rc, rp;
    void *page;

    if (netdev->xendev.be_state != XenbusStateConnected) {
        return -1;
    }

    rc = netdev->rx_ring.req_cons;
    rp = netdev->rx_ring.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    if (rc == rp || RING_REQUEST_CONS_OVERFLOW(&netdev->rx_ring, rc)) {
        return 0;
    }
    if (size > XC_PAGE_SIZE - NET_IP_ALIGN) {
        xen_pv_printf(&netdev->xendev, 0, "packet too big (%lu > %ld)",
                      (unsigned long)size, XC_PAGE_SIZE - NET_IP_ALIGN);
        return -1;
    }

    memcpy(&rxreq, RING_GET_REQUEST(&netdev->rx_ring, rc), sizeof(rxreq));
    netdev->rx_ring.req_cons = ++rc;

    page = xen_be_map_grant_ref(&netdev->xendev, rxreq.gref, PROT_WRITE);
    if (page == NULL) {
        xen_pv_printf(&netdev->xendev, 0,
                      "error: rx gref dereference failed (%d)\n",
                      rxreq.gref);
        net_rx_response(netdev, &rxreq, NETIF_RSP_ERROR, 0, 0, 0);
        return -1;
    }
    memcpy(page + NET_IP_ALIGN, buf, size);
    xen_be_unmap_grant_ref(&netdev->xendev, page);
    net_rx_response(netdev, &rxreq, NETIF_RSP_OKAY, NET_IP_ALIGN, size, 0);

    return size;
}

/* ------------------------------------------------------------- */

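/* qemu network layer hooks for this backend. */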
static NetClientInfo net_xen_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .receive = net_rx_packet,
};

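/*
 * Called once at backend setup: read the MAC address from xenstore,
 * create the qemu NIC and advertise the features we support.
 */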
static int net_init(struct XenLegacyDevice *xendev)
{
    struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev);

    /* read xenstore entries */
    if (netdev->mac == NULL) {
        netdev->mac = xenstore_read_be_str(&netdev->xendev, "mac");
    }

    /* do we have all we need? */
    if (netdev->mac == NULL) {
        return -1;
    }

    if (net_parse_macaddr(netdev->conf.macaddr.a, netdev->mac) < 0) {
        return -1;
    }

    netdev->nic = qemu_new_nic(&net_xen_info, &netdev->conf,
                               "xen", NULL, netdev);

    snprintf(qemu_get_queue(netdev->nic)->info_str,
             sizeof(qemu_get_queue(netdev->nic)->info_str),
             "nic: xenbus vif macaddr=%s", netdev->mac);

    /* fill info */
    xenstore_write_be_int(&netdev->xendev, "feature-rx-copy", 1);
    xenstore_write_be_int(&netdev->xendev, "feature-rx-flip", 0);

    return 0;
}

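/*
 * Called when the frontend is ready: read the ring refs and event
 * channel from the frontend's xenstore directory, require rx-copy,
 * map both shared rings and bind the event channel.
 */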
static int net_connect(struct XenLegacyDevice *xendev)
{
    struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev);
    int rx_copy;

    if (xenstore_read_fe_int(&netdev->xendev, "tx-ring-ref",
                             &netdev->tx_ring_ref) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&netdev->xendev, "rx-ring-ref",
                             &netdev->rx_ring_ref) == -1) {
        return 1;
    }
    if (xenstore_read_fe_int(&netdev->xendev, "event-channel",
                             &netdev->xendev.remote_port) == -1) {
        return -1;
    }

    if (xenstore_read_fe_int(&netdev->xendev, "request-rx-copy", &rx_copy) == -1) {
        rx_copy = 0;
    }
    if (rx_copy == 0) {
        xen_pv_printf(&netdev->xendev, 0,
                      "frontend doesn't support rx-copy.\n");
        return -1;
    }

    netdev->txs = xen_be_map_grant_ref(&netdev->xendev,
                                       netdev->tx_ring_ref,
                                       PROT_READ | PROT_WRITE);
    if (!netdev->txs) {
        return -1;
    }
    netdev->rxs = xen_be_map_grant_ref(&netdev->xendev,
                                       netdev->rx_ring_ref,
                                       PROT_READ | PROT_WRITE);
    if (!netdev->rxs) {
        xen_be_unmap_grant_ref(&netdev->xendev, netdev->txs);
        netdev->txs = NULL;
        return -1;
    }
    BACK_RING_INIT(&netdev->tx_ring, netdev->txs, XC_PAGE_SIZE);
    BACK_RING_INIT(&netdev->rx_ring, netdev->rxs, XC_PAGE_SIZE);

    xen_be_bind_evtchn(&netdev->xendev);

    xen_pv_printf(&netdev->xendev, 1, "ok: tx-ring-ref %d, rx-ring-ref %d, "
                  "remote port %d, local port %d\n",
                  netdev->tx_ring_ref, netdev->rx_ring_ref,
                  netdev->xendev.remote_port, netdev->xendev.local_port);

    net_tx_packets(netdev);
    return 0;
}

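/* Tear down the connection: unbind the event channel and unmap the rings. */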
static void net_disconnect(struct XenLegacyDevice *xendev)
{
    struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev);

    xen_pv_unbind_evtchn(&netdev->xendev);

    if (netdev->txs) {
        xen_be_unmap_grant_ref(&netdev->xendev, netdev->txs);
        netdev->txs = NULL;
    }
    if (netdev->rxs) {
        xen_be_unmap_grant_ref(&netdev->xendev, netdev->rxs);
        netdev->rxs = NULL;
    }
}

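/*
 * Event channel kick from the frontend: process pending tx requests and
 * flush any rx packets qemu has queued while the ring was full.
 */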
static void net_event(struct XenLegacyDevice *xendev)
{
    struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev);
    net_tx_packets(netdev);
    qemu_flush_queued_packets(qemu_get_queue(netdev->nic));
}

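/* Final cleanup: delete the qemu NIC and free the MAC string. */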
static int net_free(struct XenLegacyDevice *xendev)
{
    struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev);

    if (netdev->nic) {
        qemu_del_nic(netdev->nic);
        netdev->nic = NULL;
    }
    g_free(netdev->mac);
    netdev->mac = NULL;
    return 0;
}

/* ------------------------------------------------------------- */

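/* Operations table hooked into the legacy Xen backend framework. */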
struct XenDevOps xen_netdev_ops = {
    .size       = sizeof(struct XenNetDev),
    .flags      = DEVOPS_FLAG_NEED_GNTDEV,
    .init       = net_init,
    .initialise = net_connect,
    .event      = net_event,
    .disconnect = net_disconnect,
    .free       = net_free,
};