/*
 *  xen paravirt network card backend
 *
 *  (c) Gerd Hoffmann <[email protected]>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; under version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <signal.h>
#include <inttypes.h>
#include <fcntl.h>
#include <errno.h>
#include <pthread.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/wait.h>

#include <xs.h>
#include <xenctrl.h>
#include <xen/io/xenbus.h>
#include <xen/io/netif.h>

#include "hw.h"
#include "net.h"
#include "net/checksum.h"
#include "net/util.h"
#include "qemu-char.h"
#include "xen_backend.h"

/* ------------------------------------------------------------- */

struct XenNetDev {
    struct XenDevice      xendev;  /* must be first */
    char                  *mac;
    int                   tx_work;
    int                   tx_ring_ref;
    int                   rx_ring_ref;
    struct netif_tx_sring *txs;
    struct netif_rx_sring *rxs;
    netif_tx_back_ring_t  tx_ring;
    netif_rx_back_ring_t  rx_ring;
    NICConf               conf;
    NICState              *nic;
};

/* ------------------------------------------------------------- */

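/*
 * Queue a single response on the tx back ring and push it to the
 * frontend, raising the event channel if the ring macros say a
 * notification is needed.  If the response production index catches
 * up with req_cons, check whether more requests were queued meanwhile
 * and, if so, flag another pass via tx_work.
 */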
static void net_tx_response(struct XenNetDev *netdev, netif_tx_request_t *txp, int8_t st)
{
    RING_IDX i = netdev->tx_ring.rsp_prod_pvt;
    netif_tx_response_t *resp;
    int notify;

    resp = RING_GET_RESPONSE(&netdev->tx_ring, i);
    resp->id     = txp->id;
    resp->status = st;

#if 0
    if (txp->flags & NETTXF_extra_info)
        RING_GET_RESPONSE(&netdev->tx_ring, ++i)->status = NETIF_RSP_NULL;
#endif

    netdev->tx_ring.rsp_prod_pvt = ++i;
    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netdev->tx_ring, notify);
    if (notify)
        xen_be_send_notify(&netdev->xendev);

    if (i == netdev->tx_ring.req_cons) {
        int more_to_do;
        RING_FINAL_CHECK_FOR_REQUESTS(&netdev->tx_ring, more_to_do);
        if (more_to_do)
            netdev->tx_work++;
    }
}

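/*
 * Fail a tx request.  The disabled branch mirrors Linux netback, which
 * fails every outstanding request up to 'end'; since this backend does
 * not advertise SG/TSO it only reports an error for the single
 * offending request.
 */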
static void net_tx_error(struct XenNetDev *netdev, netif_tx_request_t *txp, RING_IDX end)
{
#if 0
    /*
     * Hmm, why does netback fail everything in the ring?
     * Should we do that even when not supporting SG and TSO?
     */
    RING_IDX cons = netdev->tx_ring.req_cons;

    do {
        make_tx_response(netif, txp, NETIF_RSP_ERROR);
        if (cons >= end)
            break;
        txp = RING_GET_REQUEST(&netdev->tx_ring, cons++);
    } while (1);
    netdev->tx_ring.req_cons = cons;
    netif_schedule_work(netif);
    netif_put(netif);
#else
    net_tx_response(netdev, txp, NETIF_RSP_ERROR);
#endif
}

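/*
 * Main transmit loop: pull requests off the tx ring, map the granted
 * page read-only, hand the frame to the qemu network layer, and
 * complete each request with OKAY or ERROR.  Requests that need the
 * checksum filled in are copied to a scratch buffer first, because
 * the grant is mapped read-only.  The outer loop repeats while
 * net_tx_response() flagged more work via tx_work.
 */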
static void net_tx_packets(struct XenNetDev *netdev)
{
    netif_tx_request_t txreq;
    RING_IDX rc, rp;
    void *page;
    void *tmpbuf = NULL;

    for (;;) {
        rc = netdev->tx_ring.req_cons;
        rp = netdev->tx_ring.sring->req_prod;
        xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

        while ((rc != rp)) {
            if (RING_REQUEST_CONS_OVERFLOW(&netdev->tx_ring, rc))
                break;
            memcpy(&txreq, RING_GET_REQUEST(&netdev->tx_ring, rc), sizeof(txreq));
            netdev->tx_ring.req_cons = ++rc;

#if 1
            /* should not happen in theory, we don't announce the
             * feature-{sg,gso,whatelse} flags in xenstore (yet?) */
            if (txreq.flags & NETTXF_extra_info) {
                xen_be_printf(&netdev->xendev, 0, "FIXME: extra info flag\n");
                net_tx_error(netdev, &txreq, rc);
                continue;
            }
            if (txreq.flags & NETTXF_more_data) {
                xen_be_printf(&netdev->xendev, 0, "FIXME: more data flag\n");
                net_tx_error(netdev, &txreq, rc);
                continue;
            }
#endif

            if (txreq.size < 14) {
                xen_be_printf(&netdev->xendev, 0, "bad packet size: %d\n", txreq.size);
                net_tx_error(netdev, &txreq, rc);
                continue;
            }

            if ((txreq.offset + txreq.size) > XC_PAGE_SIZE) {
                xen_be_printf(&netdev->xendev, 0, "error: page crossing\n");
                net_tx_error(netdev, &txreq, rc);
                continue;
            }

            xen_be_printf(&netdev->xendev, 3, "tx packet ref %d, off %d, len %d, flags 0x%x%s%s%s%s\n",
                          txreq.gref, txreq.offset, txreq.size, txreq.flags,
                          (txreq.flags & NETTXF_csum_blank)     ? " csum_blank"     : "",
                          (txreq.flags & NETTXF_data_validated) ? " data_validated" : "",
                          (txreq.flags & NETTXF_more_data)      ? " more_data"      : "",
                          (txreq.flags & NETTXF_extra_info)     ? " extra_info"     : "");

            page = xc_gnttab_map_grant_ref(netdev->xendev.gnttabdev,
                                           netdev->xendev.dom,
                                           txreq.gref, PROT_READ);
            if (page == NULL) {
                xen_be_printf(&netdev->xendev, 0, "error: tx gref dereference failed (%d)\n",
                              txreq.gref);
                net_tx_error(netdev, &txreq, rc);
                continue;
            }
            if (txreq.flags & NETTXF_csum_blank) {
                /* have read-only mapping -> can't fill checksum in-place */
                if (!tmpbuf)
                    tmpbuf = qemu_malloc(XC_PAGE_SIZE);
                memcpy(tmpbuf, page + txreq.offset, txreq.size);
                net_checksum_calculate(tmpbuf, txreq.size);
                qemu_send_packet(&netdev->nic->nc, tmpbuf, txreq.size);
            } else {
                qemu_send_packet(&netdev->nic->nc, page + txreq.offset, txreq.size);
            }
            xc_gnttab_munmap(netdev->xendev.gnttabdev, page, 1);
            net_tx_response(netdev, &txreq, NETIF_RSP_OKAY);
        }
        if (!netdev->tx_work)
            break;
        netdev->tx_work = 0;
    }
    qemu_free(tmpbuf);
}

/* ------------------------------------------------------------- */

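/*
 * Queue an rx response on the rx back ring.  'size' becomes the status
 * of a successful receive; a negative 'st' overrides it with an error
 * code.  Notify the frontend through the event channel if the ring
 * macros request it.
 */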
static void net_rx_response(struct XenNetDev *netdev,
                            netif_rx_request_t *req, int8_t st,
                            uint16_t offset, uint16_t size,
                            uint16_t flags)
{
    RING_IDX i = netdev->rx_ring.rsp_prod_pvt;
    netif_rx_response_t *resp;
    int notify;

    resp = RING_GET_RESPONSE(&netdev->rx_ring, i);
    resp->offset = offset;
    resp->flags  = flags;
    resp->id     = req->id;
    resp->status = (int16_t)size;
    if (st < 0)
        resp->status = (int16_t)st;

    xen_be_printf(&netdev->xendev, 3, "rx response: idx %d, status %d, flags 0x%x\n",
                  i, resp->status, resp->flags);

    netdev->rx_ring.rsp_prod_pvt = ++i;
    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netdev->rx_ring, notify);
    if (notify)
        xen_be_send_notify(&netdev->xendev);
}

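/*
 * Frames are copied into the guest page at a 2 byte offset, the usual
 * NET_IP_ALIGN trick: with a 14 byte Ethernet header this leaves the
 * IP header 4-byte aligned in the frontend's receive buffer.
 */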
#define NET_IP_ALIGN 2

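/*
 * can_receive callback: only accept packets while the backend is in
 * the Connected state and the frontend has posted at least one free
 * rx buffer.
 */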
static int net_rx_ok(VLANClientState *nc)
{
    struct XenNetDev *netdev = DO_UPCAST(NICState, nc, nc)->opaque;
    RING_IDX rc, rp;

    if (netdev->xendev.be_state != XenbusStateConnected)
        return 0;

    rc = netdev->rx_ring.req_cons;
    rp = netdev->rx_ring.sring->req_prod;
    xen_rmb();

    if (rc == rp || RING_REQUEST_CONS_OVERFLOW(&netdev->rx_ring, rc)) {
        xen_be_printf(&netdev->xendev, 2, "%s: no rx buffers (%d/%d)\n",
                      __FUNCTION__, rc, rp);
        return 0;
    }
    return 1;
}

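/*
 * receive callback: consume one rx request, map the granted page
 * writable, copy the frame in at NET_IP_ALIGN, and complete the
 * request.  Returns the number of bytes consumed, or -1 to drop the
 * packet when no buffer is available or it does not fit in a page.
 */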
static ssize_t net_rx_packet(VLANClientState *nc, const uint8_t *buf, size_t size)
{
    struct XenNetDev *netdev = DO_UPCAST(NICState, nc, nc)->opaque;
    netif_rx_request_t rxreq;
    RING_IDX rc, rp;
    void *page;

    if (netdev->xendev.be_state != XenbusStateConnected)
        return -1;

    rc = netdev->rx_ring.req_cons;
    rp = netdev->rx_ring.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    if (rc == rp || RING_REQUEST_CONS_OVERFLOW(&netdev->rx_ring, rc)) {
        xen_be_printf(&netdev->xendev, 2, "no buffer, drop packet\n");
        return -1;
    }
    if (size > XC_PAGE_SIZE - NET_IP_ALIGN) {
        xen_be_printf(&netdev->xendev, 0, "packet too big (%lu > %ld)",
                      (unsigned long)size, XC_PAGE_SIZE - NET_IP_ALIGN);
        return -1;
    }

    memcpy(&rxreq, RING_GET_REQUEST(&netdev->rx_ring, rc), sizeof(rxreq));
    netdev->rx_ring.req_cons = ++rc;

    page = xc_gnttab_map_grant_ref(netdev->xendev.gnttabdev,
                                   netdev->xendev.dom,
                                   rxreq.gref, PROT_WRITE);
    if (page == NULL) {
        xen_be_printf(&netdev->xendev, 0, "error: rx gref dereference failed (%d)\n",
                      rxreq.gref);
        net_rx_response(netdev, &rxreq, NETIF_RSP_ERROR, 0, 0, 0);
        return -1;
    }
    memcpy(page + NET_IP_ALIGN, buf, size);
    xc_gnttab_munmap(netdev->xendev.gnttabdev, page, 1);
    net_rx_response(netdev, &rxreq, NETIF_RSP_OKAY, NET_IP_ALIGN, size, 0);

    return size;
}

/* ------------------------------------------------------------- */

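/*
 * Glue to the qemu network layer: this NIC-type client exposes the two
 * callbacks above, so flow control (can_receive) and delivery (receive)
 * are driven by the VLAN/peer the device is attached to.
 */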
static NetClientInfo net_xen_info = {
    .type = NET_CLIENT_TYPE_NIC,
    .size = sizeof(NICState),
    .can_receive = net_rx_ok,
    .receive = net_rx_packet,
};

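/*
 * Backend init: read the mac address the toolstack wrote into the
 * backend's xenstore directory, create the qemu NIC, and advertise
 * feature-rx-copy (the only receive mode this backend implements).
 */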
static int net_init(struct XenDevice *xendev)
{
    struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev);

    /* read xenstore entries */
    if (netdev->mac == NULL)
        netdev->mac = xenstore_read_be_str(&netdev->xendev, "mac");

    /* do we have all we need? */
    if (netdev->mac == NULL)
        return -1;

    if (net_parse_macaddr(netdev->conf.macaddr.a, netdev->mac) < 0)
        return -1;

    netdev->conf.vlan = qemu_find_vlan(netdev->xendev.dev, 1);
    netdev->conf.peer = NULL;

    netdev->nic = qemu_new_nic(&net_xen_info, &netdev->conf,
                               "xen", NULL, netdev);

    snprintf(netdev->nic->nc.info_str, sizeof(netdev->nic->nc.info_str),
             "nic: xenbus vif macaddr=%s", netdev->mac);

    /* fill info */
    xenstore_write_be_int(&netdev->xendev, "feature-rx-copy", 1);
    xenstore_write_be_int(&netdev->xendev, "feature-rx-flip", 0);

    return 0;
}

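/*
 * Connect: read the ring references and event channel published by the
 * frontend, refuse frontends that don't request rx-copy, map both
 * shared rings through gnttab, initialize the back rings and bind the
 * event channel.  A final net_tx_packets() pass picks up anything the
 * frontend queued before the connect completed.
 */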
static int net_connect(struct XenDevice *xendev)
{
    struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev);
    int rx_copy;

    if (xenstore_read_fe_int(&netdev->xendev, "tx-ring-ref",
                             &netdev->tx_ring_ref) == -1)
        return -1;
    if (xenstore_read_fe_int(&netdev->xendev, "rx-ring-ref",
                             &netdev->rx_ring_ref) == -1)
        return 1;
    if (xenstore_read_fe_int(&netdev->xendev, "event-channel",
                             &netdev->xendev.remote_port) == -1)
        return -1;

    if (xenstore_read_fe_int(&netdev->xendev, "request-rx-copy", &rx_copy) == -1)
        rx_copy = 0;
    if (rx_copy == 0) {
        xen_be_printf(&netdev->xendev, 0, "frontend doesn't support rx-copy.\n");
        return -1;
    }

    netdev->txs = xc_gnttab_map_grant_ref(netdev->xendev.gnttabdev,
                                          netdev->xendev.dom,
                                          netdev->tx_ring_ref,
                                          PROT_READ | PROT_WRITE);
    netdev->rxs = xc_gnttab_map_grant_ref(netdev->xendev.gnttabdev,
                                          netdev->xendev.dom,
                                          netdev->rx_ring_ref,
                                          PROT_READ | PROT_WRITE);
    if (!netdev->txs || !netdev->rxs)
        return -1;
    BACK_RING_INIT(&netdev->tx_ring, netdev->txs, XC_PAGE_SIZE);
    BACK_RING_INIT(&netdev->rx_ring, netdev->rxs, XC_PAGE_SIZE);

    xen_be_bind_evtchn(&netdev->xendev);

    xen_be_printf(&netdev->xendev, 1, "ok: tx-ring-ref %d, rx-ring-ref %d, "
                  "remote port %d, local port %d\n",
                  netdev->tx_ring_ref, netdev->rx_ring_ref,
                  netdev->xendev.remote_port, netdev->xendev.local_port);

    net_tx_packets(netdev);
    return 0;
}

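/*
 * Disconnect: unbind the event channel, unmap the shared rings, and
 * detach the NIC from the qemu network layer so no further packets
 * are delivered to a dead ring.
 */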
static void net_disconnect(struct XenDevice *xendev)
{
    struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev);

    xen_be_unbind_evtchn(&netdev->xendev);

    if (netdev->txs) {
        xc_gnttab_munmap(netdev->xendev.gnttabdev, netdev->txs, 1);
        netdev->txs = NULL;
    }
    if (netdev->rxs) {
        xc_gnttab_munmap(netdev->xendev.gnttabdev, netdev->rxs, 1);
        netdev->rxs = NULL;
    }
    if (netdev->nic) {
        qemu_del_vlan_client(&netdev->nic->nc);
        netdev->nic = NULL;
    }
}

static void net_event(struct XenDevice *xendev)
{
    struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev);
    net_tx_packets(netdev);
}

static int net_free(struct XenDevice *xendev)
{
    struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev);

    qemu_free(netdev->mac);
    return 0;
}

/* ------------------------------------------------------------- */

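/*
 * Ops table picked up by the generic xen backend code (xen_backend.c).
 * DEVOPS_FLAG_NEED_GNTDEV asks for a grant-table device handle, which
 * the tx/rx paths use for xc_gnttab_map_grant_ref().
 */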
struct XenDevOps xen_netdev_ops = {
    .size       = sizeof(struct XenNetDev),
    .flags      = DEVOPS_FLAG_NEED_GNTDEV,
    .init       = net_init,
    .connect    = net_connect,
    .event      = net_event,
    .disconnect = net_disconnect,
    .free       = net_free,
};