/*
 * xen paravirt network card backend
 *
 * (c) Gerd Hoffmann <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; under version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/wait.h>

#include "hw/hw.h"
#include "net/net.h"
#include "net/checksum.h"
#include "net/util.h"
#include "hw/xen/xen_backend.h"

#include <xen/io/netif.h>

/* ------------------------------------------------------------- */

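/* Per-device state for one xen paravirt network backend instance. */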
struct XenNetDev {
    struct XenDevice      xendev;  /* must be first */
    char                  *mac;
    int                   tx_work;
    int                   tx_ring_ref;
    int                   rx_ring_ref;
    struct netif_tx_sring *txs;
    struct netif_rx_sring *rxs;
    netif_tx_back_ring_t  tx_ring;
    netif_rx_back_ring_t  rx_ring;
    NICConf               conf;
    NICState              *nic;
};

/* ------------------------------------------------------------- */

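/*
 * Queue a response for one TX request on the shared ring and notify the
 * frontend through the event channel if it asked for a kick.
 */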
static void net_tx_response(struct XenNetDev *netdev, netif_tx_request_t *txp, int8_t st)
{
    RING_IDX i = netdev->tx_ring.rsp_prod_pvt;
    netif_tx_response_t *resp;
    int notify;

    resp = RING_GET_RESPONSE(&netdev->tx_ring, i);
    resp->id     = txp->id;
    resp->status = st;

#if 0
    if (txp->flags & NETTXF_extra_info) {
        RING_GET_RESPONSE(&netdev->tx_ring, ++i)->status = NETIF_RSP_NULL;
    }
#endif

    netdev->tx_ring.rsp_prod_pvt = ++i;
    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netdev->tx_ring, notify);
    if (notify) {
        xen_pv_send_notify(&netdev->xendev);
    }

    if (i == netdev->tx_ring.req_cons) {
        int more_to_do;
        RING_FINAL_CHECK_FOR_REQUESTS(&netdev->tx_ring, more_to_do);
        if (more_to_do) {
            netdev->tx_work++;
        }
    }
}

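/* Report a single TX request back to the frontend as failed. */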
static void net_tx_error(struct XenNetDev *netdev, netif_tx_request_t *txp, RING_IDX end)
{
#if 0
    /*
     * Hmm, why does netback fail everything in the ring?
     * Should we do that even when not supporting SG and TSO?
     */
    RING_IDX cons = netdev->tx_ring.req_cons;

    do {
        make_tx_response(netif, txp, NETIF_RSP_ERROR);
        if (cons >= end) {
            break;
        }
        txp = RING_GET_REQUEST(&netdev->tx_ring, cons++);
    } while (1);
    netdev->tx_ring.req_cons = cons;
    netif_schedule_work(netif);
    netif_put(netif);
#else
    net_tx_response(netdev, txp, NETIF_RSP_ERROR);
#endif
}

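/*
 * Drain the TX ring: validate each request, map the granted page
 * read-only, hand the frame to the QEMU net core (recomputing the
 * checksum in a bounce buffer when NETTXF_csum_blank is set), and
 * complete the request.
 */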
static void net_tx_packets(struct XenNetDev *netdev)
{
    netif_tx_request_t txreq;
    RING_IDX rc, rp;
    void *page;
    void *tmpbuf = NULL;

    for (;;) {
        rc = netdev->tx_ring.req_cons;
        rp = netdev->tx_ring.sring->req_prod;
        xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

        while ((rc != rp)) {
            if (RING_REQUEST_CONS_OVERFLOW(&netdev->tx_ring, rc)) {
                break;
            }
            memcpy(&txreq, RING_GET_REQUEST(&netdev->tx_ring, rc), sizeof(txreq));
            netdev->tx_ring.req_cons = ++rc;

#if 1
            /* should not happen in theory, we don't announce the *
             * feature-{sg,gso,whatelse} flags in xenstore (yet?) */
            if (txreq.flags & NETTXF_extra_info) {
                xen_pv_printf(&netdev->xendev, 0, "FIXME: extra info flag\n");
                net_tx_error(netdev, &txreq, rc);
                continue;
            }
            if (txreq.flags & NETTXF_more_data) {
                xen_pv_printf(&netdev->xendev, 0, "FIXME: more data flag\n");
                net_tx_error(netdev, &txreq, rc);
                continue;
            }
#endif

            if (txreq.size < 14) {
                xen_pv_printf(&netdev->xendev, 0, "bad packet size: %d\n",
                              txreq.size);
                net_tx_error(netdev, &txreq, rc);
                continue;
            }

            if ((txreq.offset + txreq.size) > XC_PAGE_SIZE) {
                xen_pv_printf(&netdev->xendev, 0, "error: page crossing\n");
                net_tx_error(netdev, &txreq, rc);
                continue;
            }

            xen_pv_printf(&netdev->xendev, 3,
                          "tx packet ref %d, off %d, len %d, flags 0x%x%s%s%s%s\n",
                          txreq.gref, txreq.offset, txreq.size, txreq.flags,
                          (txreq.flags & NETTXF_csum_blank)     ? " csum_blank"     : "",
                          (txreq.flags & NETTXF_data_validated) ? " data_validated" : "",
                          (txreq.flags & NETTXF_more_data)      ? " more_data"      : "",
                          (txreq.flags & NETTXF_extra_info)     ? " extra_info"     : "");

            page = xen_be_map_grant_ref(&netdev->xendev, txreq.gref,
                                        PROT_READ);
            if (page == NULL) {
                xen_pv_printf(&netdev->xendev, 0,
                              "error: tx gref dereference failed (%d)\n",
                              txreq.gref);
                net_tx_error(netdev, &txreq, rc);
                continue;
            }
            if (txreq.flags & NETTXF_csum_blank) {
                /* have read-only mapping -> can't fill checksum in-place */
                if (!tmpbuf) {
                    tmpbuf = g_malloc(XC_PAGE_SIZE);
                }
                memcpy(tmpbuf, page + txreq.offset, txreq.size);
                net_checksum_calculate(tmpbuf, txreq.size);
                qemu_send_packet(qemu_get_queue(netdev->nic), tmpbuf,
                                 txreq.size);
            } else {
                qemu_send_packet(qemu_get_queue(netdev->nic),
                                 page + txreq.offset, txreq.size);
            }
            xen_be_unmap_grant_ref(&netdev->xendev, page);
            net_tx_response(netdev, &txreq, NETIF_RSP_OKAY);
        }
        if (!netdev->tx_work) {
            break;
        }
        netdev->tx_work = 0;
    }
    g_free(tmpbuf);
}

/* ------------------------------------------------------------- */

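/*
 * Queue a response for one RX request on the shared ring; a negative
 * status overrides the size in the status field.
 */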
static void net_rx_response(struct XenNetDev *netdev,
                            netif_rx_request_t *req, int8_t st,
                            uint16_t offset, uint16_t size,
                            uint16_t flags)
{
    RING_IDX i = netdev->rx_ring.rsp_prod_pvt;
    netif_rx_response_t *resp;
    int notify;

    resp = RING_GET_RESPONSE(&netdev->rx_ring, i);
    resp->offset = offset;
    resp->flags  = flags;
    resp->id     = req->id;
    resp->status = (int16_t)size;
    if (st < 0) {
        resp->status = (int16_t)st;
    }

    xen_pv_printf(&netdev->xendev, 3,
                  "rx response: idx %d, status %d, flags 0x%x\n",
                  i, resp->status, resp->flags);

    netdev->rx_ring.rsp_prod_pvt = ++i;
    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netdev->rx_ring, notify);
    if (notify) {
        xen_pv_send_notify(&netdev->xendev);
    }
}

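/* Write frames two bytes into the granted page so the IP header behind
 * the 14-byte Ethernet header ends up 4-byte aligned. */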
#define NET_IP_ALIGN 2

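/*
 * Receive callback for the QEMU net core: copy the frame into the page
 * granted by the frontend's next RX request and complete that request.
 * Returns 0 (packet not consumed) while no RX request is available, so
 * the net core queues the frame for a later flush.
 */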
static ssize_t net_rx_packet(NetClientState *nc, const uint8_t *buf, size_t size)
{
    struct XenNetDev *netdev = qemu_get_nic_opaque(nc);
    netif_rx_request_t rxreq;
    RING_IDX rc, rp;
    void *page;

    if (netdev->xendev.be_state != XenbusStateConnected) {
        return -1;
    }

    rc = netdev->rx_ring.req_cons;
    rp = netdev->rx_ring.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    if (rc == rp || RING_REQUEST_CONS_OVERFLOW(&netdev->rx_ring, rc)) {
        return 0;
    }
    if (size > XC_PAGE_SIZE - NET_IP_ALIGN) {
        xen_pv_printf(&netdev->xendev, 0, "packet too big (%lu > %ld)",
                      (unsigned long)size, XC_PAGE_SIZE - NET_IP_ALIGN);
        return -1;
    }

    memcpy(&rxreq, RING_GET_REQUEST(&netdev->rx_ring, rc), sizeof(rxreq));
    netdev->rx_ring.req_cons = ++rc;

    page = xen_be_map_grant_ref(&netdev->xendev, rxreq.gref, PROT_WRITE);
    if (page == NULL) {
        xen_pv_printf(&netdev->xendev, 0,
                      "error: rx gref dereference failed (%d)\n",
                      rxreq.gref);
        net_rx_response(netdev, &rxreq, NETIF_RSP_ERROR, 0, 0, 0);
        return -1;
    }
    memcpy(page + NET_IP_ALIGN, buf, size);
    xen_be_unmap_grant_ref(&netdev->xendev, page);
    net_rx_response(netdev, &rxreq, NETIF_RSP_OKAY, NET_IP_ALIGN, size, 0);

    return size;
}

/* ------------------------------------------------------------- */

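/* Glue for the QEMU net core: incoming frames are delivered via net_rx_packet(). */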
static NetClientInfo net_xen_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .receive = net_rx_packet,
};

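/*
 * XenDevice init hook: read the MAC address from xenstore, create the
 * NIC, and advertise the features supported by this backend.
 */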
static int net_init(struct XenDevice *xendev)
{
    struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev);

    /* read xenstore entries */
    if (netdev->mac == NULL) {
        netdev->mac = xenstore_read_be_str(&netdev->xendev, "mac");
    }

    /* do we have all we need? */
    if (netdev->mac == NULL) {
        return -1;
    }

    if (net_parse_macaddr(netdev->conf.macaddr.a, netdev->mac) < 0) {
        return -1;
    }

    netdev->nic = qemu_new_nic(&net_xen_info, &netdev->conf,
                               "xen", NULL, netdev);

    snprintf(qemu_get_queue(netdev->nic)->info_str,
             sizeof(qemu_get_queue(netdev->nic)->info_str),
             "nic: xenbus vif macaddr=%s", netdev->mac);

    /* fill info */
    xenstore_write_be_int(&netdev->xendev, "feature-rx-copy", 1);
    xenstore_write_be_int(&netdev->xendev, "feature-rx-flip", 0);

    return 0;
}

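/*
 * XenDevice initialise hook: read ring references and the event channel
 * from the frontend, map both shared rings, and bind the event channel.
 */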
static int net_connect(struct XenDevice *xendev)
{
    struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev);
    int rx_copy;

    if (xenstore_read_fe_int(&netdev->xendev, "tx-ring-ref",
                             &netdev->tx_ring_ref) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&netdev->xendev, "rx-ring-ref",
                             &netdev->rx_ring_ref) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&netdev->xendev, "event-channel",
                             &netdev->xendev.remote_port) == -1) {
        return -1;
    }

    if (xenstore_read_fe_int(&netdev->xendev, "request-rx-copy", &rx_copy) == -1) {
        rx_copy = 0;
    }
    if (rx_copy == 0) {
        xen_pv_printf(&netdev->xendev, 0,
                      "frontend doesn't support rx-copy.\n");
        return -1;
    }

    netdev->txs = xen_be_map_grant_ref(&netdev->xendev,
                                       netdev->tx_ring_ref,
                                       PROT_READ | PROT_WRITE);
    if (!netdev->txs) {
        return -1;
    }
    netdev->rxs = xen_be_map_grant_ref(&netdev->xendev,
                                       netdev->rx_ring_ref,
                                       PROT_READ | PROT_WRITE);
    if (!netdev->rxs) {
        xen_be_unmap_grant_ref(&netdev->xendev, netdev->txs);
        netdev->txs = NULL;
        return -1;
    }
    BACK_RING_INIT(&netdev->tx_ring, netdev->txs, XC_PAGE_SIZE);
    BACK_RING_INIT(&netdev->rx_ring, netdev->rxs, XC_PAGE_SIZE);

    xen_be_bind_evtchn(&netdev->xendev);

    xen_pv_printf(&netdev->xendev, 1, "ok: tx-ring-ref %d, rx-ring-ref %d, "
                  "remote port %d, local port %d\n",
                  netdev->tx_ring_ref, netdev->rx_ring_ref,
                  netdev->xendev.remote_port, netdev->xendev.local_port);

    net_tx_packets(netdev);
    return 0;
}

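/* Undo net_connect(): unbind the event channel and unmap both shared rings. */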
static void net_disconnect(struct XenDevice *xendev)
{
    struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev);

    xen_pv_unbind_evtchn(&netdev->xendev);

    if (netdev->txs) {
        xen_be_unmap_grant_ref(&netdev->xendev, netdev->txs);
        netdev->txs = NULL;
    }
    if (netdev->rxs) {
        xen_be_unmap_grant_ref(&netdev->xendev, netdev->rxs);
        netdev->rxs = NULL;
    }
}

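/* Event-channel kick from the frontend: process TX work, then flush queued RX packets. */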
static void net_event(struct XenDevice *xendev)
{
    struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev);
    net_tx_packets(netdev);
    qemu_flush_queued_packets(qemu_get_queue(netdev->nic));
}

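/* Final teardown: release the NIC and the MAC string read from xenstore. */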
static int net_free(struct XenDevice *xendev)
{
    struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev);

    if (netdev->nic) {
        qemu_del_nic(netdev->nic);
        netdev->nic = NULL;
    }
    g_free(netdev->mac);
    netdev->mac = NULL;
    return 0;
}

/* ------------------------------------------------------------- */

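/* Operations table for this backend; entry points called by the Xen backend core. */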
struct XenDevOps xen_netdev_ops = {
    .size       = sizeof(struct XenNetDev),
    .flags      = DEVOPS_FLAG_NEED_GNTDEV,
    .init       = net_init,
    .initialise = net_connect,
    .event      = net_event,
    .disconnect = net_disconnect,
    .free       = net_free,
};