/*
 * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
 *
 * PAPR Inter-VM Logical Lan, aka ibmveth
 *
 * Copyright (c) 2010,2011 David Gibson, IBM Corporation.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "net/net.h"
#include "migration/vmstate.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_vio.h"
#include "hw/qdev-properties.h"
#include "sysemu/sysemu.h"
#include "trace.h"

#include <libfdt.h>
#include "qom/object.h"

#define ETH_ALEN 6
#define MAX_PACKET_SIZE 65536

/* Compatibility flags for migration */
#define SPAPRVLAN_FLAG_RX_BUF_POOLS_BIT 0
#define SPAPRVLAN_FLAG_RX_BUF_POOLS     (1 << SPAPRVLAN_FLAG_RX_BUF_POOLS_BIT)

/*
 * Virtual LAN device
 */

typedef uint64_t vlan_bd_t;

#define VLAN_BD_VALID        0x8000000000000000ULL
#define VLAN_BD_TOGGLE       0x4000000000000000ULL
#define VLAN_BD_NO_CSUM      0x0200000000000000ULL
#define VLAN_BD_CSUM_GOOD    0x0100000000000000ULL
#define VLAN_BD_LEN_MASK     0x00ffffff00000000ULL
#define VLAN_BD_LEN(bd)      (((bd) & VLAN_BD_LEN_MASK) >> 32)
#define VLAN_BD_ADDR_MASK    0x00000000ffffffffULL
#define VLAN_BD_ADDR(bd)     ((bd) & VLAN_BD_ADDR_MASK)

#define VLAN_VALID_BD(addr, len) (VLAN_BD_VALID | \
                                  (((len) << 32) & VLAN_BD_LEN_MASK) | \
                                  (addr & VLAN_BD_ADDR_MASK))
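
/*
 * As the masks above encode it, a buffer descriptor packs everything into
 * a single 64-bit word:
 *
 *   bit 63        valid
 *   bit 62        toggle
 *   bit 57        no-checksum
 *   bit 56        checksum-good
 *   bits 55..32   buffer length (24 bits)
 *   bits 31..0    buffer address (in the device's DMA/TCE space)
 */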

#define VLAN_RXQC_TOGGLE     0x80
#define VLAN_RXQC_VALID      0x40
#define VLAN_RXQC_NO_CSUM    0x02
#define VLAN_RXQC_CSUM_GOOD  0x01

#define VLAN_RQ_ALIGNMENT    16
#define VLAN_RXQ_BD_OFF      0
#define VLAN_FILTER_BD_OFF   8
#define VLAN_RX_BDS_OFF      16
/*
 * The final 8 bytes of the buffer list are a counter of frames dropped
 * because there was no buffer in the buffer list capable of holding
 * the frame. We must avoid overwriting it, or the operating system will
 * report garbage for this statistic.
 */
#define VLAN_RX_BDS_LEN      (SPAPR_TCE_PAGE_SIZE - VLAN_RX_BDS_OFF - 8)
#define VLAN_MAX_BUFS        (VLAN_RX_BDS_LEN / 8)
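
/*
 * Putting the offsets above together, the guest-supplied buffer list page
 * (one SPAPR_TCE_PAGE_SIZE page, registered via H_REGISTER_LOGICAL_LAN)
 * is laid out as:
 *
 *   offset 0       receive queue buffer descriptor
 *   offset 8       filter list buffer descriptor
 *   offset 16..    up to VLAN_MAX_BUFS receive buffer descriptors
 *   last 8 bytes   dropped-frame counter (see the comment above)
 */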

#define TYPE_VIO_SPAPR_VLAN_DEVICE "spapr-vlan"
OBJECT_DECLARE_SIMPLE_TYPE(SpaprVioVlan, VIO_SPAPR_VLAN_DEVICE)

#define RX_POOL_MAX_BDS 4096
#define RX_MAX_POOLS 5

typedef struct {
    int32_t bufsize;
    int32_t count;
    vlan_bd_t bds[RX_POOL_MAX_BDS];
} RxBufPool;
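
/*
 * Each pool collects receive buffer descriptors of one buffer size; the
 * rx_pool[] array below is kept sorted by bufsize (see the qsort in
 * spapr_vlan_add_rxbuf_to_pool()), so the receive path can pick the
 * smallest non-empty pool whose buffers fit an incoming frame.
 */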

struct SpaprVioVlan {
    SpaprVioDevice sdev;
    NICConf nicconf;
    NICState *nic;
    MACAddr perm_mac;
    bool isopen;
    hwaddr buf_list;
    uint32_t add_buf_ptr, use_buf_ptr, rx_bufs;
    hwaddr rxq_ptr;
    QEMUTimer *rxp_timer;
    uint32_t compat_flags;             /* Compatibility flags for migration */
    RxBufPool *rx_pool[RX_MAX_POOLS];  /* Receive buffer descriptor pools */
};

static bool spapr_vlan_can_receive(NetClientState *nc)
{
    SpaprVioVlan *dev = qemu_get_nic_opaque(nc);

    return dev->isopen && dev->rx_bufs > 0;
}

/**
 * The last 8 bytes of the receive buffer list page (that has been
 * supplied by the guest with the H_REGISTER_LOGICAL_LAN call) contain
 * a counter for frames that have been dropped because there was no
 * suitable receive buffer available. This function is used to increase
 * this counter by one.
 */
static void spapr_vlan_record_dropped_rx_frame(SpaprVioVlan *dev)
{
    uint64_t cnt;

    cnt = vio_ldq(&dev->sdev, dev->buf_list + 4096 - 8);
    vio_stq(&dev->sdev, dev->buf_list + 4096 - 8, cnt + 1);
}

/**
 * Get buffer descriptor from one of our receive buffer pools
 */
static vlan_bd_t spapr_vlan_get_rx_bd_from_pool(SpaprVioVlan *dev,
                                                size_t size)
{
    vlan_bd_t bd;
    int pool;

    for (pool = 0; pool < RX_MAX_POOLS; pool++) {
        if (dev->rx_pool[pool]->count > 0 &&
            dev->rx_pool[pool]->bufsize >= size + 8) {
            break;
        }
    }
    if (pool == RX_MAX_POOLS) {
        /* Failed to find a suitable buffer */
        return 0;
    }

    trace_spapr_vlan_get_rx_bd_from_pool_found(pool,
                                               dev->rx_pool[pool]->count,
                                               dev->rx_bufs);

    /* Remove the buffer from the pool */
    dev->rx_pool[pool]->count--;
    bd = dev->rx_pool[pool]->bds[dev->rx_pool[pool]->count];
    dev->rx_pool[pool]->bds[dev->rx_pool[pool]->count] = 0;

    return bd;
}

/**
 * Get buffer descriptor from the receive buffer list page that has been
 * supplied by the guest with the H_REGISTER_LOGICAL_LAN call
 */
static vlan_bd_t spapr_vlan_get_rx_bd_from_page(SpaprVioVlan *dev,
                                                size_t size)
{
    int buf_ptr = dev->use_buf_ptr;
    vlan_bd_t bd;

    do {
        buf_ptr += 8;
        if (buf_ptr >= VLAN_RX_BDS_LEN + VLAN_RX_BDS_OFF) {
            buf_ptr = VLAN_RX_BDS_OFF;
        }

        bd = vio_ldq(&dev->sdev, dev->buf_list + buf_ptr);

        trace_spapr_vlan_get_rx_bd_from_page(buf_ptr, (uint64_t)bd);
    } while ((!(bd & VLAN_BD_VALID) || VLAN_BD_LEN(bd) < size + 8)
             && buf_ptr != dev->use_buf_ptr);

    if (!(bd & VLAN_BD_VALID) || VLAN_BD_LEN(bd) < size + 8) {
        /* Failed to find a suitable buffer */
        return 0;
    }

    /* Remove the buffer from the buffer list page */
    dev->use_buf_ptr = buf_ptr;
    vio_stq(&dev->sdev, dev->buf_list + dev->use_buf_ptr, 0);

    trace_spapr_vlan_get_rx_bd_from_page_found(dev->use_buf_ptr, dev->rx_bufs);

    return bd;
}

static ssize_t spapr_vlan_receive(NetClientState *nc, const uint8_t *buf,
                                  size_t size)
{
    SpaprVioVlan *dev = qemu_get_nic_opaque(nc);
    SpaprVioDevice *sdev = VIO_SPAPR_DEVICE(dev);
    vlan_bd_t rxq_bd = vio_ldq(sdev, dev->buf_list + VLAN_RXQ_BD_OFF);
    vlan_bd_t bd;
    uint64_t handle;
    uint8_t control;

    trace_spapr_vlan_receive(sdev->qdev.id, dev->rx_bufs);

    if (!dev->isopen) {
        return -1;
    }

    if (!dev->rx_bufs) {
        spapr_vlan_record_dropped_rx_frame(dev);
        return 0;
    }

    if (dev->compat_flags & SPAPRVLAN_FLAG_RX_BUF_POOLS) {
        bd = spapr_vlan_get_rx_bd_from_pool(dev, size);
    } else {
        bd = spapr_vlan_get_rx_bd_from_page(dev, size);
    }
    if (!bd) {
        spapr_vlan_record_dropped_rx_frame(dev);
        return 0;
    }

    dev->rx_bufs--;

    /* Transfer the packet data */
    if (spapr_vio_dma_write(sdev, VLAN_BD_ADDR(bd) + 8, buf, size) < 0) {
        return -1;
    }

    trace_spapr_vlan_receive_dma_completed();

    /* Update the receive queue */
    control = VLAN_RXQC_TOGGLE | VLAN_RXQC_VALID;
    if (rxq_bd & VLAN_BD_TOGGLE) {
        control ^= VLAN_RXQC_TOGGLE;
    }
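
    /*
     * Each receive queue entry written below is 16 bytes, as the stores
     * lay it out: byte 0 holds the VLAN_RXQC_* control bits, bytes 2-3 a
     * constant 8 (the offset at which the packet data was written into
     * the receive buffer above), bytes 4-7 the packet length, and bytes
     * 8-15 the opaque handle the guest stored at the start of the buffer.
     */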
    handle = vio_ldq(sdev, VLAN_BD_ADDR(bd));
    vio_stq(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr + 8, handle);
    vio_stl(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr + 4, size);
    vio_sth(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr + 2, 8);
    vio_stb(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr, control);

    trace_spapr_vlan_receive_wrote(dev->rxq_ptr,
                                   vio_ldq(sdev, VLAN_BD_ADDR(rxq_bd) +
                                                 dev->rxq_ptr),
                                   vio_ldq(sdev, VLAN_BD_ADDR(rxq_bd) +
                                                 dev->rxq_ptr + 8));

    dev->rxq_ptr += 16;
    if (dev->rxq_ptr >= VLAN_BD_LEN(rxq_bd)) {
        dev->rxq_ptr = 0;
        vio_stq(sdev, dev->buf_list + VLAN_RXQ_BD_OFF, rxq_bd ^ VLAN_BD_TOGGLE);
    }

    if (sdev->signal_state & 1) {
        spapr_vio_irq_pulse(sdev);
    }

    return size;
}

static NetClientInfo net_spapr_vlan_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .can_receive = spapr_vlan_can_receive,
    .receive = spapr_vlan_receive,
};

static void spapr_vlan_flush_rx_queue(void *opaque)
{
    SpaprVioVlan *dev = opaque;

    qemu_flush_queued_packets(qemu_get_queue(dev->nic));
}

static void spapr_vlan_reset_rx_pool(RxBufPool *rxp)
{
    /*
     * Use INT_MAX as bufsize so that unused buffers are moved to the end
     * of the list during the qsort in spapr_vlan_add_rxbuf_to_pool() later.
     */
    rxp->bufsize = INT_MAX;
    rxp->count = 0;
    memset(rxp->bds, 0, sizeof(rxp->bds));
}

static void spapr_vlan_reset(SpaprVioDevice *sdev)
{
    SpaprVioVlan *dev = VIO_SPAPR_VLAN_DEVICE(sdev);
    int i;

    dev->buf_list = 0;
    dev->rx_bufs = 0;
    dev->isopen = 0;

    if (dev->compat_flags & SPAPRVLAN_FLAG_RX_BUF_POOLS) {
        for (i = 0; i < RX_MAX_POOLS; i++) {
            spapr_vlan_reset_rx_pool(dev->rx_pool[i]);
        }
    }

    memcpy(&dev->nicconf.macaddr.a, &dev->perm_mac.a,
           sizeof(dev->nicconf.macaddr.a));
    qemu_format_nic_info_str(qemu_get_queue(dev->nic), dev->nicconf.macaddr.a);
}

static void spapr_vlan_realize(SpaprVioDevice *sdev, Error **errp)
{
    SpaprVioVlan *dev = VIO_SPAPR_VLAN_DEVICE(sdev);

    qemu_macaddr_default_if_unset(&dev->nicconf.macaddr);

    memcpy(&dev->perm_mac.a, &dev->nicconf.macaddr.a, sizeof(dev->perm_mac.a));

    dev->nic = qemu_new_nic(&net_spapr_vlan_info, &dev->nicconf,
                            object_get_typename(OBJECT(sdev)), sdev->qdev.id, dev);
    qemu_format_nic_info_str(qemu_get_queue(dev->nic), dev->nicconf.macaddr.a);

    dev->rxp_timer = timer_new_us(QEMU_CLOCK_VIRTUAL, spapr_vlan_flush_rx_queue,
                                  dev);
}

static void spapr_vlan_instance_init(Object *obj)
{
    SpaprVioVlan *dev = VIO_SPAPR_VLAN_DEVICE(obj);
    int i;

    device_add_bootindex_property(obj, &dev->nicconf.bootindex,
                                  "bootindex", "",
                                  DEVICE(dev));

    if (dev->compat_flags & SPAPRVLAN_FLAG_RX_BUF_POOLS) {
        for (i = 0; i < RX_MAX_POOLS; i++) {
            dev->rx_pool[i] = g_new(RxBufPool, 1);
            spapr_vlan_reset_rx_pool(dev->rx_pool[i]);
        }
    }
}

static void spapr_vlan_instance_finalize(Object *obj)
{
    SpaprVioVlan *dev = VIO_SPAPR_VLAN_DEVICE(obj);
    int i;

    if (dev->compat_flags & SPAPRVLAN_FLAG_RX_BUF_POOLS) {
        for (i = 0; i < RX_MAX_POOLS; i++) {
            g_free(dev->rx_pool[i]);
            dev->rx_pool[i] = NULL;
        }
    }

    if (dev->rxp_timer) {
        timer_del(dev->rxp_timer);
        timer_free(dev->rxp_timer);
    }
}

void spapr_vlan_create(SpaprVioBus *bus, NICInfo *nd)
{
    DeviceState *dev;

    dev = qdev_new("spapr-vlan");

    qdev_set_nic_properties(dev, nd);

    qdev_realize_and_unref(dev, &bus->bus, &error_fatal);
}

static int spapr_vlan_devnode(SpaprVioDevice *dev, void *fdt, int node_off)
{
    SpaprVioVlan *vdev = VIO_SPAPR_VLAN_DEVICE(dev);
    uint8_t padded_mac[8] = {0, 0};
    int ret;

    /* Some old phyp versions give the mac address in an 8-byte
     * property. The kernel driver (before 3.10) has an insane workaround;
     * rather than doing the obvious thing and checking the property
     * length, it checks whether the first byte has 0b10 in the low
     * bits. If a correct 6-byte property has a different first byte
     * the kernel will get the wrong mac address, overrunning its
     * buffer in the process (read only, thank goodness).
     *
     * Here we return a 6-byte address unless that would break a pre-3.10
     * driver. In that case we return a padded 8-byte address to allow the old
     * workaround to succeed. */
    if ((vdev->nicconf.macaddr.a[0] & 0x3) == 0x2) {
        ret = fdt_setprop(fdt, node_off, "local-mac-address",
                          &vdev->nicconf.macaddr, ETH_ALEN);
    } else {
        memcpy(&padded_mac[2], &vdev->nicconf.macaddr, ETH_ALEN);
        ret = fdt_setprop(fdt, node_off, "local-mac-address",
                          padded_mac, sizeof(padded_mac));
    }
    if (ret < 0) {
        return ret;
    }

    ret = fdt_setprop_cell(fdt, node_off, "ibm,mac-address-filters", 0);
    if (ret < 0) {
        return ret;
    }

    return 0;
}

static int check_bd(SpaprVioVlan *dev, vlan_bd_t bd,
                    target_ulong alignment)
{
    if ((VLAN_BD_ADDR(bd) % alignment)
        || (VLAN_BD_LEN(bd) % alignment)) {
        return -1;
    }

    if (!spapr_vio_dma_valid(&dev->sdev, VLAN_BD_ADDR(bd),
                             VLAN_BD_LEN(bd), DMA_DIRECTION_FROM_DEVICE)
        || !spapr_vio_dma_valid(&dev->sdev, VLAN_BD_ADDR(bd),
                                VLAN_BD_LEN(bd), DMA_DIRECTION_TO_DEVICE)) {
        return -1;
    }

    return 0;
}

static target_ulong h_register_logical_lan(PowerPCCPU *cpu,
                                           SpaprMachineState *spapr,
                                           target_ulong opcode,
                                           target_ulong *args)
{
    target_ulong reg = args[0];
    target_ulong buf_list = args[1];
    target_ulong rec_queue = args[2];
    target_ulong filter_list = args[3];
    SpaprVioDevice *sdev = spapr_vio_find_by_reg(spapr->vio_bus, reg);
    SpaprVioVlan *dev = VIO_SPAPR_VLAN_DEVICE(sdev);
    vlan_bd_t filter_list_bd;

    if (!dev) {
        return H_PARAMETER;
    }

    if (dev->isopen) {
        hcall_dprintf("H_REGISTER_LOGICAL_LAN called twice without "
                      "H_FREE_LOGICAL_LAN\n");
        return H_RESOURCE;
    }

    if (check_bd(dev, VLAN_VALID_BD(buf_list, SPAPR_TCE_PAGE_SIZE),
                 SPAPR_TCE_PAGE_SIZE) < 0) {
        hcall_dprintf("Bad buf_list 0x" TARGET_FMT_lx "\n", buf_list);
        return H_PARAMETER;
    }

    filter_list_bd = VLAN_VALID_BD(filter_list, SPAPR_TCE_PAGE_SIZE);
    if (check_bd(dev, filter_list_bd, SPAPR_TCE_PAGE_SIZE) < 0) {
        hcall_dprintf("Bad filter_list 0x" TARGET_FMT_lx "\n", filter_list);
        return H_PARAMETER;
    }

    if (!(rec_queue & VLAN_BD_VALID)
        || (check_bd(dev, rec_queue, VLAN_RQ_ALIGNMENT) < 0)) {
        hcall_dprintf("Bad receive queue\n");
        return H_PARAMETER;
    }

    dev->buf_list = buf_list;
    sdev->signal_state = 0;

    rec_queue &= ~VLAN_BD_TOGGLE;

    /* Initialize the buffer list */
    vio_stq(sdev, buf_list, rec_queue);
    vio_stq(sdev, buf_list + 8, filter_list_bd);
    spapr_vio_dma_set(sdev, buf_list + VLAN_RX_BDS_OFF, 0,
                      SPAPR_TCE_PAGE_SIZE - VLAN_RX_BDS_OFF);
    dev->add_buf_ptr = VLAN_RX_BDS_OFF - 8;
    dev->use_buf_ptr = VLAN_RX_BDS_OFF - 8;
    dev->rx_bufs = 0;
    dev->rxq_ptr = 0;

    /* Initialize the receive queue */
    spapr_vio_dma_set(sdev, VLAN_BD_ADDR(rec_queue), 0, VLAN_BD_LEN(rec_queue));

    dev->isopen = 1;
    qemu_flush_queued_packets(qemu_get_queue(dev->nic));

    return H_SUCCESS;
}


static target_ulong h_free_logical_lan(PowerPCCPU *cpu,
                                       SpaprMachineState *spapr,
                                       target_ulong opcode, target_ulong *args)
{
    target_ulong reg = args[0];
    SpaprVioDevice *sdev = spapr_vio_find_by_reg(spapr->vio_bus, reg);
    SpaprVioVlan *dev = VIO_SPAPR_VLAN_DEVICE(sdev);

    if (!dev) {
        return H_PARAMETER;
    }

    if (!dev->isopen) {
        hcall_dprintf("H_FREE_LOGICAL_LAN called without "
                      "H_REGISTER_LOGICAL_LAN\n");
        return H_RESOURCE;
    }

    spapr_vlan_reset(sdev);
    return H_SUCCESS;
}

/**
 * Used for qsort, this function compares two RxBufPools by size.
 */
static int rx_pool_size_compare(const void *p1, const void *p2)
{
    const RxBufPool *pool1 = *(RxBufPool **)p1;
    const RxBufPool *pool2 = *(RxBufPool **)p2;

    if (pool1->bufsize < pool2->bufsize) {
        return -1;
    }
    return pool1->bufsize > pool2->bufsize;
}

/**
 * Search for a buffer pool with exactly matching buffer size,
 * or return -1 if no matching pool has been found.
 */
static int spapr_vlan_get_rx_pool_id(SpaprVioVlan *dev, int size)
{
    int pool;

    for (pool = 0; pool < RX_MAX_POOLS; pool++) {
        if (dev->rx_pool[pool]->bufsize == size) {
            return pool;
        }
    }

    return -1;
}

/**
 * Enqueue a receive buffer by adding it to one of our receive buffer pools
 */
static target_long spapr_vlan_add_rxbuf_to_pool(SpaprVioVlan *dev,
                                                target_ulong buf)
{
    int size = VLAN_BD_LEN(buf);
    int pool;

    pool = spapr_vlan_get_rx_pool_id(dev, size);
    if (pool < 0) {
        /*
         * No matching pool found? Try to use a new one. If the guest used all
         * pools before, but changed the size of one pool in the meantime, we
         * might need to recycle that pool here (if it's empty already). Thus
         * scan all buffer pools now, starting with the last (likely empty)
         * one.
         */
        for (pool = RX_MAX_POOLS - 1; pool >= 0 ; pool--) {
            if (dev->rx_pool[pool]->count == 0) {
                dev->rx_pool[pool]->bufsize = size;
                /*
                 * Sort pools by size so that spapr_vlan_receive()
                 * can later find the smallest buffer pool easily.
                 */
                qsort(dev->rx_pool, RX_MAX_POOLS, sizeof(dev->rx_pool[0]),
                      rx_pool_size_compare);
                pool = spapr_vlan_get_rx_pool_id(dev, size);
                trace_spapr_vlan_add_rxbuf_to_pool_create(pool,
                                                          VLAN_BD_LEN(buf));
                break;
            }
        }
    }
    /* Still no usable pool? Give up */
    if (pool < 0 || dev->rx_pool[pool]->count >= RX_POOL_MAX_BDS) {
        return H_RESOURCE;
    }

    trace_spapr_vlan_add_rxbuf_to_pool(pool, VLAN_BD_LEN(buf),
                                       dev->rx_pool[pool]->count);

    dev->rx_pool[pool]->bds[dev->rx_pool[pool]->count++] = buf;

    return 0;
}

/**
 * This is the old way of enqueuing receive buffers: add them to the receive
 * buffer list page that has been supplied by the guest (which is quite
 * limited in size).
 */
static target_long spapr_vlan_add_rxbuf_to_page(SpaprVioVlan *dev,
                                                target_ulong buf)
{
    vlan_bd_t bd;

    if (dev->rx_bufs >= VLAN_MAX_BUFS) {
        return H_RESOURCE;
    }

    do {
        dev->add_buf_ptr += 8;
        if (dev->add_buf_ptr >= VLAN_RX_BDS_LEN + VLAN_RX_BDS_OFF) {
            dev->add_buf_ptr = VLAN_RX_BDS_OFF;
        }

        bd = vio_ldq(&dev->sdev, dev->buf_list + dev->add_buf_ptr);
    } while (bd & VLAN_BD_VALID);

    vio_stq(&dev->sdev, dev->buf_list + dev->add_buf_ptr, buf);

    trace_spapr_vlan_add_rxbuf_to_page(dev->add_buf_ptr, dev->rx_bufs, buf);

    return 0;
}

static target_ulong h_add_logical_lan_buffer(PowerPCCPU *cpu,
                                             SpaprMachineState *spapr,
                                             target_ulong opcode,
                                             target_ulong *args)
{
    target_ulong reg = args[0];
    target_ulong buf = args[1];
    SpaprVioDevice *sdev = spapr_vio_find_by_reg(spapr->vio_bus, reg);
    SpaprVioVlan *dev = VIO_SPAPR_VLAN_DEVICE(sdev);
    target_long ret;

    trace_spapr_vlan_h_add_logical_lan_buffer(reg, buf);

    if (!sdev) {
        hcall_dprintf("Bad device\n");
        return H_PARAMETER;
    }

    if ((check_bd(dev, buf, 4) < 0)
        || (VLAN_BD_LEN(buf) < 16)) {
        hcall_dprintf("Bad buffer enqueued\n");
        return H_PARAMETER;
    }

    if (!dev->isopen) {
        return H_RESOURCE;
    }

    if (dev->compat_flags & SPAPRVLAN_FLAG_RX_BUF_POOLS) {
        ret = spapr_vlan_add_rxbuf_to_pool(dev, buf);
    } else {
        ret = spapr_vlan_add_rxbuf_to_page(dev, buf);
    }
    if (ret) {
        return ret;
    }

    dev->rx_bufs++;

    /*
     * Give the guest some more time to add additional RX buffers before we
     * flush the receive queue, so that e.g. fragmented IP packets can
     * be passed to the guest in one go later (instead of passing single
     * fragments if there is only one receive buffer available).
     */
    timer_mod(dev->rxp_timer, qemu_clock_get_us(QEMU_CLOCK_VIRTUAL) + 500);

    return H_SUCCESS;
}

static target_ulong h_send_logical_lan(PowerPCCPU *cpu,
                                       SpaprMachineState *spapr,
                                       target_ulong opcode, target_ulong *args)
{
    target_ulong reg = args[0];
    target_ulong *bufs = args + 1;
    target_ulong continue_token = args[7];
    SpaprVioDevice *sdev = spapr_vio_find_by_reg(spapr->vio_bus, reg);
    SpaprVioVlan *dev = VIO_SPAPR_VLAN_DEVICE(sdev);
    unsigned total_len;
    uint8_t *lbuf, *p;
    int i, nbufs;
    int ret;

    trace_spapr_vlan_h_send_logical_lan(reg, continue_token);

    if (!sdev) {
        return H_PARAMETER;
    }

    trace_spapr_vlan_h_send_logical_lan_rxbufs(dev->rx_bufs);

    if (!dev->isopen) {
        return H_DROPPED;
    }

    if (continue_token) {
        return H_HARDWARE; /* FIXME actually handle this */
    }

    total_len = 0;
    for (i = 0; i < 6; i++) {
        trace_spapr_vlan_h_send_logical_lan_buf_desc(bufs[i]);
        if (!(bufs[i] & VLAN_BD_VALID)) {
            break;
        }
        total_len += VLAN_BD_LEN(bufs[i]);
    }

    nbufs = i;
    trace_spapr_vlan_h_send_logical_lan_total(nbufs, total_len);

    if (total_len == 0) {
        return H_SUCCESS;
    }

    if (total_len > MAX_PACKET_SIZE) {
        /* Don't let the guest force too large an allocation */
        return H_RESOURCE;
    }

    lbuf = alloca(total_len);
    p = lbuf;
    for (i = 0; i < nbufs; i++) {
        ret = spapr_vio_dma_read(sdev, VLAN_BD_ADDR(bufs[i]),
                                 p, VLAN_BD_LEN(bufs[i]));
        if (ret < 0) {
            return ret;
        }

        p += VLAN_BD_LEN(bufs[i]);
    }

    qemu_send_packet(qemu_get_queue(dev->nic), lbuf, total_len);

    return H_SUCCESS;
}

static target_ulong h_multicast_ctrl(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                     target_ulong opcode, target_ulong *args)
{
    target_ulong reg = args[0];
    SpaprVioDevice *dev = spapr_vio_find_by_reg(spapr->vio_bus, reg);

    if (!dev) {
        return H_PARAMETER;
    }

    return H_SUCCESS;
}

static target_ulong h_change_logical_lan_mac(PowerPCCPU *cpu,
                                             SpaprMachineState *spapr,
                                             target_ulong opcode,
                                             target_ulong *args)
{
    target_ulong reg = args[0];
    target_ulong macaddr = args[1];
    SpaprVioDevice *sdev = spapr_vio_find_by_reg(spapr->vio_bus, reg);
    SpaprVioVlan *dev = VIO_SPAPR_VLAN_DEVICE(sdev);
    int i;

    for (i = 0; i < ETH_ALEN; i++) {
        dev->nicconf.macaddr.a[ETH_ALEN - i - 1] = macaddr & 0xff;
        macaddr >>= 8;
    }

    qemu_format_nic_info_str(qemu_get_queue(dev->nic), dev->nicconf.macaddr.a);

    return H_SUCCESS;
}

static Property spapr_vlan_properties[] = {
    DEFINE_SPAPR_PROPERTIES(SpaprVioVlan, sdev),
    DEFINE_NIC_PROPERTIES(SpaprVioVlan, nicconf),
    DEFINE_PROP_BIT("use-rx-buffer-pools", SpaprVioVlan,
                    compat_flags, SPAPRVLAN_FLAG_RX_BUF_POOLS_BIT, true),
    DEFINE_PROP_END_OF_LIST(),
};
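
/*
 * The "use-rx-buffer-pools" bit above defaults to on; it exists mainly so
 * that migration to and from setups which predate the buffer pools keeps
 * working. If needed, it can be turned off per device, e.g. (illustrative
 * command line, the netdev id "net0" is just an example):
 *
 *   -device spapr-vlan,netdev=net0,use-rx-buffer-pools=off
 */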

static bool spapr_vlan_rx_buffer_pools_needed(void *opaque)
{
    SpaprVioVlan *dev = opaque;

    return (dev->compat_flags & SPAPRVLAN_FLAG_RX_BUF_POOLS) != 0;
}

static const VMStateDescription vmstate_rx_buffer_pool = {
    .name = "spapr_llan/rx_buffer_pool",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = spapr_vlan_rx_buffer_pools_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(bufsize, RxBufPool),
        VMSTATE_INT32(count, RxBufPool),
        VMSTATE_UINT64_ARRAY(bds, RxBufPool, RX_POOL_MAX_BDS),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_rx_pools = {
    .name = "spapr_llan/rx_pools",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = spapr_vlan_rx_buffer_pools_needed,
    .fields = (VMStateField[]) {
        VMSTATE_ARRAY_OF_POINTER_TO_STRUCT(rx_pool, SpaprVioVlan,
                                           RX_MAX_POOLS, 1,
                                           vmstate_rx_buffer_pool, RxBufPool),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_spapr_llan = {
    .name = "spapr_llan",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_SPAPR_VIO(sdev, SpaprVioVlan),
        /* LLAN state */
        VMSTATE_BOOL(isopen, SpaprVioVlan),
        VMSTATE_UINT64(buf_list, SpaprVioVlan),
        VMSTATE_UINT32(add_buf_ptr, SpaprVioVlan),
        VMSTATE_UINT32(use_buf_ptr, SpaprVioVlan),
        VMSTATE_UINT32(rx_bufs, SpaprVioVlan),
        VMSTATE_UINT64(rxq_ptr, SpaprVioVlan),

        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_rx_pools,
        NULL
    }
};

static void spapr_vlan_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SpaprVioDeviceClass *k = VIO_SPAPR_DEVICE_CLASS(klass);

    k->realize = spapr_vlan_realize;
    k->reset = spapr_vlan_reset;
    k->devnode = spapr_vlan_devnode;
    k->dt_name = "l-lan";
    k->dt_type = "network";
    k->dt_compatible = "IBM,l-lan";
    k->signal_mask = 0x1;
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
    device_class_set_props(dc, spapr_vlan_properties);
    k->rtce_window_size = 0x10000000;
    dc->vmsd = &vmstate_spapr_llan;
}

static const TypeInfo spapr_vlan_info = {
    .name              = TYPE_VIO_SPAPR_VLAN_DEVICE,
    .parent            = TYPE_VIO_SPAPR_DEVICE,
    .instance_size     = sizeof(SpaprVioVlan),
    .class_init        = spapr_vlan_class_init,
    .instance_init     = spapr_vlan_instance_init,
    .instance_finalize = spapr_vlan_instance_finalize,
};

static void spapr_vlan_register_types(void)
{
    spapr_register_hypercall(H_REGISTER_LOGICAL_LAN, h_register_logical_lan);
    spapr_register_hypercall(H_FREE_LOGICAL_LAN, h_free_logical_lan);
    spapr_register_hypercall(H_SEND_LOGICAL_LAN, h_send_logical_lan);
    spapr_register_hypercall(H_ADD_LOGICAL_LAN_BUFFER,
                             h_add_logical_lan_buffer);
    spapr_register_hypercall(H_MULTICAST_CTRL, h_multicast_ctrl);
    spapr_register_hypercall(H_CHANGE_LOGICAL_LAN_MAC,
                             h_change_logical_lan_mac);
    type_register_static(&spapr_vlan_info);
}

type_init(spapr_vlan_register_types)