#include "qemu/bswap.h"
#include "hw/pci/msix.h"
#include "hw/pci/msi.h"
+#include "migration/register.h"
#include "vmxnet3.h"
#include "vmxnet_debug.h"
"addr %" PRIx64 ", len: %d, gen: %d, rsvd: %d, "
"dtype: %d, ext1: %d, msscof: %d, hlen: %d, om: %d, "
"eop: %d, cq: %d, ext2: %d, ti: %d, tci: %d",
- le64_to_cpu(descr->addr), descr->len, descr->gen, descr->rsvd,
+ descr->addr, descr->len, descr->gen, descr->rsvd,
descr->dtype, descr->ext1, descr->msscof, descr->hlen, descr->om,
descr->eop, descr->cq, descr->ext2, descr->ti, descr->tci);
}
{
VMW_PKPRN("RX DESCR: addr %" PRIx64 ", len: %d, gen: %d, rsvd: %d, "
"dtype: %d, ext1: %d, btype: %d",
- le64_to_cpu(descr->addr), descr->len, descr->gen,
+ descr->addr, descr->len, descr->gen,
descr->rsvd, descr->dtype, descr->ext1, descr->btype);
}
memset(&txcq_descr, 0, sizeof(txcq_descr));
txcq_descr.txdIdx = tx_ridx;
txcq_descr.gen = vmxnet3_ring_curr_gen(&s->txq_descr[qidx].comp_ring);
-
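+    /* The completion descriptor is DMAed to the guest, which expects
+     * little-endian fields, so convert them before the write below. */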
+ txcq_descr.val1 = cpu_to_le32(txcq_descr.val1);
+ txcq_descr.val2 = cpu_to_le32(txcq_descr.val2);
vmxnet3_ring_write_curr_cell(d, &s->txq_descr[qidx].comp_ring, &txcq_descr);
/* Flush changes in TX descriptor before changing the counter value */
}
}
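+/*
+ * Read the TX descriptor at the ring's current cell and convert its
+ * fields from guest (little-endian) to host byte order.
+ */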
+static inline void
+vmxnet3_ring_read_curr_txdesc(PCIDevice *pcidev, Vmxnet3Ring *ring,
+ struct Vmxnet3_TxDesc *txd)
+{
+ vmxnet3_ring_read_curr_cell(pcidev, ring, txd);
+ txd->addr = le64_to_cpu(txd->addr);
+ txd->val1 = le32_to_cpu(txd->val1);
+ txd->val2 = le32_to_cpu(txd->val2);
+}
+
static inline bool
vmxnet3_pop_next_tx_descr(VMXNET3State *s,
int qidx,
Vmxnet3Ring *ring = &s->txq_descr[qidx].tx_ring;
PCIDevice *d = PCI_DEVICE(s);
- vmxnet3_ring_read_curr_cell(d, ring, txd);
+ vmxnet3_ring_read_curr_txdesc(d, ring, txd);
if (txd->gen == vmxnet3_ring_curr_gen(ring)) {
/* Only read after generation field verification */
smp_rmb();
/* Re-read to be sure we got the latest version */
- vmxnet3_ring_read_curr_cell(d, ring, txd);
+ vmxnet3_ring_read_curr_txdesc(d, ring, txd);
VMXNET3_RING_DUMP(VMW_RIPRN, "TX", qidx, ring);
*descr_idx = vmxnet3_ring_curr_cell_idx(ring);
vmxnet3_inc_tx_consumption_counter(s, qidx);
if (!s->skip_current_tx_pkt) {
data_len = (txd.len > 0) ? txd.len : VMXNET3_MAX_TX_BUF_SIZE;
- data_pa = le64_to_cpu(txd.addr);
+ data_pa = txd.addr;
if (!net_tx_pkt_add_raw_fragment(s->tx_pkt,
data_pa,
Vmxnet3Ring *ring = &s->rxq_descr[qidx].rx_ring[ridx];
*didx = vmxnet3_ring_curr_cell_idx(ring);
vmxnet3_ring_read_curr_cell(d, ring, dbuf);
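+    /* The descriptor comes from guest memory; convert to host byte order */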
+ dbuf->addr = le64_to_cpu(dbuf->addr);
+ dbuf->val1 = le32_to_cpu(dbuf->val1);
+ dbuf->ext1 = le32_to_cpu(dbuf->ext1);
}
static inline uint8_t
pci_dma_read(PCI_DEVICE(s),
daddr, &rxcd, sizeof(struct Vmxnet3_RxCompDesc));
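+    /* RX completion descriptor fields are little-endian in guest memory */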
+ rxcd.val1 = le32_to_cpu(rxcd.val1);
+ rxcd.val2 = le32_to_cpu(rxcd.val2);
+ rxcd.val3 = le32_to_cpu(rxcd.val3);
ring_gen = vmxnet3_ring_curr_gen(&s->rxq_descr[qidx].comp_ring);
if (rxcd.gen != ring_gen) {
data = (uint8_t *)pkt_data + vhdr->csum_start;
len = pkt_len - vhdr->csum_start;
/* Put the checksum obtained into the packet */
- stw_be_p(data + vhdr->csum_offset, net_raw_checksum(data, len));
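+    /* net_checksum_finish_nozero() yields 0xffff rather than 0 when the
+     * computed checksum sums to zero */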
+ stw_be_p(data + vhdr->csum_offset,
+ net_checksum_finish_nozero(net_checksum_add(len, data)));
vhdr->flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM;
vhdr->flags |= VIRTIO_NET_HDR_F_DATA_VALID;
}
}
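+/*
+ * Convert an RX completion descriptor to little endian and DMA it to
+ * guest memory at @pa.
+ */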
+static void
+vmxnet3_pci_dma_write_rxcd(PCIDevice *pcidev, dma_addr_t pa,
+ struct Vmxnet3_RxCompDesc *rxcd)
+{
+ rxcd->val1 = cpu_to_le32(rxcd->val1);
+ rxcd->val2 = cpu_to_le32(rxcd->val2);
+ rxcd->val3 = cpu_to_le32(rxcd->val3);
+ pci_dma_write(pcidev, pa, rxcd, sizeof(*rxcd));
+}
+
static bool
vmxnet3_indicate_packet(VMXNET3State *s)
{
}
chunk_size = MIN(bytes_left, rxd.len);
- vmxnet3_pci_dma_writev(d, data, bytes_copied,
- le64_to_cpu(rxd.addr), chunk_size);
+ vmxnet3_pci_dma_writev(d, data, bytes_copied, rxd.addr, chunk_size);
bytes_copied += chunk_size;
bytes_left -= chunk_size;
vmxnet3_dump_rx_descr(&rxd);
if (ready_rxcd_pa != 0) {
- pci_dma_write(d, ready_rxcd_pa, &rxcd, sizeof(rxcd));
+ vmxnet3_pci_dma_write_rxcd(d, ready_rxcd_pa, &rxcd);
}
memset(&rxcd, 0, sizeof(struct Vmxnet3_RxCompDesc));
rxcd.eop = 1;
rxcd.err = (bytes_left != 0);
- pci_dma_write(d, ready_rxcd_pa, &rxcd, sizeof(rxcd));
+ vmxnet3_pci_dma_write_rxcd(d, ready_rxcd_pa, &rxcd);
/* Flush RX descriptor changes */
smp_wmb();
},
};
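+/* MSI-X save/load handlers, registered via register_savevm_live() below */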
+static SaveVMHandlers savevm_vmxnet3_msix = {
+ .save_state = vmxnet3_msix_save,
+ .load_state = vmxnet3_msix_load,
+};
+
static uint64_t vmxnet3_device_serial_num(VMXNET3State *s)
{
uint64_t dsn_payload;
vmxnet3_net_init(s);
if (pci_is_express(pci_dev)) {
- if (pci_bus_is_express(pci_dev->bus)) {
+ if (pci_bus_is_express(pci_get_bus(pci_dev))) {
pcie_endpoint_cap_init(pci_dev, VMXNET3_EXP_EP_OFFSET);
}
vmxnet3_device_serial_num(s));
}
- register_savevm(dev, "vmxnet3-msix", -1, 1,
- vmxnet3_msix_save, vmxnet3_msix_load, s);
+ register_savevm_live(dev, "vmxnet3-msix", -1, 1, &savevm_vmxnet3_msix, s);
}
static void vmxnet3_instance_init(Object *obj)
}
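+/* .pre_save now returns an error code; 0 means success */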
-static void vmxnet3_pre_save(void *opaque)
+static int vmxnet3_pre_save(void *opaque)
{
VMXNET3State *s = opaque;
s->mcast_list_buff_size = s->mcast_list_len * sizeof(MACAddr);
+
+ return 0;
}
static const VMStateDescription vmxstate_vmxnet3_mcast_list = {
c->class_id = PCI_CLASS_NETWORK_ETHERNET;
c->subsystem_vendor_id = PCI_VENDOR_ID_VMWARE;
c->subsystem_id = PCI_DEVICE_ID_VMWARE_VMXNET3;
- vc->parent_dc_realize = dc->realize;
- dc->realize = vmxnet3_realize;
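+    /* Chain the parent's realize: the previous dc->realize is saved into
+     * vc->parent_dc_realize and replaced with vmxnet3_realize */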
+ device_class_set_parent_realize(dc, vmxnet3_realize,
+ &vc->parent_dc_realize);
dc->desc = "VMWare Paravirtualized Ethernet v3";
dc->reset = vmxnet3_qdev_reset;
dc->vmsd = &vmstate_vmxnet3;
.instance_size = sizeof(VMXNET3State),
.class_init = vmxnet3_class_init,
.instance_init = vmxnet3_instance_init,
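+    /* The device can sit on either a PCI Express or a conventional PCI bus */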
+ .interfaces = (InterfaceInfo[]) {
+ { INTERFACE_PCIE_DEVICE },
+ { INTERFACE_CONVENTIONAL_PCI_DEVICE },
+ { }
+ },
};
static void vmxnet3_register_types(void)