}
static int ntb_alloc_mw_buffer(struct ntb_transport_mw *mw,
- struct device *dma_dev, size_t align)
+ struct device *ntb_dev, size_t align)
{
dma_addr_t dma_addr;
void *alloc_addr, *virt_addr;
int rc;
- alloc_addr = dma_alloc_coherent(dma_dev, mw->alloc_size,
- &dma_addr, GFP_KERNEL);
+ /*
+ * The buffer here is allocated against the NTB device. A dma_alloc_*()
+ * call is used so that a large, IOVA-contiguous buffer backs the NTB
+ * BAR for the remote host to write into. During receive processing the
+ * data is copied out of this receive buffer into kernel skbuffs. When a
+ * DMA device is used for that copy, dma_map_page() is called on the
+ * kvaddr of the receive buffer (returned by dma_alloc_*()) and the
+ * buffer is remapped against the DMA device. This looks like a double
+ * DMA mapping of the same buffer, but the first mapping is to the NTB
+ * device and the second is to the DMA device.
+ * DMA_ATTR_FORCE_CONTIGUOUS is required so that the later
+ * dma_map_page() does not fail.
+ */
+ alloc_addr = dma_alloc_attrs(ntb_dev, mw->alloc_size,
+ &dma_addr, GFP_KERNEL,
+ DMA_ATTR_FORCE_CONTIGUOUS);
if (!alloc_addr) {
- dev_err(dma_dev, "Unable to alloc MW buff of size %zu\n",
+ dev_err(ntb_dev, "Unable to alloc MW buff of size %zu\n",
mw->alloc_size);
return -ENOMEM;
}
return 0;
err:
- dma_free_coherent(dma_dev, mw->alloc_size, alloc_addr, dma_addr);
+ dma_free_coherent(ntb_dev, mw->alloc_size, alloc_addr, dma_addr);
return rc;
}
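For context, a minimal sketch of the "second" mapping the comment above describes: during receive processing the same buffer that was allocated against the NTB device is mapped page-wise against the DMA engine's struct device before the engine copies it out. The function name ntb_rx_copy_via_dma() and its callers are hypothetical, not the driver's actual receive path; only the dma_map_page()/dma_unmap_page() pattern is what the comment refers to.

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>

/*
 * Hypothetical sketch (not the driver's actual rx path): map a region of
 * the coherent MW buffer against the DMA engine's device so the engine
 * can copy it into an skb. This works on dma_alloc_attrs() memory only
 * because DMA_ATTR_FORCE_CONTIGUOUS guarantees physically contiguous
 * pages behind the kvaddr, so a mapping larger than one page is valid.
 */
static int ntb_rx_copy_via_dma(struct dma_chan *chan, void *rx_buf,
			       size_t len)
{
	struct device *dma_dev = chan->device->dev;	/* DMA engine, not NTB */
	dma_addr_t src;

	/* Remap the kvaddr from dma_alloc_attrs() against the DMA device */
	src = dma_map_page(dma_dev, virt_to_page(rx_buf),
			   offset_in_page(rx_buf), len, DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, src))
		return -EIO;

	/* ... submit a memcpy descriptor using 'src' as the source ... */

	dma_unmap_page(dma_dev, src, len, DMA_TO_DEVICE);
	return 0;
}

Without DMA_ATTR_FORCE_CONTIGUOUS the coherent allocation may be backed by non-contiguous pages stitched together in vmalloc space, and the dma_map_page() above would map the wrong physical pages or fail, which is why the attribute is added in this patch.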