        unsigned long *cpu_addr;
        int retval = 1;

-       cpu_addr = dma_zalloc_coherent(&tw_dev->tw_pci_dev->dev,
-                       size * TW_Q_LENGTH, &dma_handle, GFP_KERNEL);
+       cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev,
+                       size * TW_Q_LENGTH, &dma_handle,
+                       GFP_KERNEL);
        if (!cpu_addr) {
                TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
                goto out;
        pci_set_master(pdev);
        pci_try_set_mwi(pdev);

-       if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
-           dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+       retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+       if (retval)
+               retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (retval) {
                TW_PRINTK(host, TW_DRIVER, 0x18, "Failed to set dma mask");
                retval = -ENODEV;
                goto out_disable_device;
        pci_set_master(pdev);
        pci_try_set_mwi(pdev);

-       if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
-           dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+       retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+       if (retval)
+               retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (retval) {
                TW_PRINTK(host, TW_DRIVER, 0x25, "Failed to set dma mask during resume");
                retval = -ENODEV;
                goto out_disable_device;
        if (!dmabuf)
                return NULL;

-       dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
-                       LPFC_HDR_TEMPLATE_SIZE,
-                       &dmabuf->phys, GFP_KERNEL);
+       dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
+                       LPFC_HDR_TEMPLATE_SIZE,
+                       &dmabuf->phys, GFP_KERNEL);
        if (!dmabuf->virt) {
                rpi_hdr = NULL;
                goto err_free_dmabuf;
        unsigned long bar0map_len, bar2map_len;
        int i, hbq_count;
        void *ptr;
-       int error = -ENODEV;
+       int error;

        if (!pdev)
-               return error;
+               return -ENODEV;

        /* Set the device DMA mask size */
-       if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
-           dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
+       error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+       if (error)
+               error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (error)
                return error;
+       error = -ENODEV;

        /* Get the bus address of Bar0 and Bar2 and the number of bytes
         * required by each mapping.
        }

        /* Allocate memory for SLI-2 structures */
-       phba->slim2p.virt = dma_zalloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
-                       &phba->slim2p.phys, GFP_KERNEL);
+       phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
+                       &phba->slim2p.phys, GFP_KERNEL);
        if (!phba->slim2p.virt)
                goto out_iounmap;
         * plus an alignment restriction of 16 bytes.
         */
        bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
-       dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, bmbx_size,
-                       &dmabuf->phys, GFP_KERNEL);
+       dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size,
+                       &dmabuf->phys, GFP_KERNEL);
        if (!dmabuf->virt) {
                kfree(dmabuf);
                return -ENOMEM;
        uint32_t if_type;

        if (!pdev)
-               return error;
+               return -ENODEV;

        /* Set the device DMA mask size */
-       if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
-           dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
+       error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+       if (error)
+               error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (error)
                return error;

        /*