struct page *page;
void *ptr = NULL;
- page = dma_alloc_from_contiguous(dev, count, order, gfp);
+ page = dma_alloc_from_contiguous(dev, count, order, gfp & __GFP_NOWARN);
if (!page)
return NULL;
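
Note: the masked value works as the new last argument because the
parameter is now a plain bool, so gfp & __GFP_NOWARN converts to true
exactly when the caller had set __GFP_NOWARN. A minimal equivalent
spelling (illustrative only, not part of the patch):

	bool no_warn = (gfp & __GFP_NOWARN) != 0;	/* what the implicit conversion computes */
	page = dma_alloc_from_contiguous(dev, count, order, no_warn);
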
unsigned long order = get_order(size);
struct page *page;
- page = dma_alloc_from_contiguous(dev, count, order, gfp);
+ page = dma_alloc_from_contiguous(dev, count, order,
+ gfp & __GFP_NOWARN);
if (!page)
goto error;
if (dev_get_cma_area(NULL))
page = dma_alloc_from_contiguous(NULL, nr_pages,
- pool_size_order, GFP_KERNEL);
+ pool_size_order, false);
else
page = alloc_pages(GFP_DMA32, pool_size_order);
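
Note: the old call here passed GFP_KERNEL, which never contains
__GFP_NOWARN, so the literal false keeps the existing behavior of
warning when the atomic pool cannot be carved out of CMA. A
compile-time sketch of that claim (not in the patch):

	/* GFP_KERNEL carries no __GFP_NOWARN bit, so the old code always warned */
	BUILD_BUG_ON(GFP_KERNEL & __GFP_NOWARN);
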
struct page *page;
page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
- get_order(size), gfp);
+ get_order(size), gfp & __GFP_NOWARN);
if (!page)
return NULL;
if (gfpflags_allow_blocking(flag))
page = dma_alloc_from_contiguous(dev, count, get_order(size),
- flag);
+ flag & __GFP_NOWARN);
if (!page)
page = alloc_pages(flag, get_order(size));
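
Note: this is the usual shape for CMA call sites: cma_alloc() may
sleep, so CMA is tried only when gfpflags_allow_blocking() permits it,
with plain alloc_pages() as the fallback. The same pattern written out
as a hypothetical helper (the helper name is assumed, for illustration
only):

	static struct page *try_cma_then_buddy(struct device *dev, size_t count,
					       unsigned int order, gfp_t flag)
	{
		struct page *page = NULL;

		if (gfpflags_allow_blocking(flag))	/* CMA may sleep */
			page = dma_alloc_from_contiguous(dev, count, order,
							 flag & __GFP_NOWARN);
		if (!page)				/* buddy allocator fallback */
			page = alloc_pages(flag, order);
		return page;
	}
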
if (!gfpflags_allow_blocking(flag))
	return NULL;
page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
- get_order(size), flag);
+ get_order(size), flag & __GFP_NOWARN);
if (!page)
return NULL;
}
if (gfpflags_allow_blocking(flags)) {
unsigned int count = size >> PAGE_SHIFT;
- page = dma_alloc_from_contiguous(dev, count, order, flags);
+ page = dma_alloc_from_contiguous(dev, count, order,
+ flags & __GFP_NOWARN);
if (page && iommu_no_mapping(dev) &&
page_to_phys(page) + size > dev->coherent_dma_mask) {
dma_release_from_contiguous(dev, page, count);
page = NULL;
}
struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
- unsigned int order, gfp_t gfp_mask);
+ unsigned int order, bool no_warn);
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
int count);
static inline
struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
- unsigned int order, gfp_t gfp_mask)
+ unsigned int order, bool no_warn)
{
return NULL;
}
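
Note: with the prototype change, suppressing the CMA failure warning
becomes an explicit yes/no decision rather than a bit smuggled through
a gfp mask. A hypothetical caller under the new signature (dev, count,
order and gfp are assumed to exist in the surrounding code):

	struct page *page;

	/* true means no_warn: stay silent if the CMA area cannot satisfy this */
	page = dma_alloc_from_contiguous(dev, count, order, true);
	if (!page)
		page = alloc_pages(gfp | __GFP_NOWARN, order);
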
* @dev: Pointer to device for which the allocation is performed.
* @count: Requested number of pages.
* @align: Requested alignment of pages (in PAGE_SIZE order).
- * @gfp_mask: GFP flags to use for this allocation.
+ * @no_warn: Avoid printing message about failed allocation.
*
* This function allocates memory buffer for specified device. It uses
* device specific contiguous memory area if available or the default
* global one. Requires architecture specific dev_get_cma_area() helper
* function.
*/
struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
- unsigned int align, gfp_t gfp_mask)
+ unsigned int align, bool no_warn)
{
if (align > CONFIG_CMA_ALIGNMENT)
align = CONFIG_CMA_ALIGNMENT;
- return cma_alloc(dev_get_cma_area(dev), count, align,
- gfp_mask & __GFP_NOWARN);
+ return cma_alloc(dev_get_cma_area(dev), count, align, no_warn);
}
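
Note: the bool is forwarded untouched to cma_alloc(), whose final
parameter has the same meaning after the companion mm/cma change in
this series; the gfp_t argument could be dropped here precisely because
__GFP_NOWARN was the only bit this function ever honored. For
reference, the prototype being called (include/linux/cma.h at this
point in the series):

	struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
			       bool no_warn);
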
again:
/* CMA can be used only in the context which permits sleeping */
if (gfpflags_allow_blocking(gfp)) {
- page = dma_alloc_from_contiguous(dev, count, page_order, gfp);
+ page = dma_alloc_from_contiguous(dev, count, page_order,
+ gfp & __GFP_NOWARN);
if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
dma_release_from_contiguous(dev, page, count);
page = NULL;
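
Note: dma_coherent_ok() plays the same role here as the open-coded
coherent_dma_mask comparison in the intel-iommu hunk above: a CMA page
is kept only if the device can actually address it. Roughly (a
simplified sketch, not the exact kernel implementation):

	/* keep the page only if its bus address range fits the coherent mask */
	bool ok = phys_to_dma(dev, page_to_phys(page)) + size - 1 <=
		  dev->coherent_dma_mask;
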