author    Gioh Kim <gioh.kim@lge.com>                  2014-05-22 04:38:23 (GMT)
committer Marek Szyprowski <m.szyprowski@samsung.com>  2014-05-22 06:09:31 (GMT)
commit    e464ef16c4f080bb85ccf9085f92503b5b5b1e13 (patch)
tree      4e0554952a1ec7de459131679a1e3138737d4eaf /arch/arm/mm/dma-mapping.c
parent    006f841db1e0da8801aedc6751b563b28f9a6319 (diff)
arm: dma-mapping: add checking cma area initialized
If CMA is turned on and the CMA size is set to zero, the kernel should behave as if CMA was not enabled at compile time. Every DMA allocation should therefore check for the existence of the CMA area before requesting memory from it.

Signed-off-by: Gioh Kim <gioh.kim@lge.com>
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Michal Nazarewicz <mina86@mina86.com>
[mszyprow: removed redundant empty line from the patch]
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
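For reference (not part of this patch), the runtime check used below relies on dev_get_cma_area(). In kernels of this era it was roughly the following inline helper from include/linux/dma-contiguous.h, shown here as an approximate sketch: it returns a per-device CMA area if one was assigned, otherwise the global default area, which stays NULL when no CMA region was reserved at boot.

/* Approximate sketch of the helper behind the new runtime checks
 * (based on include/linux/dma-contiguous.h of this kernel era;
 * exact code may differ between versions). */
static inline struct cma *dev_get_cma_area(struct device *dev)
{
        if (dev && dev->cma_area)
                return dev->cma_area;           /* per-device CMA region */
        return dma_contiguous_default_area;     /* NULL if no CMA was reserved */
}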
Diffstat (limited to 'arch/arm/mm/dma-mapping.c')
 arch/arm/mm/dma-mapping.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 3d43c41..5bef8585 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -390,7 +390,7 @@ static int __init atomic_pool_init(void)
         if (!pages)
                 goto no_pages;

-        if (IS_ENABLED(CONFIG_DMA_CMA))
+        if (dev_get_cma_area(NULL))
                 ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page,
                                               atomic_pool_init);
         else
@@ -701,7 +701,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
                 addr = __alloc_simple_buffer(dev, size, gfp, &page);
         else if (!(gfp & __GFP_WAIT))
                 addr = __alloc_from_pool(size, &page);
-        else if (!IS_ENABLED(CONFIG_DMA_CMA))
+        else if (!dev_get_cma_area(dev))
                 addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
         else
                 addr = __alloc_from_contiguous(dev, size, prot, &page, caller);
@@ -790,7 +790,7 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
                 __dma_free_buffer(page, size);
         } else if (__free_from_pool(cpu_addr, size)) {
                 return;
-        } else if (!IS_ENABLED(CONFIG_DMA_CMA)) {
+        } else if (!dev_get_cma_area(dev)) {
                 __dma_free_remap(cpu_addr, size);
                 __dma_free_buffer(page, size);
         } else {
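For additional context (a hedged sketch, not part of this commit): the global default area only becomes non-NULL when a CMA region is actually reserved during boot. In drivers/base/dma-contiguous.c of this era, dma_contiguous_reserve() skips the reservation when the selected size is zero, roughly as below, which is why dev_get_cma_area() returns NULL and the paths above fall back to the non-CMA code when CMA is built in but its size is zero.

/* Simplified sketch of dma_contiguous_reserve() (approximate; the real
 * function also parses the cma= command line and the CONFIG_CMA_SIZE_*
 * options). With CONFIG_DMA_CMA=y but a zero CMA size, selected_size is 0,
 * nothing is reserved, and dma_contiguous_default_area stays NULL. */
void __init dma_contiguous_reserve(phys_addr_t limit)
{
        phys_addr_t selected_size = 0;

        /* ... selected_size derived from cma= or CONFIG_CMA_SIZE_* ... */

        if (selected_size && !dma_contiguous_default_area)
                dma_contiguous_reserve_area(selected_size, 0, limit,
                                            &dma_contiguous_default_area);
}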