author		Shaohui Xie <Shaohui.Xie@freescale.com>	2012-09-27 11:44:32 (GMT)
committer	Emil Medve <Emilian.Medve@Freescale.com>	2013-03-21 18:42:11 (GMT)
commit		142229f7ecd8e302821fc6e3f81d934a834796af (patch)
tree		dd3a715d926afd345a7ad0c9cee20bcd758b74bf /arch/powerpc/kernel/dma.c
parent		16e8de2280329ce5b1afd5f881ea1b434953f0e4 (diff)
download	linux-fsl-qoriq-142229f7ecd8e302821fc6e3f81d934a834796af.tar.xz
powerpc/mm: add ZONE_NORMAL zone for 64 bit kernel
The PowerPC platform supports only the ZONE_DMA zone on 64-bit kernels, so all memory is put into that zone. If the memory size is greater than a device's DMA capability and the device uses dma_alloc_coherent() to allocate memory, it can get an address beyond its DMA addressing range, and the device will fail.

So we split memory into two zones by adding ZONE_NORMAL. Since we already allocate PCICSRBAR/PEXCSRBAR right below the 4G boundary (if the lowest PCI address is above 4G), we constrain ZONE_DMA to 2GB. We also clear the __GFP_DMA flag and set it only if the device's dma_mask is smaller than the total memory size. By doing this, devices which cannot DMA to all of memory are limited to ZONE_DMA, while devices which can DMA to all of memory are not affected by the limitation.

Signed-off-by: Shaohui Xie <Shaohui.Xie@freescale.com>
Signed-off-by: Mingkai Hu <Mingkai.hu@freescale.com>
Signed-off-by: Chen Yuanquan <B41889@freescale.com>
(cherry picked from commit 070f1ae0a48417888b6ad02b15baa384823c7591)
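[Editor's note] The decision described above reduces to one comparison between the device's DMA mask and the top of RAM. Below is a minimal sketch of that logic, not the patch itself: memblock_end_of_DRAM() is the real accessor the patch uses, while the helper name zone_flags_for_dev is hypothetical.

	#include <linux/device.h>
	#include <linux/gfp.h>
	#include <linux/memblock.h>

	/* Hypothetical helper illustrating the zone selection in the diff below. */
	static gfp_t zone_flags_for_dev(struct device *dev, gfp_t flag)
	{
		/* Top of physical memory, sampled the same way the patch does. */
		phys_addr_t ram_top = memblock_end_of_DRAM();

		/* Drop any caller-supplied zone hints first. */
		flag &= ~(__GFP_HIGHMEM | __GFP_DMA);

		/*
		 * A device that cannot address all of RAM must allocate from
		 * ZONE_DMA (capped at 2GB on these platforms); a device whose
		 * mask covers everything may also use ZONE_NORMAL.
		 */
		if (*dev->dma_mask < ram_top - 1)
			flag |= GFP_DMA;

		return flag;
	}

Note that the comparison reads the streaming dma_mask (*dev->dma_mask), mirroring the patch.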
Diffstat (limited to 'arch/powerpc/kernel/dma.c')
-rw-r--r--	arch/powerpc/kernel/dma.c | 13 ++++++++++++-
1 file changed, 12 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 8032b97..1a3dd00 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -31,6 +31,7 @@ void *dma_direct_alloc_coherent(struct device *dev, size_t size,
 				 struct dma_attrs *attrs)
 {
 	void *ret;
+	phys_addr_t top_ram_pfn = memblock_end_of_DRAM();
 #ifdef CONFIG_NOT_COHERENT_CACHE
 	ret = __dma_alloc_coherent(dev, size, dma_handle, flag);
 	if (ret == NULL)
@@ -41,8 +42,18 @@ void *dma_direct_alloc_coherent(struct device *dev, size_t size,
 	struct page *page;
 	int node = dev_to_node(dev);
 
+	/*
+	 * check for crappy device which has dma_mask < ZONE_DMA, and
+	 * we are not going to support it, just warn and fail.
+	 */
+	if (*dev->dma_mask < DMA_BIT_MASK(31)) {
+		dev_err(dev, "Unsupported dma_mask 0x%llx\n", *dev->dma_mask);
+		return NULL;
+	}
 	/* ignore region specifiers */
-	flag &= ~(__GFP_HIGHMEM);
+	flag &= ~(__GFP_HIGHMEM | __GFP_DMA);
+	if (*dev->dma_mask < top_ram_pfn - 1)
+		flag |= GFP_DMA;
 
 	page = alloc_pages_node(node, flag, get_order(size));
 	if (page == NULL)
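[Editor's note] For context, this is roughly how a driver would reach the path above. The probe function is hypothetical, but dma_set_mask(), dma_set_coherent_mask(), dma_alloc_coherent(), and dma_free_coherent() are the standard DMA API entry points of this era. On a machine with more than 2GB of RAM, a device declaring a 32-bit mask is now steered into ZONE_DMA instead of receiving an unreachable buffer; a mask narrower than DMA_BIT_MASK(31), i.e. below the 2GB ZONE_DMA cap, is rejected outright.

	#include <linux/device.h>
	#include <linux/dma-mapping.h>
	#include <linux/errno.h>
	#include <linux/mm.h>

	/* Hypothetical probe path showing how a driver hits the code above. */
	static int example_probe(struct device *dev)
	{
		dma_addr_t handle;
		void *buf;

		/*
		 * Declare what the hardware can address; 32 bits here. The
		 * patch consults dev->dma_mask, which dma_set_mask() sets.
		 */
		if (dma_set_mask(dev, DMA_BIT_MASK(32)) ||
		    dma_set_coherent_mask(dev, DMA_BIT_MASK(32)))
			return -EIO;

		/* With the patch, this lands in ZONE_DMA when RAM exceeds the mask. */
		buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		/* ... program the device with 'handle' ... */

		dma_free_coherent(dev, PAGE_SIZE, buf, handle);
		return 0;
	}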