Diffstat (limited to 'arch/sparc/kernel/ioport.c')
-rw-r--r--	arch/sparc/kernel/ioport.c	| 108
1 file changed, 63 insertions, 45 deletions
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index 41f7e4e..c6ce9a6 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -50,10 +50,14 @@
 #include <asm/io-unit.h>
 #include <asm/leon.h>
 
-#ifdef CONFIG_SPARC_LEON
-#define mmu_inval_dma_area(p, l) leon_flush_dcache_all()
-#else
+#ifndef CONFIG_SPARC_LEON
 #define mmu_inval_dma_area(p, l)	/* Anton pulled it out for 2.4.0-xx */
+#else
+static inline void mmu_inval_dma_area(void *va, unsigned long len)
+{
+	if (!sparc_leon3_snooping_enabled())
+		leon_flush_dcache_all();
+}
 #endif
 
 static struct resource *_sparc_find_resource(struct resource *r,
@@ -254,7 +258,7 @@ static void *sbus_alloc_coherent(struct device *dev, size_t len,
 				 dma_addr_t *dma_addrp, gfp_t gfp)
 {
 	struct platform_device *op = to_platform_device(dev);
-	unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
+	unsigned long len_total = PAGE_ALIGN(len);
 	unsigned long va;
 	struct resource *res;
 	int order;
@@ -280,7 +284,8 @@ static void *sbus_alloc_coherent(struct device *dev, size_t len,
 		printk("sbus_alloc_consistent: cannot occupy 0x%lx", len_total);
 		goto err_nova;
 	}
-	mmu_inval_dma_area(va, len_total);
+	mmu_inval_dma_area((void *)va, len_total);
+
 	// XXX The mmu_map_dma_area does this for us below, see comments.
 	// sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);
 	/*
@@ -297,9 +302,9 @@ static void *sbus_alloc_coherent(struct device *dev, size_t len,
 err_noiommu:
 	release_resource(res);
 err_nova:
-	free_pages(va, order);
-err_nomem:
 	kfree(res);
+err_nomem:
+	free_pages(va, order);
 err_nopages:
 	return NULL;
 }
@@ -321,7 +326,7 @@ static void sbus_free_coherent(struct device *dev, size_t n, void *p,
 		return;
 	}
 
-	n = (n + PAGE_SIZE-1) & PAGE_MASK;
+	n = PAGE_ALIGN(n);
 	if ((res->end-res->start)+1 != n) {
 		printk("sbus_free_consistent: region 0x%lx asked 0x%zx\n",
 		       (long)((res->end-res->start)+1), n);
@@ -408,9 +413,6 @@ struct dma_map_ops sbus_dma_ops = {
 	.sync_sg_for_device	= sbus_sync_sg_for_device,
 };
 
-struct dma_map_ops *dma_ops = &sbus_dma_ops;
-EXPORT_SYMBOL(dma_ops);
-
 static int __init sparc_register_ioport(void)
 {
 	register_proc_sparc_ioport();
@@ -422,7 +424,9 @@ arch_initcall(sparc_register_ioport);
 
 #endif /* CONFIG_SBUS */
 
-#ifdef CONFIG_PCI
+
+/* LEON reuses PCI DMA ops */
+#if defined(CONFIG_PCI) || defined(CONFIG_SPARC_LEON)
 
 /* Allocate and map kernel buffer using consistent mode DMA for a device.
  * hwdev should be valid struct pci_dev pointer for PCI devices.
@@ -430,8 +434,8 @@ arch_initcall(sparc_register_ioport);
 static void *pci32_alloc_coherent(struct device *dev, size_t len,
 				  dma_addr_t *pba, gfp_t gfp)
 {
-	unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
-	unsigned long va;
+	unsigned long len_total = PAGE_ALIGN(len);
+	void *va;
 	struct resource *res;
 	int order;
 
@@ -443,34 +447,34 @@ static void *pci32_alloc_coherent(struct device *dev, size_t len,
 	}
 
 	order = get_order(len_total);
-	va = __get_free_pages(GFP_KERNEL, order);
-	if (va == 0) {
+	va = (void *) __get_free_pages(GFP_KERNEL, order);
+	if (va == NULL) {
 		printk("pci_alloc_consistent: no %ld pages\n", len_total>>PAGE_SHIFT);
-		return NULL;
+		goto err_nopages;
 	}
 
 	if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) {
-		free_pages(va, order);
 		printk("pci_alloc_consistent: no core\n");
-		return NULL;
+		goto err_nomem;
 	}
 
 	if (allocate_resource(&_sparc_dvma, res, len_total,
 	    _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
 		printk("pci_alloc_consistent: cannot occupy 0x%lx", len_total);
-		free_pages(va, order);
-		kfree(res);
-		return NULL;
+		goto err_nova;
 	}
 	mmu_inval_dma_area(va, len_total);
-#if 0
-/* P3 */ printk("pci_alloc_consistent: kva %lx uncva %lx phys %lx size %lx\n",
-  (long)va, (long)res->start, (long)virt_to_phys(va), len_total);
-#endif
 	sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);
 
 	*pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */
 	return (void *) res->start;
+
+err_nova:
+	kfree(res);
+err_nomem:
+	free_pages((unsigned long)va, order);
+err_nopages:
+	return NULL;
 }
 
 /* Free and unmap a consistent DMA buffer.
@@ -485,7 +489,7 @@ static void pci32_free_coherent(struct device *dev, size_t n, void *p,
 				dma_addr_t ba)
 {
 	struct resource *res;
-	unsigned long pgp;
+	void *pgp;
 
 	if ((res = _sparc_find_resource(&_sparc_dvma,
 	    (unsigned long)p)) == NULL) {
@@ -498,21 +502,21 @@ static void pci32_free_coherent(struct device *dev, size_t n, void *p,
 		return;
 	}
 
-	n = (n + PAGE_SIZE-1) & PAGE_MASK;
+	n = PAGE_ALIGN(n);
 	if ((res->end-res->start)+1 != n) {
 		printk("pci_free_consistent: region 0x%lx asked 0x%lx\n",
 		    (long)((res->end-res->start)+1), (long)n);
 		return;
 	}
 
-	pgp = (unsigned long) phys_to_virt(ba);	/* bus_to_virt actually */
+	pgp = phys_to_virt(ba);	/* bus_to_virt actually */
 	mmu_inval_dma_area(pgp, n);
 	sparc_unmapiorange((unsigned long)p, n);
 
 	release_resource(res);
 	kfree(res);
 
-	free_pages(pgp, get_order(n));
+	free_pages((unsigned long)pgp, get_order(n));
 }
 
 /*
@@ -527,6 +531,13 @@ static dma_addr_t pci32_map_page(struct device *dev, struct page *page,
 	return page_to_phys(page) + offset;
 }
 
+static void pci32_unmap_page(struct device *dev, dma_addr_t ba, size_t size,
+			     enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+	if (dir != PCI_DMA_TODEVICE)
+		mmu_inval_dma_area(phys_to_virt(ba), PAGE_ALIGN(size));
+}
+
 /* Map a set of buffers described by scatterlist in streaming
  * mode for DMA.  This is the scather-gather version of the
  * above pci_map_single interface.  Here the scatter gather list
@@ -572,9 +583,8 @@ static void pci32_unmap_sg(struct device *dev, struct scatterlist *sgl,
 	if (dir != PCI_DMA_TODEVICE) {
 		for_each_sg(sgl, sg, nents, n) {
 			BUG_ON(page_address(sg_page(sg)) == NULL);
-			mmu_inval_dma_area(
-			    (unsigned long) page_address(sg_page(sg)),
-			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
+			mmu_inval_dma_area(page_address(sg_page(sg)),
+					   PAGE_ALIGN(sg->length));
 		}
 	}
 }
@@ -593,8 +603,8 @@ static void pci32_sync_single_for_cpu(struct device *dev, dma_addr_t ba,
 				      size_t size, enum dma_data_direction dir)
 {
 	if (dir != PCI_DMA_TODEVICE) {
-		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
-		    (size + PAGE_SIZE-1) & PAGE_MASK);
+		mmu_inval_dma_area(phys_to_virt(ba),
+				   PAGE_ALIGN(size));
 	}
 }
 
@@ -602,8 +612,8 @@ static void pci32_sync_single_for_device(struct device *dev, dma_addr_t ba,
 					 size_t size, enum dma_data_direction dir)
 {
 	if (dir != PCI_DMA_TODEVICE) {
-		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
-		    (size + PAGE_SIZE-1) & PAGE_MASK);
+		mmu_inval_dma_area(phys_to_virt(ba),
+				   PAGE_ALIGN(size));
 	}
 }
 
@@ -622,9 +632,8 @@ static void pci32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
 	if (dir != PCI_DMA_TODEVICE) {
 		for_each_sg(sgl, sg, nents, n) {
 			BUG_ON(page_address(sg_page(sg)) == NULL);
-			mmu_inval_dma_area(
-			    (unsigned long) page_address(sg_page(sg)),
-			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
+			mmu_inval_dma_area(page_address(sg_page(sg)),
+					   PAGE_ALIGN(sg->length));
 		}
 	}
 }
@@ -638,9 +647,8 @@ static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *
 	if (dir != PCI_DMA_TODEVICE) {
 		for_each_sg(sgl, sg, nents, n) {
 			BUG_ON(page_address(sg_page(sg)) == NULL);
-			mmu_inval_dma_area(
-			    (unsigned long) page_address(sg_page(sg)),
-			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
+			mmu_inval_dma_area(page_address(sg_page(sg)),
+					   PAGE_ALIGN(sg->length));
 		}
 	}
 }
@@ -649,6 +657,7 @@ struct dma_map_ops pci32_dma_ops = {
 	.alloc_coherent		= pci32_alloc_coherent,
 	.free_coherent		= pci32_free_coherent,
 	.map_page		= pci32_map_page,
+	.unmap_page		= pci32_unmap_page,
 	.map_sg			= pci32_map_sg,
 	.unmap_sg		= pci32_unmap_sg,
 	.sync_single_for_cpu	= pci32_sync_single_for_cpu,
@@ -658,7 +667,16 @@ struct dma_map_ops pci32_dma_ops = {
 };
 EXPORT_SYMBOL(pci32_dma_ops);
 
-#endif /* CONFIG_PCI */
+#endif /* CONFIG_PCI || CONFIG_SPARC_LEON */
+
+#ifdef CONFIG_SPARC_LEON
+struct dma_map_ops *dma_ops = &pci32_dma_ops;
+#elif defined(CONFIG_SBUS)
+struct dma_map_ops *dma_ops = &sbus_dma_ops;
+#endif
+
+EXPORT_SYMBOL(dma_ops);
+
 
 /*
  * Return whether the given PCI device DMA address mask can be
@@ -717,7 +735,7 @@ static const struct file_operations sparc_io_proc_fops = {
 static struct resource *_sparc_find_resource(struct resource *root,
 					     unsigned long hit)
 {
-	struct resource *tmp;
+	struct resource *tmp;
 
 	for (tmp = root->child; tmp != 0; tmp = tmp->sibling) {
 		if (tmp->start <= hit && tmp->end >= hit)
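
A note on the recurring cleanup above: PAGE_ALIGN(len) computes the same round-up-to-page-boundary value as the open-coded (len + PAGE_SIZE-1) & PAGE_MASK expressions it replaces. The short userspace sketch below is not kernel code; the 4 KiB page size and the local macro definitions are assumptions that mirror the kernel's, just to check that the two forms agree.

/*
 * Userspace sketch (assumed 4 KiB pages): PAGE_ALIGN() versus the
 * open-coded rounding expression removed by this patch.
 */
#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	unsigned long lens[] = { 1, PAGE_SIZE - 1, PAGE_SIZE, PAGE_SIZE + 1, 3 * PAGE_SIZE };
	size_t i;

	for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
		/* the form the patch removes */
		unsigned long open_coded = (lens[i] + PAGE_SIZE - 1) & PAGE_MASK;

		assert(PAGE_ALIGN(lens[i]) == open_coded);
		printf("len %lu -> %lu\n", lens[i], PAGE_ALIGN(lens[i]));
	}
	return 0;
}

Using the named macro keeps the rounding in one place and avoids repeating an expression that is easy to get wrong.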
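
The pci32_alloc_coherent() hunk also replaces duplicated cleanup at every error return with a single goto-based unwind path: each failure jumps to a label that releases only what has already been acquired, in reverse order, and the sbus_alloc_coherent() hunk reorders its existing labels so they again undo acquisitions in reverse order. Below is a minimal userspace sketch of that pattern; the names and the malloc() stand-ins are hypothetical, used only for illustration.

/*
 * Userspace sketch of goto-based error unwinding: each failure jumps
 * to a label that frees only what was acquired before it, in reverse
 * order of acquisition.
 */
#include <stdio.h>
#include <stdlib.h>

struct buffer {
	void *pages;
	void *res;
};

static struct buffer *alloc_buffer(size_t len)
{
	struct buffer *buf;

	buf = malloc(sizeof(*buf));
	if (buf == NULL)
		goto err_nobuf;

	buf->pages = malloc(len);	/* stands in for __get_free_pages() */
	if (buf->pages == NULL)
		goto err_nopages;

	buf->res = malloc(32);		/* stands in for the resource record */
	if (buf->res == NULL)
		goto err_nores;

	return buf;

err_nores:
	free(buf->pages);		/* undo in reverse order of acquisition */
err_nopages:
	free(buf);
err_nobuf:
	return NULL;
}

int main(void)
{
	struct buffer *buf = alloc_buffer(4096);

	printf("alloc_buffer: %s\n", buf ? "ok" : "failed");
	if (buf) {
		free(buf->res);
		free(buf->pages);
		free(buf);
	}
	return 0;
}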