From 1fd1e6cd63143cf5d198a536d875dfc88ce179bc Mon Sep 17 00:00:00 2001
From: Geert Uytterhoeven
Date: Fri, 16 Dec 2016 14:28:41 +0100
Subject: swiotlb: Convert swiotlb_force from int to enum

commit ae7871be189cb41184f1e05742b4a99e2c59774d upstream.

Convert the flag swiotlb_force from an int to an enum, to prepare for
the advent of more possible values.

Suggested-by: Konrad Rzeszutek Wilk
Signed-off-by: Geert Uytterhoeven
Signed-off-by: Konrad Rzeszutek Wilk
Signed-off-by: Greg Kroah-Hartman

diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 3f74d0d..02265a5 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -524,7 +524,8 @@ EXPORT_SYMBOL(dummy_dma_ops);
 
 static int __init arm64_dma_init(void)
 {
-	if (swiotlb_force || max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
+	if (swiotlb_force == SWIOTLB_FORCE ||
+	    max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
 		swiotlb = 1;
 
 	return atomic_pool_init();
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 59bd3d4..380ebe7 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -401,7 +401,8 @@ static void __init free_unused_memmap(void)
  */
 void __init mem_init(void)
 {
-	if (swiotlb_force || max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
+	if (swiotlb_force == SWIOTLB_FORCE ||
+	    max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
 		swiotlb_init(1);
 	else
 		swiotlb_force = SWIOTLB_NO_FORCE;
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
index b47edb8..8da13d4 100644
--- a/arch/x86/kernel/pci-swiotlb.c
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -70,7 +70,7 @@ int __init pci_swiotlb_detect_override(void)
 {
 	int use_swiotlb = swiotlb | swiotlb_force;
 
-	if (swiotlb_force)
+	if (swiotlb_force == SWIOTLB_FORCE)
 		swiotlb = 1;
 
 	return use_swiotlb;
diff --git a/arch/x86/xen/pci-swiotlb-xen.c b/arch/x86/xen/pci-swiotlb-xen.c
index 0e98e5d..5f8b4b0 100644
--- a/arch/x86/xen/pci-swiotlb-xen.c
+++ b/arch/x86/xen/pci-swiotlb-xen.c
@@ -49,7 +49,7 @@ int __init pci_xen_swiotlb_detect(void)
 	 * activate this IOMMU. If running as PV privileged, activate it
 	 * irregardless.
 	 */
-	if ((xen_initial_domain() || swiotlb || swiotlb_force))
+	if (xen_initial_domain() || swiotlb || swiotlb_force == SWIOTLB_FORCE)
 		xen_swiotlb = 1;
 
 	/* If we are running under Xen, we MUST disable the native SWIOTLB.
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 87e6035..8e7a3d6 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -392,7 +392,7 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 	if (dma_capable(dev, dev_addr, size) &&
 	    !range_straddles_page_boundary(phys, size) &&
 	    !xen_arch_need_swiotlb(dev, phys, dev_addr) &&
-		!swiotlb_force) {
+		(swiotlb_force != SWIOTLB_FORCE)) {
 		/* we are not interested in the dma_addr returned by
 		 * xen_dma_map_page, only in the potential cache flushes executed
 		 * by the function. */
@@ -549,7 +549,7 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
 		phys_addr_t paddr = sg_phys(sg);
 		dma_addr_t dev_addr = xen_phys_to_bus(paddr);
 
-		if (swiotlb_force ||
+		if (swiotlb_force == SWIOTLB_FORCE ||
 		    xen_arch_need_swiotlb(hwdev, paddr, dev_addr) ||
 		    !dma_capable(hwdev, dev_addr, sg->length) ||
 		    range_straddles_page_boundary(paddr, sg->length)) {
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 5f81f8a..746eceb 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -9,7 +9,12 @@ struct device;
 struct page;
 struct scatterlist;
 
-extern int swiotlb_force;
+enum swiotlb_force {
+	SWIOTLB_NORMAL,		/* Default - depending on HW DMA mask etc. */
+	SWIOTLB_FORCE,		/* swiotlb=force */
+};
+
+extern enum swiotlb_force swiotlb_force;
 
 /*
  * Maximum allowable number of contiguous slabs to map,
diff --git a/include/trace/events/swiotlb.h b/include/trace/events/swiotlb.h
index 7ea4c5e..5e2e30a 100644
--- a/include/trace/events/swiotlb.h
+++ b/include/trace/events/swiotlb.h
@@ -11,16 +11,16 @@ TRACE_EVENT(swiotlb_bounced,
 	TP_PROTO(struct device *dev,
 		 dma_addr_t dev_addr,
 		 size_t size,
-		 int swiotlb_force),
+		 enum swiotlb_force swiotlb_force),
 
 	TP_ARGS(dev, dev_addr, size, swiotlb_force),
 
 	TP_STRUCT__entry(
-		__string(	dev_name,	dev_name(dev)	)
-		__field(	u64,	dma_mask	)
-		__field(	dma_addr_t,	dev_addr	)
-		__field(	size_t,	size	)
-		__field(	int,	swiotlb_force	)
+		__string(	dev_name,	dev_name(dev)	)
+		__field(	u64,	dma_mask	)
+		__field(	dma_addr_t,	dev_addr	)
+		__field(	size_t,	size	)
+		__field(	enum swiotlb_force,	swiotlb_force	)
 	),
 
 	TP_fast_assign(
@@ -37,7 +37,9 @@ TRACE_EVENT(swiotlb_bounced,
 		__entry->dma_mask,
 		(unsigned long long)__entry->dev_addr,
 		__entry->size,
-		__entry->swiotlb_force ? "swiotlb_force" : "" )
+		__print_symbolic(__entry->swiotlb_force,
+			{ SWIOTLB_NORMAL,	"NORMAL" },
+			{ SWIOTLB_FORCE,	"FORCE" }))
 );
 
 #endif /* _TRACE_SWIOTLB_H */
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 22e13a0..68e8f49 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -53,7 +53,7 @@
  */
 #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
 
-int swiotlb_force;
+enum swiotlb_force swiotlb_force;
 
 /*
  * Used to do a quick range check in swiotlb_tbl_unmap_single and
@@ -107,7 +107,7 @@ setup_io_tlb_npages(char *str)
 	if (*str == ',')
 		++str;
 	if (!strcmp(str, "force"))
-		swiotlb_force = 1;
+		swiotlb_force = SWIOTLB_FORCE;
 
 	return 0;
 }
@@ -749,7 +749,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 	 * we can safely return the device addr and not worry about bounce
 	 * buffering it.
 	 */
-	if (dma_capable(dev, dev_addr, size) && !swiotlb_force)
+	if (dma_capable(dev, dev_addr, size) && swiotlb_force != SWIOTLB_FORCE)
 		return dev_addr;
 
 	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
@@ -888,7 +888,7 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
 		phys_addr_t paddr = sg_phys(sg);
 		dma_addr_t dev_addr = phys_to_dma(hwdev, paddr);
 
-		if (swiotlb_force ||
+		if (swiotlb_force == SWIOTLB_FORCE ||
 		    !dma_capable(hwdev, dev_addr, sg->length)) {
 			phys_addr_t map = map_single(hwdev, sg_phys(sg),
 						     sg->length, dir);
-- 
cgit v0.10.2
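The pattern the patch introduces is easiest to see outside the diff: the int flag becomes a named enum, the swiotlb=force parsing assigns an enumerator, and every caller compares explicitly against SWIOTLB_FORCE instead of testing the variable for truth, which leaves room for additional values later without changing what existing tests mean. Below is a minimal user-space sketch of that pattern; the enum mirrors the one added to include/linux/swiotlb.h, while parse_force() and main() are hypothetical stand-ins for the kernel's setup_io_tlb_npages() parsing and its callers, not kernel code.

    /*
     * Illustrative sketch only, assuming a user-space analogue of the
     * conversion above. The enum mirrors include/linux/swiotlb.h;
     * parse_force() is a hypothetical stand-in for the "force" keyword
     * handling in setup_io_tlb_npages().
     */
    #include <stdio.h>
    #include <string.h>

    enum swiotlb_force {
    	SWIOTLB_NORMAL,		/* Default - depending on HW DMA mask etc. */
    	SWIOTLB_FORCE,		/* swiotlb=force */
    };

    static enum swiotlb_force swiotlb_force;

    /* Hypothetical parser: maps the boot-parameter keyword to an enumerator. */
    static void parse_force(const char *str)
    {
    	if (!strcmp(str, "force"))
    		swiotlb_force = SWIOTLB_FORCE;
    }

    int main(void)
    {
    	parse_force("force");

    	/* Callers now compare against the enumerator instead of testing truth. */
    	if (swiotlb_force == SWIOTLB_FORCE)
    		printf("bounce buffering forced\n");
    	else
    		printf("normal DMA path\n");

    	return 0;
    }

Because every comparison is written as == SWIOTLB_FORCE rather than a boolean test, behaviour is unchanged by this patch, and a later enumerator (for example a "never force" mode) can be added without revisiting these call sites.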