From f2f12b6fc032c7b1419fd6db84e2868b5f05a878 Mon Sep 17 00:00:00 2001
From: Shuah Khan
Date: Wed, 6 Jun 2012 10:50:06 -0600
Subject: iommu/amd: Fix missing iommu_shutdown initialization in passthrough mode

The iommu_shutdown callback is not initialized when the AMD IOMMU driver
runs in passthrough mode. Fix that by moving the callback initialization
before the check for passthrough mode.

Signed-off-by: Shuah Khan
Cc: stable@vger.kernel.org
Signed-off-by: Joerg Roedel

diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 542024b..c04ddca 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -1641,6 +1641,8 @@ static int __init amd_iommu_init(void)

 	amd_iommu_init_api();

+	x86_platform.iommu_shutdown = disable_iommus;
+
 	if (iommu_pass_through)
 		goto out;

@@ -1649,8 +1651,6 @@ static int __init amd_iommu_init(void)
 	else
 		printk(KERN_INFO "AMD-Vi: Lazy IO/TLB flushing enabled\n");

-	x86_platform.iommu_shutdown = disable_iommus;
-
 out:
 	return ret;

-- cgit v0.10.2

From ac1534a55d1e87d59a21c09c570605933b551480 Mon Sep 17 00:00:00 2001
From: Joerg Roedel
Date: Thu, 21 Jun 2012 14:52:40 +0200
Subject: iommu/amd: Initialize dma_ops for hotplug and sriov devices

When a device is added to the system at runtime the AMD IOMMU driver
initializes the necessary data structures to handle translation for it.
But it forgets to change the per-device dma_ops to point to the AMD
IOMMU driver. So mapping actually never happens and all DMA accesses
end in an IO_PAGE_FAULT. Fix this.

Reported-by: Stefan Assmann
Cc: stable@vger.kernel.org
Signed-off-by: Joerg Roedel

diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index a2e418c..dfe7d37 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -83,6 +83,8 @@ static struct iommu_ops amd_iommu_ops;
 static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
 int amd_iommu_max_glx_val = -1;

+static struct dma_map_ops amd_iommu_dma_ops;
+
 /*
  * general struct to manage commands send to an IOMMU
  */
@@ -2267,6 +2269,13 @@ static int device_change_notifier(struct notifier_block *nb,
 		list_add_tail(&dma_domain->list, &iommu_pd_list);
 		spin_unlock_irqrestore(&iommu_pd_list_lock, flags);

+		dev_data = get_dev_data(dev);
+
+		if (!dev_data->passthrough)
+			dev->archdata.dma_ops = &amd_iommu_dma_ops;
+		else
+			dev->archdata.dma_ops = &nommu_dma_ops;
+
 		break;
 	case BUS_NOTIFY_DEL_DEVICE:

-- cgit v0.10.2
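For readers skimming the log, the following stand-alone C sketch models the decision the patch above adds to device_change_notifier(): when a device shows up at runtime, its per-device dma_ops are pointed either at the IOMMU-backed ops or at the nommu path. It is only an illustration; struct device, dev_data and dma_map_ops here are simplified stand-ins rather than the kernel's real definitions, and set_dma_ops_for_new_device() is an invented name.

/*
 * Stand-alone model (not kernel code) of the dma_ops selection added to
 * device_change_notifier() above.  A hot-added device either gets the
 * IOMMU-backed ops or the nommu (identity) ops, depending on whether it
 * runs in passthrough mode.
 */
#include <stdbool.h>
#include <stdio.h>

struct dma_map_ops { const char *name; };

static struct dma_map_ops amd_iommu_dma_ops = { "amd_iommu_dma_ops" };
static struct dma_map_ops nommu_dma_ops     = { "nommu_dma_ops" };

struct dev_data { bool passthrough; };

struct device {
	const char *name;
	struct dev_data data;
	struct { struct dma_map_ops *dma_ops; } archdata;
};

/* Mirrors the new BUS_NOTIFY_ADD_DEVICE code: pick dma_ops per device. */
static void set_dma_ops_for_new_device(struct device *dev)
{
	if (!dev->data.passthrough)
		dev->archdata.dma_ops = &amd_iommu_dma_ops;	/* translated DMA */
	else
		dev->archdata.dma_ops = &nommu_dma_ops;		/* direct/identity DMA */
}

int main(void)
{
	struct device nic = { .name = "hotplugged NIC", .data = { .passthrough = false } };

	set_dma_ops_for_new_device(&nic);
	printf("%s now uses %s\n", nic.name, nic.archdata.dma_ops->name);
	return 0;
}

The real driver installs function tables that implement map_page(), unmap_page() and friends; the point of the sketch is only that the assignment has to happen per device, at hotplug time, which is what the patch restores.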
From 8f53dc724a83a0082184fa27df80c25c7df47340 Mon Sep 17 00:00:00 2001
From: Hiroshi DOYU
Date: Wed, 27 Jun 2012 12:54:01 +0300
Subject: iommu/tegra: smmu: Fix unsleepable memory allocation

alloc_pdir() is called in smmu_iommu_domain_init() with a spinlock held,
so the memory allocations in it have to be atomic/unsleepable.

Signed-off-by: Hiroshi DOYU
Reported-by: Chris Wright
Acked-by: Chris Wright
Cc: stable@vger.kernel.org
Signed-off-by: Joerg Roedel

diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index ecd6790..3f3d09d5 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -550,13 +550,13 @@ static int alloc_pdir(struct smmu_as *as)
 		return 0;

 	as->pte_count = devm_kzalloc(smmu->dev,
-		     sizeof(as->pte_count[0]) * SMMU_PDIR_COUNT, GFP_KERNEL);
+		     sizeof(as->pte_count[0]) * SMMU_PDIR_COUNT, GFP_ATOMIC);
 	if (!as->pte_count) {
 		dev_err(smmu->dev,
 			"failed to allocate smmu_device PTE cunters\n");
 		return -ENOMEM;
 	}
-	as->pdir_page = alloc_page(GFP_KERNEL | __GFP_DMA);
+	as->pdir_page = alloc_page(GFP_ATOMIC | __GFP_DMA);
 	if (!as->pdir_page) {
 		dev_err(smmu->dev,
 			"failed to allocate smmu_device page directory\n");

-- cgit v0.10.2

From 68ee6d22376411f8ec668413f1b632a34192a807 Mon Sep 17 00:00:00 2001
From: Dan Carpenter
Date: Wed, 27 Jun 2012 12:08:55 +0300
Subject: dma-debug: debugfs_create_bool() takes a u32 pointer

Even though it has "bool" in the name, you have to pass a u32 pointer
to debugfs_create_bool(). Otherwise you get memory corruption in
write_file_bool(). Fortunately in this case the corruption happens in
an alignment hole between variables so it doesn't cause any problems.

Signed-off-by: Dan Carpenter
Signed-off-by: Joerg Roedel

diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 518aea7..66ce414 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -78,7 +78,7 @@ static LIST_HEAD(free_entries);
 static DEFINE_SPINLOCK(free_entries_lock);

 /* Global disable flag - will be set in case of an error */
-static bool global_disable __read_mostly;
+static u32 global_disable __read_mostly;

 /* Global error count */
 static u32 error_count;
@@ -657,7 +657,7 @@ static int dma_debug_fs_init(void)
 	global_disable_dent = debugfs_create_bool("disabled", 0444,
 			dma_debug_dent,
-			(u32 *)&global_disable);
+			&global_disable);
 	if (!global_disable_dent)
 		goto out_err;

-- cgit v0.10.2
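To make the calling convention concrete, here is a minimal, hypothetical kernel-module sketch that backs a debugfs "bool" file with a u32, matching the debugfs interface as it existed when these patches were written (later kernels changed debugfs_create_bool() to take a bool *, so this is era-specific rather than current API). The module, directory, and variable names are invented for the example.

/*
 * Hypothetical module showing debugfs_create_bool() with a u32 backing
 * variable (the interface at the time of these patches).  write_file_bool()
 * stores a full 32-bit value, so the flag must really be 32 bits wide.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/debugfs.h>

static u32 demo_disable;	/* u32 on purpose, not bool */
static struct dentry *demo_dir;

static int __init demo_init(void)
{
	demo_dir = debugfs_create_dir("bool-demo", NULL);
	if (!demo_dir)
		return -ENOMEM;

	/* Despite the name, the old prototype wants a u32 *. */
	if (!debugfs_create_bool("disabled", 0644, demo_dir, &demo_disable)) {
		debugfs_remove_recursive(demo_dir);
		return -ENOMEM;
	}
	return 0;
}

static void __exit demo_exit(void)
{
	debugfs_remove_recursive(demo_dir);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The cost of getting this wrong is silent: a (u32 *) cast on a bool, as in the two fixed callers, compiles cleanly, and the stray bytes usually land in padding, which is why both bugs went unnoticed.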
From 3775d4818d72081e2afa2aed2442a2b9ecfc5eab Mon Sep 17 00:00:00 2001
From: Dan Carpenter
Date: Wed, 27 Jun 2012 12:09:18 +0300
Subject: iommu/amd: fix type bug in flush code

write_file_bool() modifies 32 bits of data, so "amd_iommu_unmap_flush"
needs to be 32 bits as well or we'll corrupt memory. Fortunately it
looks like the data is aligned with a gap after the declaration so this
is harmless in production.

Signed-off-by: Dan Carpenter
Signed-off-by: Joerg Roedel

diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index dfe7d37..6256263 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -404,7 +404,7 @@ static void amd_iommu_stats_init(void)
 		return;

 	de_fflush = debugfs_create_bool("fullflush", 0444, stats_dir,
-					(u32 *)&amd_iommu_unmap_flush);
+					&amd_iommu_unmap_flush);

 	amd_iommu_stats_add(&compl_wait);
 	amd_iommu_stats_add(&cnt_map_single);

diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index c04ddca..a33612f 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -129,7 +129,7 @@ u16 amd_iommu_last_bdf;		/* largest PCI device id we have
 					   to handle */
 LIST_HEAD(amd_iommu_unity_map);		/* a list of required unity mappings
 					   we find in ACPI */

-bool amd_iommu_unmap_flush;		/* if true, flush on every unmap */
+u32 amd_iommu_unmap_flush;		/* if true, flush on every unmap */

 LIST_HEAD(amd_iommu_list);		/* list of all AMD IOMMUs in the system */

diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 2435555..c1b1d48 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -652,7 +652,7 @@ extern unsigned long *amd_iommu_pd_alloc_bitmap;
  * If true, the addresses will be flushed on unmap time, not when
  * they are reused
  */
-extern bool amd_iommu_unmap_flush;
+extern u32 amd_iommu_unmap_flush;

 /* Smallest number of PASIDs supported by any IOMMU in the system */
 extern u32 amd_iommu_max_pasids;

-- cgit v0.10.2
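To see why both of Dan Carpenter's patches matter, the following stand-alone C program (a model only, not kernel code) mimics what write_file_bool() does: it stores a full 32-bit value through the address it is handed. With a 1-byte bool flag that write spills into the following bytes, and whether anything breaks depends purely on whether padding happens to sit there, which is exactly the "alignment hole" the changelogs mention. The struct and function names are invented for the demonstration, and typical x86-64 layout is assumed.

/*
 * Model of the bug class fixed above: a "bool" debugfs file whose write
 * handler stores 32 bits through the flag's address.  Layout decides
 * whether the extra bytes hit padding (harmless) or a neighbouring
 * field (corruption).
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct packed_flags {
	bool flush;		/* 1 byte */
	uint8_t neighbours[3];	/* the next 3 bytes: no padding to hide in */
};

struct padded_flags {
	bool flush;		/* 1 byte, then 3 bytes of padding on most ABIs */
	uint32_t neighbour;	/* starts at offset 4, out of harm's way */
};

/* Stand-in for write_file_bool(): always writes sizeof(uint32_t) bytes. */
static void model_write_file_bool(void *flag, uint32_t val)
{
	memcpy(flag, &val, sizeof(val));
}

int main(void)
{
	struct packed_flags p = { .flush = false, .neighbours = { 0x5a, 0x5a, 0x5a } };
	struct padded_flags q = { .flush = false, .neighbour = 0x5a5a5a5a };

	model_write_file_bool(&p.flush, 1);	/* clobbers p.neighbours */
	model_write_file_bool(&q.flush, 1);	/* lands in the alignment hole */

	printf("packed neighbours: %02x %02x %02x (were 5a 5a 5a)\n",
	       p.neighbours[0], p.neighbours[1], p.neighbours[2]);
	printf("padded neighbour:  %08x (still intact)\n", (unsigned)q.neighbour);
	return 0;
}

Declaring the flag as u32, as the patch does, keeps the write inside the variable itself; later kernels addressed the same trap more generally by changing debugfs_create_bool() to take a real bool *.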