Diffstat (limited to 'arch/x86/mm')
-rw-r--r--  arch/x86/mm/fault.c      |  2 +-
-rw-r--r--  arch/x86/mm/highmem_32.c |  9 +--------
-rw-r--r--  arch/x86/mm/iomap_32.c   | 11 +----------
-rw-r--r--  arch/x86/mm/srat.c       | 16 +++++++++++++---
4 files changed, 16 insertions(+), 22 deletions(-)
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index b5c8e37..5b90bbcad9 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1098,7 +1098,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
* If we're in an interrupt, have no user context or are running
* in an atomic region then we must not take the fault:
*/
- if (unlikely(!mm || pagefault_disabled())) {
+ if (unlikely(in_atomic() || !mm)) {
bad_area_nosemaphore(regs, error_code, address);
return;
}
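For context, this check is the caller-visible contract: code that cannot
sleep may still touch user memory if it wraps the access in
pagefault_disable()/pagefault_enable(), in which case the handler above
bails out and the access fails with -EFAULT instead of sleeping. A
minimal sketch of that caller-side pattern (not part of this diff; dst,
src and len are placeholders):

    pagefault_disable();    /* bumps the preempt count, so in_atomic() is true */
    ret = __copy_from_user_inatomic(dst, src, len);
    pagefault_enable();
    if (ret)
            ret = copy_from_user(dst, src, len);    /* sleeping retry path */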
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index 7f96844..4500142 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -32,7 +32,6 @@ EXPORT_SYMBOL(kunmap);
*/
void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
- pte_t pte = mk_pte(page, prot);
unsigned long vaddr;
int idx, type;
@@ -46,10 +45,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
BUG_ON(!pte_none(*(kmap_pte-idx)));
-#ifdef CONFIG_PREEMPT_RT_FULL
- current->kmap_pte[type] = pte;
-#endif
- set_pte(kmap_pte-idx, pte);
+ set_pte(kmap_pte-idx, mk_pte(page, prot));
arch_flush_lazy_mmu_mode();
return (void *)vaddr;
@@ -92,9 +88,6 @@ void __kunmap_atomic(void *kvaddr)
* is a bad idea also, in case the page changes cacheability
* attributes or becomes a protected page in a hypervisor.
*/
-#ifdef CONFIG_PREEMPT_RT_FULL
- current->kmap_pte[type] = __pte(0);
-#endif
kpte_clear_flush(kmap_pte-idx, vaddr);
kmap_atomic_idx_pop();
arch_flush_lazy_mmu_mode();
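For reference, the fixmap slot management above is reached through the
atomic kmap API; a typical caller (illustrative only, buf is a
placeholder) pairs the map and unmap around a no-sleep region:

    void *vaddr = kmap_atomic(page);    /* ends up in kmap_atomic_prot() */
    memcpy(buf, vaddr, PAGE_SIZE);      /* must not sleep while mapped */
    kunmap_atomic(vaddr);               /* __kunmap_atomic() clears the PTE */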
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index 62377d6..7b179b4 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -56,7 +56,6 @@ EXPORT_SYMBOL_GPL(iomap_free);
void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
{
- pte_t pte = pfn_pte(pfn, prot);
unsigned long vaddr;
int idx, type;
@@ -65,12 +64,7 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR * smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
- WARN_ON(!pte_none(*(kmap_pte - idx)));
-
-#ifdef CONFIG_PREEMPT_RT_FULL
- current->kmap_pte[type] = pte;
-#endif
- set_pte(kmap_pte - idx, pte);
+ set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
arch_flush_lazy_mmu_mode();
return (void *)vaddr;
@@ -116,9 +110,6 @@ iounmap_atomic(void __iomem *kvaddr)
* is a bad idea also, in case the page changes cacheability
* attributes or becomes a protected page in a hypervisor.
*/
-#ifdef CONFIG_PREEMPT_RT_FULL
- current->kmap_pte[type] = __pte(0);
-#endif
kpte_clear_flush(kmap_pte-idx, vaddr);
kmap_atomic_idx_pop();
}
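Unlike kmap_atomic(), kmap_atomic_prot_pfn() takes a raw pfn and an
explicit protection, which is what lets drivers map I/O pages
write-combined. A hedged sketch of a caller (pfn, off and val are
placeholders; the usual consumer is the io_mapping_* API in
include/linux/io-mapping.h):

    void __iomem *p = iomap_atomic_prot_pfn(pfn, PAGE_KERNEL_WC);
    writel(val, p + off);   /* short-lived, per-CPU fixmap mapping */
    iounmap_atomic(p);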
diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
index 266ca91..5ecf651 100644
--- a/arch/x86/mm/srat.c
+++ b/arch/x86/mm/srat.c
@@ -42,15 +42,25 @@ static __init inline int srat_disabled(void)
return acpi_numa < 0;
}
-/* Callback for SLIT parsing */
+/*
+ * Callback for SLIT parsing. pxm_to_node() returns NUMA_NO_NODE for
+ * I/O localities since SRAT does not list them. I/O localities are
+ * not supported at this point.
+ */
void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
{
int i, j;
- for (i = 0; i < slit->locality_count; i++)
- for (j = 0; j < slit->locality_count; j++)
+ for (i = 0; i < slit->locality_count; i++) {
+ if (pxm_to_node(i) == NUMA_NO_NODE)
+ continue;
+ for (j = 0; j < slit->locality_count; j++) {
+ if (pxm_to_node(j) == NUMA_NO_NODE)
+ continue;
numa_set_distance(pxm_to_node(i), pxm_to_node(j),
slit->entry[slit->locality_count * i + j]);
+ }
+ }
}
/* Callback for Proximity Domain -> x2APIC mapping */
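The entry[] expression in the SLIT loop above indexes a flattened
locality_count x locality_count matrix in row-major order: the distance
from proximity domain i to domain j lives at entry[N*i + j]. A small
illustration of the convention (N is shorthand for
slit->locality_count; the constants follow the ACPI spec, not this
diff):

    u8 d = slit->entry[N * i + j];  /* relative distance i -> j */
    /* d == LOCAL_DISTANCE (10) when i == j; 0xff means unreachable */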