From 5f11eae3783bc9237415ff8f38b8676c5ef34520 Mon Sep 17 00:00:00 2001
From: Allen Pais
Date: Fri, 13 Dec 2013 09:44:42 +0530
Subject: sparc64: convert spinlock_t to raw_spinlock_t in mmu_context_t

Issue debugged by Thomas Gleixner

Signed-off-by: Allen Pais
Signed-off-by: Sebastian Andrzej Siewior
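---

For background on why the conversion is needed: on PREEMPT_RT a plain
spinlock_t is substituted by a sleeping, rtmutex-based lock, while a
raw_spinlock_t keeps the traditional spin-with-interrupts-off
semantics.  mm->context.lock is taken with interrupts disabled in
switch_mm()/activate_mm() and from hard interrupt context in
smp_new_mmu_context_version_client(), where sleeping is forbidden, so
the lock must remain a true spinlock.  Below is a minimal sketch of
the raw lock API in the same take/update/release pattern the patch
uses (hypothetical example code, not part of the patch):

	#include <linux/spinlock.h>

	/* Stays a spinning lock even on PREEMPT_RT. */
	static DEFINE_RAW_SPINLOCK(example_lock);
	static unsigned long example_ctx_val;

	static void example_update_ctx(unsigned long val)
	{
		unsigned long flags;

		/* Spins with IRQs off; safe in atomic and hard-IRQ
		 * context, never sleeps.
		 */
		raw_spin_lock_irqsave(&example_lock, flags);
		example_ctx_val = val;
		raw_spin_unlock_irqrestore(&example_lock, flags);
	}

The trade-off is that raw spinlock sections add directly to worst-case
latency on RT, so critical sections guarded this way must stay short,
as the ones under mm->context.lock do.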
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index cd3756a..ffc749e 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -26,6 +26,7 @@ config SPARC
 	select HAVE_DMA_ATTRS
 	select HAVE_DMA_API_DEBUG
 	select HAVE_ARCH_JUMP_LABEL if SPARC64
+	select IRQ_FORCED_THREADING
 	select GENERIC_IRQ_SHOW
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select USE_GENERIC_SMP_HELPERS if SMP
diff --git a/arch/sparc/include/asm/mmu_64.h b/arch/sparc/include/asm/mmu_64.h
index 76092c4..e945ddb 100644
--- a/arch/sparc/include/asm/mmu_64.h
+++ b/arch/sparc/include/asm/mmu_64.h
@@ -90,7 +90,7 @@ struct tsb_config {
 #endif
 
 typedef struct {
-	spinlock_t lock;
+	raw_spinlock_t lock;
 	unsigned long sparc64_ctx_val;
 	unsigned long huge_pte_count;
 	struct page *pgtable_page;
diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h
index 3d528f0..3a85624 100644
--- a/arch/sparc/include/asm/mmu_context_64.h
+++ b/arch/sparc/include/asm/mmu_context_64.h
@@ -77,7 +77,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
 	if (unlikely(mm == &init_mm))
 		return;
 
-	spin_lock_irqsave(&mm->context.lock, flags);
+	raw_spin_lock_irqsave(&mm->context.lock, flags);
 	ctx_valid = CTX_VALID(mm->context);
 	if (!ctx_valid)
 		get_new_mmu_context(mm);
@@ -125,7 +125,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
 		__flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
 	}
 
-	spin_unlock_irqrestore(&mm->context.lock, flags);
+	raw_spin_unlock_irqrestore(&mm->context.lock, flags);
 }
 
 #define deactivate_mm(tsk,mm)	do { } while (0)
@@ -136,7 +136,7 @@ static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm
 	unsigned long flags;
 	int cpu;
 
-	spin_lock_irqsave(&mm->context.lock, flags);
+	raw_spin_lock_irqsave(&mm->context.lock, flags);
 	if (!CTX_VALID(mm->context))
 		get_new_mmu_context(mm);
 	cpu = smp_processor_id();
@@ -146,7 +146,7 @@ static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm
 	load_secondary_context(mm);
 	__flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
 	tsb_context_switch(mm);
-	spin_unlock_irqrestore(&mm->context.lock, flags);
+	raw_spin_unlock_irqrestore(&mm->context.lock, flags);
 }
 
 #endif /* !(__ASSEMBLY__) */
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index e142545..8c68424 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -976,12 +976,12 @@ void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *reg
 	if (unlikely(!mm || (mm == &init_mm)))
 		return;
 
-	spin_lock_irqsave(&mm->context.lock, flags);
+	raw_spin_lock_irqsave(&mm->context.lock, flags);
 
 	if (unlikely(!CTX_VALID(mm->context)))
 		get_new_mmu_context(mm);
 
-	spin_unlock_irqrestore(&mm->context.lock, flags);
+	raw_spin_unlock_irqrestore(&mm->context.lock, flags);
 
 	load_secondary_context(mm);
 	__flush_tlb_mm(CTX_HWBITS(mm->context),
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index ed82eda..e9ddd0c 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -350,7 +350,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *
 
 	mm = vma->vm_mm;
 
-	spin_lock_irqsave(&mm->context.lock, flags);
+	raw_spin_lock_irqsave(&mm->context.lock, flags);
 
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 	if (mm->context.huge_pte_count && is_hugetlb_pte(pte))
@@ -361,7 +361,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *
 		__update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
 					address, pte_val(pte));
 
-	spin_unlock_irqrestore(&mm->context.lock, flags);
+	raw_spin_unlock_irqrestore(&mm->context.lock, flags);
 }
 
 void flush_dcache_page(struct page *page)
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index 2cc3bce..d84d4ea 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -73,7 +73,7 @@ void flush_tsb_user(struct tlb_batch *tb)
 	struct mm_struct *mm = tb->mm;
 	unsigned long nentries, base, flags;
 
-	spin_lock_irqsave(&mm->context.lock, flags);
+	raw_spin_lock_irqsave(&mm->context.lock, flags);
 
 	base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
 	nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
@@ -90,14 +90,14 @@ void flush_tsb_user(struct tlb_batch *tb)
 		__flush_tsb_one(tb, HPAGE_SHIFT, base, nentries);
 	}
 #endif
-	spin_unlock_irqrestore(&mm->context.lock, flags);
+	raw_spin_unlock_irqrestore(&mm->context.lock, flags);
 }
 
 void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr)
 {
 	unsigned long nentries, base, flags;
 
-	spin_lock_irqsave(&mm->context.lock, flags);
+	raw_spin_lock_irqsave(&mm->context.lock, flags);
 
 	base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
 	nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
@@ -114,7 +114,7 @@ void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr)
 		__flush_tsb_one_entry(base, vaddr, HPAGE_SHIFT, nentries);
 	}
 #endif
-	spin_unlock_irqrestore(&mm->context.lock, flags);
+	raw_spin_unlock_irqrestore(&mm->context.lock, flags);
 }
 
 #define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_8K
@@ -392,7 +392,7 @@ retry_tsb_alloc:
 	 * the lock and ask all other cpus running this address space
 	 * to run tsb_context_switch() to see the new TSB table.
 	 */
-	spin_lock_irqsave(&mm->context.lock, flags);
+	raw_spin_lock_irqsave(&mm->context.lock, flags);
 
 	old_tsb = mm->context.tsb_block[tsb_index].tsb;
 	old_cache_index =
@@ -407,7 +407,7 @@ retry_tsb_alloc:
 	 */
 	if (unlikely(old_tsb &&
 		     (rss < mm->context.tsb_block[tsb_index].tsb_rss_limit))) {
-		spin_unlock_irqrestore(&mm->context.lock, flags);
+		raw_spin_unlock_irqrestore(&mm->context.lock, flags);
 
 		kmem_cache_free(tsb_caches[new_cache_index], new_tsb);
 		return;
@@ -433,7 +433,7 @@ retry_tsb_alloc:
 	mm->context.tsb_block[tsb_index].tsb = new_tsb;
 	setup_tsb_params(mm, tsb_index, new_size);
 
-	spin_unlock_irqrestore(&mm->context.lock, flags);
+	raw_spin_unlock_irqrestore(&mm->context.lock, flags);
 
 	/* If old_tsb is NULL, we're being invoked for the first time
 	 * from init_new_context().
@@ -459,7 +459,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 #endif
 	unsigned int i;
 
-	spin_lock_init(&mm->context.lock);
+	raw_spin_lock_init(&mm->context.lock);
 
 	mm->context.sparc64_ctx_val = 0UL;
 
--
cgit v0.10.2