author     Scott Wood <scottwood@freescale.com>  2014-05-14 18:19:12 (GMT)
committer  Scott Wood <scottwood@freescale.com>  2014-05-14 18:37:18 (GMT)
commit     86ba38e6f5f2fbfe9b49e153ea89593b26482019 (patch)
tree       f99d2906b0eafca507f37289e68052fc105cc2dc /arch/sparc
parent     07c8b57b111585a617b2b456497fc9b33c00743c (diff)
download   linux-fsl-qoriq-86ba38e6f5f2fbfe9b49e153ea89593b26482019.tar.xz
Reset to 3.12.19
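This reset takes arch/sparc back to the vanilla 3.12.19 code, undoing the tree's PREEMPT_RT modifications: the MMU context locks return from raw_spinlock_t to spinlock_t, do_softirq() and arch_irq_work_raise() are built unconditionally again, and the fault handlers go back to the in_atomic() check. For context, a minimal sketch of the lock distinction that motivates most of the hunks below, assuming a PREEMPT_RT-patched kernel (demo names are hypothetical):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);         /* sleeping lock under PREEMPT_RT */
static DEFINE_RAW_SPINLOCK(demo_raw_lock); /* always disables preemption and spins */

static void demo_locking(void)
{
	unsigned long flags;

	/* On PREEMPT_RT, spinlock_t is backed by an rt_mutex, so the
	 * holder can sleep and the lock cannot be taken from hard-IRQ
	 * context; that is why the RT patches had converted
	 * mm->context.lock and ctx_alloc_lock to the raw variant. */
	spin_lock_irqsave(&demo_lock, flags);
	spin_unlock_irqrestore(&demo_lock, flags);

	/* The raw variant keeps the classic spin-with-IRQs-off
	 * behaviour on both vanilla and RT kernels. */
	raw_spin_lock_irqsave(&demo_raw_lock, flags);
	raw_spin_unlock_irqrestore(&demo_raw_lock, flags);
}

On the vanilla 3.12.19 baseline restored here, spinlock_t and raw_spinlock_t behave identically, so plain spinlock_t is the correct upstream type for these locks.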
Diffstat (limited to 'arch/sparc')
-rw-r--r--  arch/sparc/Kconfig                        | 13
-rw-r--r--  arch/sparc/include/asm/mmu_64.h           |  2
-rw-r--r--  arch/sparc/include/asm/mmu_context_64.h   | 10
-rw-r--r--  arch/sparc/kernel/irq_64.c                |  2
-rw-r--r--  arch/sparc/kernel/pcr.c                   |  2
-rw-r--r--  arch/sparc/kernel/process_64.c            |  4
-rw-r--r--  arch/sparc/kernel/setup_32.c              |  1
-rw-r--r--  arch/sparc/kernel/setup_64.c              |  8
-rw-r--r--  arch/sparc/kernel/smp_64.c                |  4
-rw-r--r--  arch/sparc/kernel/syscalls.S              |  4
-rw-r--r--  arch/sparc/mm/fault_32.c                  |  2
-rw-r--r--  arch/sparc/mm/fault_64.c                  |  2
-rw-r--r--  arch/sparc/mm/init_64.c                   | 14
-rw-r--r--  arch/sparc/mm/tsb.c                       | 20
14 files changed, 38 insertions(+), 50 deletions(-)
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index da51da9..4e56838 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -25,8 +25,7 @@ config SPARC
select RTC_DRV_M48T59
select HAVE_DMA_ATTRS
select HAVE_DMA_API_DEBUG
- select HAVE_ARCH_JUMP_LABEL
- select IRQ_FORCED_THREADING
+ select HAVE_ARCH_JUMP_LABEL if SPARC64
select GENERIC_IRQ_SHOW
select ARCH_WANT_IPC_PARSE_VERSION
select USE_GENERIC_SMP_HELPERS if SMP
@@ -178,10 +177,12 @@ config NR_CPUS
source kernel/Kconfig.hz
config RWSEM_GENERIC_SPINLOCK
- def_bool PREEMPT_RT_FULL
+ bool
+ default y if SPARC32
config RWSEM_XCHGADD_ALGORITHM
- def_bool !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL
+ bool
+ default y if SPARC64
config GENERIC_HWEIGHT
bool
@@ -522,10 +523,6 @@ menu "Executable file formats"
source "fs/Kconfig.binfmt"
-config EARLY_PRINTK
- bool
- default y
-
config COMPAT
bool
depends on SPARC64
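The rwsem hunks restore the upstream split: SPARC32 falls back to the generic, spinlock-protected rwsem implementation, while SPARC64 selects the xchg-add algorithm; the RT tree had forced the generic variant everywhere because PREEMPT_RT substitutes rt_mutex-based rwsems. A rough sketch of the xchg-add reader fast path (simplified, names hypothetical):

#include <linux/atomic.h>

#define DEMO_READ_BIAS	1L

/* One atomic add on a shared counter is the whole reader fast path;
 * a non-positive result means a writer is active or queued, and a
 * real implementation then backs the bias out and sleeps. */
static inline bool demo_down_read_fast(atomic_long_t *count)
{
	return atomic_long_add_return(DEMO_READ_BIAS, count) > 0;
}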
diff --git a/arch/sparc/include/asm/mmu_64.h b/arch/sparc/include/asm/mmu_64.h
index e945ddb..76092c4 100644
--- a/arch/sparc/include/asm/mmu_64.h
+++ b/arch/sparc/include/asm/mmu_64.h
@@ -90,7 +90,7 @@ struct tsb_config {
#endif
typedef struct {
- raw_spinlock_t lock;
+ spinlock_t lock;
unsigned long sparc64_ctx_val;
unsigned long huge_pte_count;
struct page *pgtable_page;
diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h
index 44e393b..3d528f0 100644
--- a/arch/sparc/include/asm/mmu_context_64.h
+++ b/arch/sparc/include/asm/mmu_context_64.h
@@ -13,7 +13,7 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
-extern raw_spinlock_t ctx_alloc_lock;
+extern spinlock_t ctx_alloc_lock;
extern unsigned long tlb_context_cache;
extern unsigned long mmu_context_bmap[];
@@ -77,7 +77,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
if (unlikely(mm == &init_mm))
return;
- raw_spin_lock_irqsave(&mm->context.lock, flags);
+ spin_lock_irqsave(&mm->context.lock, flags);
ctx_valid = CTX_VALID(mm->context);
if (!ctx_valid)
get_new_mmu_context(mm);
@@ -125,7 +125,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
__flush_tlb_mm(CTX_HWBITS(mm->context),
SECONDARY_CONTEXT);
}
- raw_spin_unlock_irqrestore(&mm->context.lock, flags);
+ spin_unlock_irqrestore(&mm->context.lock, flags);
}
#define deactivate_mm(tsk,mm) do { } while (0)
@@ -136,7 +136,7 @@ static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm
unsigned long flags;
int cpu;
- raw_spin_lock_irqsave(&mm->context.lock, flags);
+ spin_lock_irqsave(&mm->context.lock, flags);
if (!CTX_VALID(mm->context))
get_new_mmu_context(mm);
cpu = smp_processor_id();
@@ -146,7 +146,7 @@ static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm
load_secondary_context(mm);
__flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
tsb_context_switch(mm);
- raw_spin_unlock_irqrestore(&mm->context.lock, flags);
+ spin_unlock_irqrestore(&mm->context.lock, flags);
}
#endif /* !(__ASSEMBLY__) */
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index d74fa7f..d4840ce 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -698,7 +698,6 @@ void __irq_entry handler_irq(int pil, struct pt_regs *regs)
set_irq_regs(old_regs);
}
-#ifndef CONFIG_PREEMPT_RT_FULL
void do_softirq(void)
{
unsigned long flags;
@@ -724,7 +723,6 @@ void do_softirq(void)
local_irq_restore(flags);
}
-#endif
#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(void)
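Re-enabling this arch-level do_softirq() matches vanilla 3.12.19; under PREEMPT_RT softirqs are processed in kernel threads, so the RT tree had compiled the hook out. The restored function follows the usual arch pattern, roughly (a sketch, not the exact sparc64 body, which also switches to a dedicated softirq stack):

#include <linux/interrupt.h>

void demo_do_softirq(void)
{
	unsigned long flags;

	/* Never process softirqs from within another interrupt. */
	if (in_interrupt())
		return;

	local_irq_save(flags);
	if (local_softirq_pending())
		__do_softirq();
	local_irq_restore(flags);
}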
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c
index dbb51a6..269af58 100644
--- a/arch/sparc/kernel/pcr.c
+++ b/arch/sparc/kernel/pcr.c
@@ -43,12 +43,10 @@ void __irq_entry deferred_pcr_work_irq(int irq, struct pt_regs *regs)
set_irq_regs(old_regs);
}
-#ifndef CONFIG_PREEMPT_RT_FULL
void arch_irq_work_raise(void)
{
set_softint(1 << PIL_DEFERRED_PCR_WORK);
}
-#endif
const struct pcr_ops *pcr_ops;
EXPORT_SYMBOL_GPL(pcr_ops);
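arch_irq_work_raise() is the hook irq_work_queue() uses to get its callback run from hard-IRQ context as soon as possible; on sparc64 it sets the PIL_DEFERRED_PCR_WORK soft interrupt handled by deferred_pcr_work_irq() above. Illustrative usage, not code from this commit (demo names are hypothetical):

#include <linux/irq_work.h>

static void demo_work_fn(struct irq_work *work)
{
	/* Runs in hard-IRQ context once the raised softint fires. */
}

static struct irq_work demo_work;

static void demo_defer(void)
{
	init_irq_work(&demo_work, demo_work_fn);
	irq_work_queue(&demo_work);	/* ends up in arch_irq_work_raise() */
}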
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index baebab2..b9cc976 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -57,9 +57,12 @@ void arch_cpu_idle(void)
{
if (tlb_type != hypervisor) {
touch_nmi_watchdog();
+ local_irq_enable();
} else {
unsigned long pstate;
+ local_irq_enable();
+
/* The sun4v sleeping code requires that we have PSTATE.IE cleared over
* the cpu sleep hypervisor call.
*/
@@ -81,7 +84,6 @@ void arch_cpu_idle(void)
: "=&r" (pstate)
: "i" (PSTATE_IE));
}
- local_irq_enable();
}
#ifdef CONFIG_HOTPLUG_CPU
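The idle hunk moves local_irq_enable() ahead of the sun4v sleep path instead of after it: the generic idle loop invokes arch_cpu_idle() with interrupts disabled and expects them enabled on return, while the hypervisor sleep call manages PSTATE.IE on its own. Roughly the calling contract, paraphrased from the generic loop (a simplified sketch, not this tree's code):

static void demo_idle_loop(void)
{
	while (1) {
		local_irq_disable();
		if (!need_resched())
			arch_cpu_idle();	/* must return with IRQs enabled */
		else
			local_irq_enable();
		schedule_preempt_disabled();
	}
}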
diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c
index 0884ccd..1434526 100644
--- a/arch/sparc/kernel/setup_32.c
+++ b/arch/sparc/kernel/setup_32.c
@@ -309,7 +309,6 @@ void __init setup_arch(char **cmdline_p)
boot_flags_init(*cmdline_p);
- early_console = &prom_early_console;
register_console(&prom_early_console);
printk("ARCH: ");
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index 4306d44..3fdb455 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -555,12 +555,6 @@ static void __init init_sparc64_elf_hwcap(void)
pause_patch();
}
-static inline void register_prom_console(void)
-{
- early_console = &prom_early_console;
- register_console(&prom_early_console);
-}
-
void __init setup_arch(char **cmdline_p)
{
/* Initialize PROM console and command line. */
@@ -572,7 +566,7 @@ void __init setup_arch(char **cmdline_p)
#ifdef CONFIG_EARLYFB
if (btext_find_display())
#endif
- register_prom_console();
+ register_console(&prom_early_console);
if (tlb_type == hypervisor)
printk("ARCH: SUN4V\n");
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 8c68424..e142545 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -976,12 +976,12 @@ void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *reg
if (unlikely(!mm || (mm == &init_mm)))
return;
- raw_spin_lock_irqsave(&mm->context.lock, flags);
+ spin_lock_irqsave(&mm->context.lock, flags);
if (unlikely(!CTX_VALID(mm->context)))
get_new_mmu_context(mm);
- raw_spin_unlock_irqrestore(&mm->context.lock, flags);
+ spin_unlock_irqrestore(&mm->context.lock, flags);
load_secondary_context(mm);
__flush_tlb_mm(CTX_HWBITS(mm->context),
diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
index d950197..6dee795 100644
--- a/arch/sparc/kernel/syscalls.S
+++ b/arch/sparc/kernel/syscalls.S
@@ -189,7 +189,8 @@ linux_sparc_syscall32:
mov %i0, %l5 ! IEU1
5: call %l7 ! CTI Group brk forced
srl %i5, 0, %o5 ! IEU1
- ba,a,pt %xcc, 3f
+ ba,pt %xcc, 3f
+ sra %o0, 0, %o0
/* Linux native system calls enter here... */
.align 32
@@ -217,7 +218,6 @@ linux_sparc_syscall:
3: stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
ret_sys_call:
ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %g3
- sra %o0, 0, %o0
mov %ulo(TSTATE_XCARRY | TSTATE_ICARRY), %g2
sllx %g2, 32, %g2
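The assembly hunks move the `sra %o0, 0, %o0` sign extension out of the shared return path and into the 32-bit compat path's branch delay slot, so native 64-bit syscall return values are no longer truncated to 32 bits. What `sra` by zero does, expressed in C (standalone illustration):

#include <stdint.h>
#include <stdio.h>

/* An arithmetic right shift by 0 on a 32-bit value sign-extends it
 * into the full 64-bit register, turning e.g. a compat -EFAULT into
 * a proper negative 64-bit value. */
static int64_t demo_sign_extend32(uint64_t reg)
{
	return (int64_t)(int32_t)reg;
}

int main(void)
{
	printf("%lld\n", (long long)demo_sign_extend32(0xffffffffULL)); /* -1 */
	return 0;
}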
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index 2eaca28..59dbd46 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -199,7 +199,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
- if (!mm || pagefault_disabled())
+ if (in_atomic() || !mm)
goto no_context;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index a1d35e2..2ebec26 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -322,7 +322,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
- if (!mm || pagefault_disabled())
+ if (in_atomic() || !mm)
goto intr_or_no_mm;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
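Both fault handlers return to the upstream in_atomic() || !mm guard: a page fault taken in atomic context (or without a user mm) must not sleep in the fault path and is sent to the no-context fixup instead. The pagefault_disabled() form it replaces was RT-specific; on vanilla kernels, pagefault_disable() bumps the preempt count, which in_atomic() already observes. A sketch of that interplay (demo function is hypothetical):

#include <linux/uaccess.h>
#include <linux/errno.h>

static int demo_peek_user(const void __user *uaddr, unsigned long *val)
{
	int ret;

	/* Marks a region where a fault must not sleep; any fault taken
	 * here sees in_atomic() true and takes the fixup path. */
	pagefault_disable();
	ret = __copy_from_user_inatomic(val, uaddr, sizeof(*val));
	pagefault_enable();

	return ret ? -EFAULT : 0;
}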
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index ec995b0..ed82eda 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -350,7 +350,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *
mm = vma->vm_mm;
- raw_spin_lock_irqsave(&mm->context.lock, flags);
+ spin_lock_irqsave(&mm->context.lock, flags);
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
if (mm->context.huge_pte_count && is_hugetlb_pte(pte))
@@ -361,7 +361,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *
__update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
address, pte_val(pte));
- raw_spin_unlock_irqrestore(&mm->context.lock, flags);
+ spin_unlock_irqrestore(&mm->context.lock, flags);
}
void flush_dcache_page(struct page *page)
@@ -661,7 +661,7 @@ void __flush_dcache_range(unsigned long start, unsigned long end)
EXPORT_SYMBOL(__flush_dcache_range);
/* get_new_mmu_context() uses "cache + 1". */
-DEFINE_RAW_SPINLOCK(ctx_alloc_lock);
+DEFINE_SPINLOCK(ctx_alloc_lock);
unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
#define MAX_CTX_NR (1UL << CTX_NR_BITS)
#define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR)
@@ -683,7 +683,7 @@ void get_new_mmu_context(struct mm_struct *mm)
unsigned long orig_pgsz_bits;
int new_version;
- raw_spin_lock(&ctx_alloc_lock);
+ spin_lock(&ctx_alloc_lock);
orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
@@ -719,7 +719,7 @@ void get_new_mmu_context(struct mm_struct *mm)
out:
tlb_context_cache = new_ctx;
mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
- raw_spin_unlock(&ctx_alloc_lock);
+ spin_unlock(&ctx_alloc_lock);
if (unlikely(new_version))
smp_new_mmu_context_version();
@@ -2721,7 +2721,7 @@ void hugetlb_setup(struct pt_regs *regs)
if (tlb_type == cheetah_plus) {
unsigned long ctx;
- raw_spin_lock(&ctx_alloc_lock);
+ spin_lock(&ctx_alloc_lock);
ctx = mm->context.sparc64_ctx_val;
ctx &= ~CTX_PGSZ_MASK;
ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
@@ -2742,7 +2742,7 @@ void hugetlb_setup(struct pt_regs *regs)
mm->context.sparc64_ctx_val = ctx;
on_each_cpu(context_reload, mm, 0);
}
- raw_spin_unlock(&ctx_alloc_lock);
+ spin_unlock(&ctx_alloc_lock);
}
}
#endif
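get_new_mmu_context() above allocates context numbers by scanning a bitmap under ctx_alloc_lock, which this commit turns back into an ordinary spinlock. The allocator pattern in miniature (simplified, demo names hypothetical):

#include <linux/bitops.h>
#include <linux/spinlock.h>

#define DEMO_MAX_CTX	8192UL

static DEFINE_SPINLOCK(demo_ctx_lock);
static unsigned long demo_ctx_bmap[BITS_TO_LONGS(DEMO_MAX_CTX)];

static long demo_alloc_ctx(void)
{
	unsigned long ctx;

	spin_lock(&demo_ctx_lock);
	ctx = find_next_zero_bit(demo_ctx_bmap, DEMO_MAX_CTX, 0);
	if (ctx < DEMO_MAX_CTX)
		__set_bit(ctx, demo_ctx_bmap);
	spin_unlock(&demo_ctx_lock);

	return ctx < DEMO_MAX_CTX ? (long)ctx : -1;
}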
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index 9eb10b4..2cc3bce 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -73,7 +73,7 @@ void flush_tsb_user(struct tlb_batch *tb)
struct mm_struct *mm = tb->mm;
unsigned long nentries, base, flags;
- raw_spin_lock_irqsave(&mm->context.lock, flags);
+ spin_lock_irqsave(&mm->context.lock, flags);
base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
@@ -90,14 +90,14 @@ void flush_tsb_user(struct tlb_batch *tb)
__flush_tsb_one(tb, HPAGE_SHIFT, base, nentries);
}
#endif
- raw_spin_unlock_irqrestore(&mm->context.lock, flags);
+ spin_unlock_irqrestore(&mm->context.lock, flags);
}
void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr)
{
unsigned long nentries, base, flags;
- raw_spin_lock_irqsave(&mm->context.lock, flags);
+ spin_lock_irqsave(&mm->context.lock, flags);
base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
@@ -114,7 +114,7 @@ void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr)
__flush_tsb_one_entry(base, vaddr, HPAGE_SHIFT, nentries);
}
#endif
- raw_spin_unlock_irqrestore(&mm->context.lock, flags);
+ spin_unlock_irqrestore(&mm->context.lock, flags);
}
#define HV_PGSZ_IDX_BASE HV_PGSZ_IDX_8K
@@ -392,7 +392,7 @@ retry_tsb_alloc:
* the lock and ask all other cpus running this address space
* to run tsb_context_switch() to see the new TSB table.
*/
- raw_spin_lock_irqsave(&mm->context.lock, flags);
+ spin_lock_irqsave(&mm->context.lock, flags);
old_tsb = mm->context.tsb_block[tsb_index].tsb;
old_cache_index =
@@ -407,7 +407,7 @@ retry_tsb_alloc:
*/
if (unlikely(old_tsb &&
(rss < mm->context.tsb_block[tsb_index].tsb_rss_limit))) {
- raw_spin_unlock_irqrestore(&mm->context.lock, flags);
+ spin_unlock_irqrestore(&mm->context.lock, flags);
kmem_cache_free(tsb_caches[new_cache_index], new_tsb);
return;
@@ -433,7 +433,7 @@ retry_tsb_alloc:
mm->context.tsb_block[tsb_index].tsb = new_tsb;
setup_tsb_params(mm, tsb_index, new_size);
- raw_spin_unlock_irqrestore(&mm->context.lock, flags);
+ spin_unlock_irqrestore(&mm->context.lock, flags);
/* If old_tsb is NULL, we're being invoked for the first time
* from init_new_context().
@@ -459,7 +459,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
#endif
unsigned int i;
- raw_spin_lock_init(&mm->context.lock);
+ spin_lock_init(&mm->context.lock);
mm->context.sparc64_ctx_val = 0UL;
@@ -523,12 +523,12 @@ void destroy_context(struct mm_struct *mm)
free_hot_cold_page(page, 0);
}
- raw_spin_lock_irqsave(&ctx_alloc_lock, flags);
+ spin_lock_irqsave(&ctx_alloc_lock, flags);
if (CTX_VALID(mm->context)) {
unsigned long nr = CTX_NRBITS(mm->context);
mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63));
}
- raw_spin_unlock_irqrestore(&ctx_alloc_lock, flags);
+ spin_unlock_irqrestore(&ctx_alloc_lock, flags);
}