author		Mark Rutland <mark.rutland@arm.com>	2016-11-03 20:23:09 (GMT)
committer	Alex Shi <alex.shi@linaro.org>	2017-05-27 03:16:15 (GMT)
commit		041f20060e0f49b51c5096cf2ca95428599d9ae9 (patch)
tree		52245c5de5072decad5a4d92801c94d6f83bf585 /arch/arm64/mm
parent		fd080ba70802bd73ab1f333507c293cf55048007 (diff)
download	linux-041f20060e0f49b51c5096cf2ca95428599d9ae9.tar.xz
arm64: move sp_el0 and tpidr_el1 into cpu_suspend_ctx
When returning from idle, we rely on the fact that thread_info lives at
the end of the kernel stack, and restore this by masking the saved stack
pointer. Subsequent patches will sever the relationship between the
stack and thread_info, and to cater for this we must save/restore
sp_el0 explicitly, storing it in cpu_suspend_ctx.

As cpu_suspend_ctx must be doubleword aligned, this leaves us with an
extra slot in cpu_suspend_ctx. We can use this to save/restore tpidr_el1
in the same way, which simplifies the code, avoiding pointer chasing on
the restore path (as we no longer need to load thread_info::cpu followed
by the relevant slot in __per_cpu_offset based on this).

This patch stashes both registers in cpu_suspend_ctx.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Tested-by: Laura Abbott <labbott@redhat.com>
Cc: James Morse <james.morse@arm.com>
Cc: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
(cherry picked from commit 623b476fc815464a0241ea7483da7b3580b7d8ac)
Signed-off-by: Alex Shi <alex.shi@linaro.org>
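For context, x0 in both routines points at the cpu_suspend_ctx buffer defined in arch/arm64/include/asm/suspend.h. That header is outside this diffstat (which is limited to arch/arm64/mm), so the following is only a minimal sketch of its assumed post-patch shape: the companion change bumps NR_CTX_REGS from 10 to 12 so the new pair stored at [x0, #80] has somewhere to land. Types such as u64 and the __aligned() attribute come from the usual kernel headers.

/* Sketch (assumed) of arch/arm64/include/asm/suspend.h after this patch */
#define NR_CTX_REGS	12	/* previously 10; two new slots for tpidr_el1 and sp_el0 */

/*
 * Allocated on the stack, hence the 16-byte alignment; must be kept in
 * sync with cpu_do_{suspend,resume} in arch/arm64/mm/proc.S below.
 */
struct cpu_suspend_ctx {
	u64	ctx_regs[NR_CTX_REGS];	/* ctx_regs[10] = tpidr_el1, ctx_regs[11] = sp_el0 */
	u64	sp;
} __aligned(16);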
Diffstat (limited to 'arch/arm64/mm')
-rw-r--r--	arch/arm64/mm/proc.S	| 6
1 file changed, 6 insertions(+), 0 deletions(-)
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 352c73b..6a853a8 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -70,11 +70,14 @@ ENTRY(cpu_do_suspend)
mrs x8, mdscr_el1
mrs x9, oslsr_el1
mrs x10, sctlr_el1
+ mrs x11, tpidr_el1
+ mrs x12, sp_el0
stp x2, x3, [x0]
stp x4, xzr, [x0, #16]
stp x5, x6, [x0, #32]
stp x7, x8, [x0, #48]
stp x9, x10, [x0, #64]
+ stp x11, x12, [x0, #80]
ret
ENDPROC(cpu_do_suspend)
@@ -90,6 +93,7 @@ ENTRY(cpu_do_resume)
ldp x6, x8, [x0, #32]
ldp x9, x10, [x0, #48]
ldp x11, x12, [x0, #64]
+ ldp x13, x14, [x0, #80]
msr tpidr_el0, x2
msr tpidrro_el0, x3
msr contextidr_el1, x4
@@ -112,6 +116,8 @@ ENTRY(cpu_do_resume)
msr mdscr_el1, x10

msr sctlr_el1, x12
+ msr tpidr_el1, x13
+ msr sp_el0, x14
/*
* Restore oslsr_el1 by writing oslar_el1
*/
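Reading the offsets back against the struct sketch above: each slot is a u64, so the new stp/ldp at [x0, #80] covers ctx_regs[10] and ctx_regs[11], and the saved stack pointer sits immediately after at offset 96. A hypothetical compile-time check (not something the kernel actually carries; it only makes the arithmetic explicit and assumes the layout sketched above) could look like:

#include <stddef.h>

/* u64 as in the kernel's <linux/types.h>; offsets assume the sketch above */
_Static_assert(offsetof(struct cpu_suspend_ctx, ctx_regs) + 10 * sizeof(u64) == 80,
	       "tpidr_el1 is stored at [x0, #80]");
_Static_assert(offsetof(struct cpu_suspend_ctx, ctx_regs) + 11 * sizeof(u64) == 88,
	       "sp_el0 is stored at [x0, #80] + 8");
_Static_assert(offsetof(struct cpu_suspend_ctx, sp) == 96,
	       "the saved SP follows the context registers");

Note that the register numbering differs between the two paths (x11/x12 on suspend, x13/x14 on resume); the pairing is purely by offset into this buffer.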