author | Hollis Blanchard <hollisb@us.ibm.com> | 2008-07-25 18:54:52 (GMT)
---|---|---
committer | Avi Kivity <avi@qumranet.com> | 2008-10-15 08:15:16 (GMT)
commit | 83aae4a8098eb8a40a2e9dab3714354182143b4f | (patch)
tree | 872381c8aa610e3c1053008e967728f121fa55cb | /arch/powerpc/kvm/booke_interrupts.S
parent | 20754c2495a791b5b429c0da63394c86ade978e7 | (diff)
download | linux-83aae4a8098eb8a40a2e9dab3714354182143b4f.tar.xz |
KVM: ppc: Write only modified shadow entries into the TLB on exit
Track which TLB entries need to be written, instead of overwriting everything
below the high water mark. Typically only a single guest TLB entry will be
modified in a single exit.
Guest boot time performance improvement: about 15%.
Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
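The bookkeeping behind the patch is a per-entry "modified" flag kept next to the shadow TLB (the VCPU_SHADOW_MOD area the assembly below indexes): whoever installs or rewrites a shadow entry sets its flag, and the exit path replays only flagged entries into the hardware TLB before clearing the flags again. The following is a minimal C sketch of the marking side only; the structure layout and the names shadow_tlbe, shadow_mmu, and shadow_tlb_update are illustrative assumptions, not the kernel's actual definitions.

```c
#include <stdint.h>

#define PPC44x_TLB_SIZE 64			/* illustrative size */

/* One shadow entry: the MMUCR word plus the three words written with
 * tlbwe (PAGEID, XLAT, ATTRIB) -- 16 bytes, i.e. TLBE_BYTES. */
struct shadow_tlbe {
	uint32_t mmucr;
	uint32_t pageid;
	uint32_t xlat;
	uint32_t attrib;
};

struct shadow_mmu {
	struct shadow_tlbe tlb[PPC44x_TLB_SIZE];	/* VCPU_SHADOW_TLB */
	uint8_t mod[PPC44x_TLB_SIZE];			/* VCPU_SHADOW_MOD */
};

/* Whoever rewrites a shadow entry (e.g. while handling a guest TLB miss)
 * marks it, so the next entry into the guest rewrites only that slot in
 * the hardware TLB instead of everything below the high water mark. */
void shadow_tlb_update(struct shadow_mmu *mmu, unsigned int index,
		       const struct shadow_tlbe *entry)
{
	mmu->tlb[index] = *entry;
	mmu->mod[index] = 1;
}
```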
Diffstat (limited to 'arch/powerpc/kvm/booke_interrupts.S')
-rw-r--r-- | arch/powerpc/kvm/booke_interrupts.S | 51 |
1 file changed, 34 insertions, 17 deletions
```diff
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index 3e88dfa..564ea32 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -335,7 +335,7 @@ lightweight_exit:
 	lwz	r3, VCPU_PID(r4)
 	mtspr	SPRN_PID, r3
 
-	/* Prevent all TLB updates. */
+	/* Prevent all asynchronous TLB updates. */
 	mfmsr	r5
 	lis	r6, (MSR_EE|MSR_CE|MSR_ME|MSR_DE)@h
 	ori	r6, r6, (MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l
@@ -344,28 +344,45 @@ lightweight_exit:
 
 	/* Load the guest mappings, leaving the host's "pinned" kernel mappings
 	 * in place. */
-	/* XXX optimization: load only modified guest entries. */
 	mfspr	r10, SPRN_MMUCR			/* Save host MMUCR. */
-	lis	r8, tlb_44x_hwater@ha
-	lwz	r8, tlb_44x_hwater@l(r8)
-	addi	r9, r4, VCPU_SHADOW_TLB - 4
-	li	r6, 0
+	li	r5, PPC44x_TLB_SIZE
+	lis	r5, tlb_44x_hwater@ha
+	lwz	r5, tlb_44x_hwater@l(r5)
+	mtctr	r5
+	addi	r9, r4, VCPU_SHADOW_TLB
+	addi	r5, r4, VCPU_SHADOW_MOD
+	li	r3, 0
 1:
+	lbzx	r7, r3, r5
+	cmpwi	r7, 0
+	beq	3f
+
 	/* Load guest entry. */
-	lwzu	r7, 4(r9)
+	mulli	r11, r3, TLBE_BYTES
+	add	r11, r11, r9
+	lwz	r7, 0(r11)
 	mtspr	SPRN_MMUCR, r7
-	lwzu	r7, 4(r9)
-	tlbwe	r7, r6, PPC44x_TLB_PAGEID
-	lwzu	r7, 4(r9)
-	tlbwe	r7, r6, PPC44x_TLB_XLAT
-	lwzu	r7, 4(r9)
-	tlbwe	r7, r6, PPC44x_TLB_ATTRIB
-	/* Increment index. */
-	addi	r6, r6, 1
-	cmpw	r6, r8
-	blt	1b
+	lwz	r7, 4(r11)
+	tlbwe	r7, r3, PPC44x_TLB_PAGEID
+	lwz	r7, 8(r11)
+	tlbwe	r7, r3, PPC44x_TLB_XLAT
+	lwz	r7, 12(r11)
+	tlbwe	r7, r3, PPC44x_TLB_ATTRIB
+3:
+	addi	r3, r3, 1			/* Increment index. */
+	bdnz	1b
+
 	mtspr	SPRN_MMUCR, r10			/* Restore host MMUCR. */
 
+	/* Clear bitmap of modified TLB entries */
+	li	r5, PPC44x_TLB_SIZE>>2
+	mtctr	r5
+	addi	r5, r4, VCPU_SHADOW_MOD - 4
+	li	r6, 0
+1:
+	stwu	r6, 4(r5)
+	bdnz	1b
+	iccci	0, 0 /* XXX hack */
 	/* Load some guest volatiles. */
```
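Read as C, the rewritten lightweight_exit TLB reload does roughly the following. This is an illustrative translation under the same assumptions as the sketch above; write_tlb_entry() and the value given to tlb_44x_hwater are stand-ins for the real mtspr SPRN_MMUCR/tlbwe sequence and the host MMU-setup value.

```c
#include <stdint.h>
#include <string.h>

#define PPC44x_TLB_SIZE 64			/* illustrative size */

struct shadow_tlbe {				/* 16 bytes == TLBE_BYTES */
	uint32_t mmucr, pageid, xlat, attrib;
};

/* Number of TLB slots available to the guest; the real value is the
 * kernel's tlb_44x_hwater, set up by the host MMU code. */
static unsigned int tlb_44x_hwater = 60;

/* Stand-in for "mtspr SPRN_MMUCR, e->mmucr" followed by the three tlbwe
 * writes (PAGEID, XLAT, ATTRIB) into hardware TLB slot 'index'. */
static void write_tlb_entry(unsigned int index, const struct shadow_tlbe *e)
{
	(void)index;
	(void)e;
}

void load_guest_shadow_tlb(struct shadow_tlbe *shadow_tlb, uint8_t *shadow_mod)
{
	/* mtctr r5 / bdnz 1b: walk every slot below the high water mark,
	 * i.e. everything not pinned for the host kernel. */
	for (unsigned int i = 0; i < tlb_44x_hwater; i++) {
		if (!shadow_mod[i])		/* lbzx r7, r3, r5; beq 3f */
			continue;
		write_tlb_entry(i, &shadow_tlb[i]);	/* mulli/lwz/tlbwe */
	}

	/* Second loop: clear the modified flags, one 32-bit word (four
	 * flags) per stwu -- hence PPC44x_TLB_SIZE>>2 iterations. */
	memset(shadow_mod, 0, PPC44x_TLB_SIZE);
}
```

The loop still visits every slot below tlb_44x_hwater, but an unmodified slot now costs only a byte load and a taken branch instead of four loads plus an MMUCR write and three tlbwe instructions, which is where the reported boot-time improvement comes from.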